@@ -96,7 +96,6 @@ def main():
print ( " webcam dimensions = {} x {} " . format ( width , height ) )
print ( " webcam dimensions = {} x {} " . format ( width , height ) )
# load models
# load models
previous = None
net = load_face_model ( )
net = load_face_model ( )
generator , kp_detector = demo . load_checkpoints ( config_path = f ' { first_order_path } config/vox-adv-256.yaml ' , checkpoint_path = f ' { model_path } /vox-adv-cpk.pth.tar ' )
generator , kp_detector = demo . load_checkpoints ( config_path = f ' { first_order_path } config/vox-adv-256.yaml ' , checkpoint_path = f ' { model_path } /vox-adv-cpk.pth.tar ' )
@@ -108,9 +107,7 @@ def main():
cv2 . namedWindow ( ' DeepFake ' , cv2 . WINDOW_GUI_NORMAL ) # face transformation
cv2 . namedWindow ( ' DeepFake ' , cv2 . WINDOW_GUI_NORMAL ) # face transformation
cv2 . moveWindow ( ' DeepFake ' , int ( screen_width / / 2 ) + 150 , 100 )
cv2 . moveWindow ( ' DeepFake ' , int ( screen_width / / 2 ) + 150 , 100 )
img_shape = source_image . shape
cv2 . resizeWindow ( ' DeepFake ' , 256 , 256 )
cv2 . resizeWindow ( ' DeepFake ' , int ( img_shape [ 1 ] / / img_shape [ 0 ] * 256 ) , 256 )
cv2 . namedWindow ( ' Stream ' , cv2 . WINDOW_GUI_NORMAL ) # rendered to fake webcam
cv2 . namedWindow ( ' Stream ' , cv2 . WINDOW_GUI_NORMAL ) # rendered to fake webcam
cv2 . moveWindow ( ' Stream ' , int ( screen_width / / 2 ) - int ( webcam_width / / 2 ) , 400 )
cv2 . moveWindow ( ' Stream ' , int ( screen_width / / 2 ) - int ( webcam_width / / 2 ) , 400 )
@@ -120,6 +117,8 @@ def main():
print ( " Press C to center Webcam, Press B/N for previous/next image in media directory, T to alter between relative and absolute transformation, Q to quit " )
print ( " Press C to center Webcam, Press B/N for previous/next image in media directory, T to alter between relative and absolute transformation, Q to quit " )
x1 , y1 , x2 , y2 = [ 0 , 0 , 0 , 0 ]
x1 , y1 , x2 , y2 = [ 0 , 0 , 0 , 0 ]
relative = True
relative = True
previous = None
while True :
while True :
ret , frame = video_capture . read ( )
ret , frame = video_capture . read ( )
frame = cv2 . resize ( frame , ( 640 , 480 ) )
frame = cv2 . resize ( frame , ( 640 , 480 ) )
@@ -134,17 +133,13 @@ def main():
#cv2.imshow('Previous',previous)
#cv2.imshow('Previous',previous)
curr_face = cut_face_window ( x1 , y1 , x2 , y2 , frame )
curr_face = cut_face_window ( x1 , y1 , x2 , y2 , frame . copy ( ) )
cv2 . imshow ( ' Previous ' , previous )
# cv2.imshow('Previous',previous)
cv2 . imshow ( ' Curr Face ' , curr_face )
# cv2.imshow('Curr Face',curr_face)
cv2 . imshow ( ' Source Image ' , source_image )
# cv2.imshow('Source Image',source_image)
print ( ' previous= ' , previous . shape )
print ( ' curr_face= ' , curr_face . shape )
print ( ' source= ' , source_image . shape )
deep_fake = process_image ( source_image , previous , curr_face , net , generator , kp_detector , relative )
deep_fake = process_image ( source_image , previous , curr_face , net , generator , kp_detector , relative )
print ( " deep_fake " , deep_fake . shape )
#print("deep_fake",deep_fake.shape)
deep_fake = cv2 . cvtColor ( deep_fake , cv2 . COLOR_RGB2BGR )
deep_fake = cv2 . cvtColor ( deep_fake , cv2 . COLOR_RGB2BGR )
@@ -154,26 +149,23 @@ def main():
x_border = int ( ( 640 - ( img_shape [ 1 ] / / img_shape [ 0 ] * 480 ) ) / / 2 )
x_border = int ( ( 640 - ( img_shape [ 1 ] / / img_shape [ 0 ] * 480 ) ) / / 2 )
#y_border = int((480-(img_shape[0] // img_shape[1] * 640))//2)
#y_border = int((480-(img_shape[0] // img_shape[1] * 640))//2)
stream_v = cv2 . copyMakeBorder ( rgb , 0 , 0 , x_border if x_border > = 0 else 0 , x_border if x_border > = 0 else 0 , cv2 . BORDER_CONSTANT )
stream_v = cv2 . copyMakeBorder ( rgb , 0 , 0 , x_border if x_border > = 0 else 0 , x_border if x_border > = 0 else 0 , cv2 . BORDER_CONSTANT )
cv2 . imshow ( ' Webcam ' , frame )
#cv2.imshow('Webcam', frame)
cv2 . imshow ( ' Face ' , curr_face )
cv2 . imshow ( ' Face ' , curr_face )
cv2 . imshow ( ' DeepFake ' , deep_fake )
cv2 . imshow ( ' DeepFake ' , deep_fake )
#cv2.imshow('Previous', previous)
#cv2.imshow('Previous', previous)
#cv2.imshow('RGB', rgb)
#cv2.imshow('RGB', rgb)
#cv2.imshow('Source Image', source_image)
#cv2.imshow('Source Image', source_image)
#time.sleep(1/30.0)
cv2 . imshow ( ' Stream ' , stream_v )
cv2 . imshow ( ' Stream ' , stream_v )
#time.sleep(1/30.0)
# stream to fakewebcam
if system == " linux " :
stream_v = cv2 . flip ( stream_v , 1 )
stream_v = cv2 . flip ( stream_v , 1 )
stream_v = cv2 . cvtColor ( stream_v , cv2 . COLOR_BGR2RGB )
stream_v = cv2 . cvtColor ( stream_v , cv2 . COLOR_BGR2RGB )
stream_v = ( stream_v * 255 ) . astype ( np . uint8 )
stream_v = ( stream_v * 255 ) . astype ( np . uint8 )
# stream to fakewebcam
if system == " linux " :
#print("output to fakecam")
#print("output to fakecam")
camera . schedule_frame ( stream_v )
camera . schedule_frame ( stream_v )
@@ -219,12 +211,13 @@ def load_face_model():
return net
return net
def cut_face_window(x1, y1, x2, y2, frame):
    """Crop the rectangle (x1, y1)-(x2, y2) out of *frame* and scale it to 256x256.

    Works on a copy so the caller's frame buffer is never touched.
    Returns the resized crop with at most 3 channels (any alpha plane dropped).
    NOTE(review): `resize` is presumably skimage.transform.resize, which
    returns a float image in [0, 1] — confirm against the file's imports.
    """
    window = frame.copy()[y1:y2, x1:x2]
    return resize(window, (256, 256))[..., :3]
# find the face in webcam stream and center a 256x256 window
# find the face in webcam stream and center a 256x256 window
def find_face_cut ( net , face ,previous = False ):
def find_face_cut ( net , face ):
blob = cv2 . dnn . blobFromImage ( face , 1.0 , ( 300 , 300 ) , [ 104 , 117 , 123 ] , False , False )
blob = cv2 . dnn . blobFromImage ( face , 1.0 , ( 300 , 300 ) , [ 104 , 117 , 123 ] , False , False )
frameWidth = 640
frameWidth = 640
frameHeight = 480
frameHeight = 480
@@ -233,31 +226,33 @@ def find_face_cut(net,face,previous=False):
bboxes = [ ]
bboxes = [ ]
face_found = False
face_found = False
for i in range ( detections . shape [ 2 ] ) :
for i in range ( detections . shape [ 2 ] ) :
#print(i)
confidence = detections [ 0 , 0 , i , 2 ]
confidence = detections [ 0 , 0 , i , 2 ]
if confidence > 0.8 :
if confidence > 0.9 :
face_found = True
x1 = ( int ( detections [ 0 , 0 , i , 3 ] * frameWidth ) / / 2 ) * 2
x1 = int ( detections [ 0 , 0 , i , 3 ] * frameWidth )
y1 = ( int ( detections [ 0 , 0 , i , 4 ] * frameHeight ) / / 2 ) * 2
y1 = int ( detections [ 0 , 0 , i , 4 ] * frameHeight )
x2 = ( int ( detections [ 0 , 0 , i , 5 ] * frameWidth ) / / 2 ) * 2
x2 = int ( detections [ 0 , 0 , i , 5 ] * frameWidth )
y2 = ( int ( detections [ 0 , 0 , i , 6 ] * frameHeight ) / / 2 ) * 2
y2 = int ( detections [ 0 , 0 , i , 6 ] * frameHeight )
face_margin_w = int ( 256 - ( abs ( x1 - x2 ) ) )
face_margin_w = int ( 256 - ( abs ( x1 - x2 ) ) )
face_margin_h = int ( 256 - ( abs ( y1 - y2 ) ) )
face_margin_h = int ( 256 - ( abs ( y1 - y2 ) ) )
cut_x1 = ( x1 - int ( face_margin_w / / 2 ) )
cut_x1 = x1 - int ( face_margin_w / / 2 )
if cut_x1 < 0 : cut_x1 = 0
cut_y1 = y1 - int ( 2 * face_margin_h / / 3 )
cut_y1 = y1 - int ( 2 * face_margin_h / / 3 )
if cut_y1 < 0 : cut_y1 = 0
cut_x2 = x2 + int ( face_margin_w / / 2 )
cut_x2 = x2 + int ( face_margin_w / / 2 )
cut_y2 = y2 + int ( face_margin_h / / 3 )
cut_y2 = y2 + face_margin_h - int ( 2 * face_margin_h / / 3 )
face_found = True
break
if not face_found :
if not face_found :
print ( " No face detected in video " )
print ( " No face detected in video " )
# let's just use the middle section of the image
# let's just use the middle section of the image
cut_x1 , cut_y1 , cut_x2 , cut_y2 = 112 , 192 , 368 , 448
cut_x1 , cut_y1 , cut_x2 , cut_y2 = 112 , 192 , 368 , 448
else :
else :
print ( f ' Found face at: ( { x1 , y1 } ) ( { x2 } , { y2 } width: { abs ( x2 - x1 ) } height: { abs ( y2 - y1 ) } ) ' )
print ( f ' Found face at: ( { x1 , y1 } ) ( { x2 } , { y2 } width: { ( x2 - x1 ) } height: { ( y2 - y1 ) } ) ' )
print ( f ' Cutting at: ( { cut_x1 , cut_y1 } ) ( { cut_x2 } , { cut_y2 } width: { abs ( cut_x2 - cut_x1 ) } height: { abs ( cut_y2 - cut_y1 ) } ) ' )
print ( f ' Cutting at: ( { cut_x1 , cut_y1 } ) ( { cut_x2 } , { cut_y2 } width: { ( cut_x2 - cut_x1 ) } height: { ( cut_y2 - cut_y1 ) } ) ' )
return cut_x1 , cut_y1 , cut_x2 , cut_y2
return cut_x1 , cut_y1 , cut_x2 , cut_y2