# """
# Crop upper boddy in every video frame, square bounding box is averaged among all frames and fixed.
# """

# import os
# import cv2
# import argparse
# from tqdm import tqdm
# import face_recognition
# import torch
# import util
# import numpy as np
# import face_detection

# def crop_per_image(data_dir, dest_size, crop_level):
#     fa = face_detection.FaceAlignment(face_detection.LandmarksType._2D, flip_input=False, device='cuda')

#     image_list = util.get_file_list(os.path.join(data_dir, 'full'))
#     batch_size = 5
#     frames = []

#     for i in tqdm(range(len(image_list))):
#         frame = face_recognition.load_image_file(image_list[i])
#         frames.append(frame)

#     H, W, _ = frames[0].shape

#     batches = [frames[i:i + batch_size] for i in range(0, len(frames), batch_size)]

#     for idx in tqdm(range(len(batches))):
#         fb = batches[idx]
#         preds = fa.get_detections_for_batch(np.asarray(fb))

#         for j, f in enumerate(preds):
#             if f is None:
#                 print('no face in image {}'.format(idx * batch_size + j))
#             else:
#                 left, top, right, bottom = f

            
#             height = bottom - top
#             width = right - left
#             crop_size = int(height * crop_level)

#             horizontal_delta = (crop_size - width) // 2
#             vertical_delta = (crop_size - height) // 2

#             left = max(left - horizontal_delta, 0)
#             right = min(right + horizontal_delta, W)
#             top = max(top - int(vertical_delta * 0.5), 0)
#             bottom = min(bottom + int(vertical_delta * 1.5), H)
            
#             crop_f = cv2.imread(image_list[idx * batch_size + j])
#             crop_f = crop_f[top:bottom, left:right]
#             crop_f = cv2.resize(crop_f, (dest_size, dest_size), interpolation=cv2.INTER_AREA)
#             cv2.imwrite(os.path.join(data_dir, 'crop', os.path.basename(image_list[idx * batch_size + j])), crop_f)


# if __name__ == '__main__':
#     parser = argparse.ArgumentParser(description='Process some integers.')
#     parser.add_argument('--data_dir', type=str, default=None)
#     parser.add_argument('--dest_size', type=int, default=256)
#     parser.add_argument('--crop_level', type=float, default=1.0, help='Adjust crop image size.')
#     parser.add_argument('--vertical_adjust', type=float, default=0.3, help='Adjust vertical location of portrait in image.')
#     args = parser.parse_args()
#     util.create_dir(os.path.join(args.data_dir,'crop'))
#     util.create_dir(os.path.join(args.data_dir, 'crop_region'))
#     crop_per_image(args.data_dir, dest_size=args.dest_size, crop_level=args.crop_level)


import os
import cv2
import argparse
from tqdm import tqdm
import numpy as np
import face_detection
import util

def crop_per_frame_and_make_video(data_dir, dest_size, crop_level, video_out_path, fps=30):
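    """
    Run face detection on every frame in <data_dir>/full, expand each detection
    to an approximately square crop of side `crop_level` * face height (shifted
    downwards to include the upper body), resize the crops to
    `dest_size` x `dest_size`, and write them to `video_out_path` as a video at
    `fps` frames per second. Frames with no detected face are skipped.
    """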
    # Initialize face alignment
    fa = face_detection.FaceAlignment(face_detection.LandmarksType._2D, flip_input=False, device='cuda')

    # Get list of images (frames)
    image_list = util.get_file_list(os.path.join(data_dir, 'full'))
    batch_size = 5
    frames = []

    # Load frames
    for image_path in tqdm(image_list, desc='Loading images'):
        frame = cv2.imread(image_path)
        frames.append(frame)

    H, W, _ = frames[0].shape
    batches = [frames[i:i + batch_size] for i in range(0, len(frames), batch_size)]
    cropped_frames = []

    for idx, fb in enumerate(tqdm(batches, desc='Processing batches')):
        preds = fa.get_detections_for_batch(np.asarray(fb))

        for j, f in enumerate(preds):
            if f is None:
                print(f'No face in image {idx * batch_size + j}')
                continue  # Skip frames with no detected face
            
            left, top, right, bottom = f
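            # Expand the detected face box to an (approximately) square crop
            # whose side is `crop_level` * face height. The extra vertical
            # margin is split unevenly (0.5 above, 1.5 below) so the crop
            # reaches down towards the upper body, then clamped to the frame.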
            height = bottom - top
            width = right - left
            crop_size = int(height * crop_level)

            horizontal_delta = (crop_size - width) // 2
            vertical_delta = (crop_size - height) // 2

            left = max(left - horizontal_delta, 0)
            right = min(right + horizontal_delta, W)
            top = max(top - int(vertical_delta * 0.5), 0)
            bottom = min(bottom + int(vertical_delta * 1.5), H)
            
            crop_f = fb[j][top:bottom, left:right]
            crop_f = cv2.resize(crop_f, (dest_size, dest_size), interpolation=cv2.INTER_AREA)
            cropped_frames.append(crop_f)

    # Define the codec and create VideoWriter object
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter(video_out_path, fourcc, fps, (dest_size, dest_size))

    # Write frames to video
    for frame in tqdm(cropped_frames, desc='Compiling video'):
        out.write(frame)

    # Finalize and close the output video file
    out.release()

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Crop video frames and compile into a video.')
    parser.add_argument('--data_dir', type=str, required=True, help='Directory with video frames to process.')
    parser.add_argument('--dest_size', type=int, default=256, help='Destination size of cropped images.')
    parser.add_argument('--crop_level', type=float, default=1.0, help='Adjust crop size relative to face detection.')
    parser.add_argument('--video_out_path', type=str, required=True, help='Output path for the resulting video.')
    parser.add_argument('--fps', type=int, default=30, help='Frames per second for the output video.')
    args = parser.parse_args()

    crop_per_frame_and_make_video(args.data_dir, dest_size=args.dest_size, crop_level=args.crop_level, video_out_path=args.video_out_path, fps=args.fps)
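
# Example invocation (the script name and paths below are illustrative):
#   python crop_upper_body.py --data_dir data/speaker1 --dest_size 256 \
#       --crop_level 1.5 --video_out_path data/speaker1/crop.mp4 --fps 25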