vlordier committed on
Commit
fc35e87
·
verified ·
1 Parent(s): db65255

Upload hf_job_face_embedding.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. hf_job_face_embedding.py +85 -28
hf_job_face_embedding.py CHANGED
@@ -57,22 +57,92 @@ def init_face_embedder(device='cuda'):
57
  return app
58
 
59
 
60
- def extract_embedding(app, image_bgr, bbox):
61
- """Extract face embedding from bbox region"""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
62
  try:
63
- x1, y1, x2, y2 = map(int, bbox)
64
- pad = 20
65
- h, w = image_bgr.shape[:2]
66
- x1 = max(0, x1 - pad)
67
- y1 = max(0, y1 - pad)
68
- x2 = min(w, x2 + pad)
69
- y2 = min(h, y2 + pad)
70
  crop = image_bgr[y1:y2, x1:x2]
71
 
 
 
 
 
 
 
 
 
 
 
 
 
72
  faces = app.get(crop)
73
  if len(faces) == 0:
74
  return None
75
 
 
76
  face = max(faces, key=lambda x: x.det_score)
77
  embedding = face.embedding
78
  embedding_norm = embedding / np.linalg.norm(embedding)
@@ -96,6 +166,7 @@ def process_batch(batch, sam3d_dataset):
96
 
97
  for idx, image_pil in enumerate(images):
98
  image_id = Path(image_paths[idx]).stem if image_paths[idx] else f'img_{idx:06d}'
 
99
 
100
  # Find corresponding SAM3D data
101
  sam3d_row = sam3d_dataset.filter(lambda x: x['image_id'] == image_id).take(1)
@@ -121,31 +192,17 @@ def process_batch(batch, sam3d_dataset):
121
  kpts2d = human.get('keypoints_2d')
122
  kpts3d = human.get('keypoints_3d')
123
 
124
- # Check if face is valid
125
- if bbox is None or kpts2d is None or kpts3d is None:
126
- embeddings.append(None)
127
- continue
128
-
129
- kpts2d_arr = np.array(kpts2d)
130
- kpts3d_arr = np.array(kpts3d)
131
-
132
- if len(kpts2d_arr) < 3 or len(kpts3d_arr) < 3:
133
  embeddings.append(None)
134
  continue
135
 
136
- # Check face keypoints valid
137
- nose_3d = kpts3d_arr[0]
138
- left_eye_3d = kpts3d_arr[1]
139
- right_eye_3d = kpts3d_arr[2]
140
-
141
- if (np.linalg.norm(nose_3d) < 1e-6 or
142
- np.linalg.norm(left_eye_3d) < 1e-6 or
143
- np.linalg.norm(right_eye_3d) < 1e-6):
144
  embeddings.append(None)
145
  continue
146
 
147
- # Extract embedding
148
- embedding = extract_embedding(face_app, image_bgr, bbox)
149
  embeddings.append(embedding)
150
 
151
  results_list.append({
 
57
  return app
58
 
59
 
60
def make_square_bbox_with_padding(bbox, img_width, img_height, padding=0.2):
    """Expand a bounding box into a padded square, clipped to the image.

    The square is centred on the original box; its side is the larger of
    the box's width/height grown by the fractional ``padding``.  Each
    corner is then clamped to the image bounds, so the result may be
    non-square when the box sits near an edge.

    Returns [x1, y1, x2, y2] as ints.
    """
    left, top, right, bottom = bbox

    # Side of the square: the longer edge, grown by the padding fraction.
    side = max(right - left, bottom - top) * (1 + padding)
    half = side / 2

    # Centre of the original box.
    center_x = (left + right) / 2
    center_y = (top + bottom) / 2

    # Clamp each corner independently to stay inside the image.
    return [
        max(0, int(center_x - half)),
        max(0, int(center_y - half)),
        min(img_width, int(center_x + half)),
        min(img_height, int(center_y + half)),
    ]
81
+
82
+
83
def has_valid_face(keypoints_2d, keypoints_3d, img_width, img_height):
    """Return True when the first three keypoints describe a usable face.

    The first three entries are taken to be nose, left eye, right eye.
    A face counts as valid when both keypoint sets are present, have at
    least three points, the 3D points are away from the origin (an
    origin point means "not estimated"), and the 2D points all fall
    inside the image frame.
    """
    if keypoints_2d is None or keypoints_3d is None:
        return False

    pts2d = np.array(keypoints_2d)
    pts3d = np.array(keypoints_3d)

    # Need at least nose + both eyes in each set.
    if len(pts2d) < 3 or len(pts3d) < 3:
        return False

    # Any 3D face keypoint sitting at the origin marks the face invalid.
    if any(np.linalg.norm(pt) <= 1e-6 for pt in pts3d[:3]):
        return False

    # All three 2D face keypoints must lie within image bounds.
    return all(
        0 <= pt[0] < img_width and 0 <= pt[1] < img_height
        for pt in pts2d[:3]
    )
117
+
118
+
119
+ def extract_embedding(app, image_bgr, bbox, img_width, img_height):
120
+ """Extract face embedding from bbox region with proper cropping and padding"""
121
  try:
122
+ # Make square bbox with padding for better face detection
123
+ square_bbox = make_square_bbox_with_padding(bbox, img_width, img_height, padding=0.2)
124
+ x1, y1, x2, y2 = square_bbox
125
+
126
+ # Crop to square region
 
 
127
  crop = image_bgr[y1:y2, x1:x2]
128
 
129
+ if crop.size == 0:
130
+ return None
131
+
132
+ # Resize to optimal size for InsightFace (640x640 max)
133
+ crop_h, crop_w = crop.shape[:2]
134
+ if max(crop_h, crop_w) > 640:
135
+ scale = 640 / max(crop_h, crop_w)
136
+ new_h = int(crop_h * scale)
137
+ new_w = int(crop_w * scale)
138
+ crop = cv2.resize(crop, (new_w, new_h), interpolation=cv2.INTER_LINEAR)
139
+
140
+ # Detect faces
141
  faces = app.get(crop)
142
  if len(faces) == 0:
143
  return None
144
 
145
+ # Use the most confident face
146
  face = max(faces, key=lambda x: x.det_score)
147
  embedding = face.embedding
148
  embedding_norm = embedding / np.linalg.norm(embedding)
 
166
 
167
  for idx, image_pil in enumerate(images):
168
  image_id = Path(image_paths[idx]).stem if image_paths[idx] else f'img_{idx:06d}'
169
+ img_width, img_height = image_pil.size
170
 
171
  # Find corresponding SAM3D data
172
  sam3d_row = sam3d_dataset.filter(lambda x: x['image_id'] == image_id).take(1)
 
192
  kpts2d = human.get('keypoints_2d')
193
  kpts3d = human.get('keypoints_3d')
194
 
195
+ # Check if this human has a valid, visible face
196
+ if not has_valid_face(kpts2d, kpts3d, img_width, img_height):
 
 
 
 
 
 
 
197
  embeddings.append(None)
198
  continue
199
 
200
+ if bbox is None:
 
 
 
 
 
 
 
201
  embeddings.append(None)
202
  continue
203
 
204
+ # Extract embedding from face region
205
+ embedding = extract_embedding(face_app, image_bgr, bbox, img_width, img_height)
206
  embeddings.append(embedding)
207
 
208
  results_list.append({