intuitive262 committed
Commit 31ef40f · 1 Parent(s): cff3351

Updated app.py

Files changed (1)
  1. app.py +97 -97
app.py CHANGED
@@ -130,94 +130,94 @@ def process_image(image, operation_name, age_filters=[], gender_filters=[], sele
 
     return [image_with_boxes_rgb, processed_image_rgb, json.dumps(results_data, indent=2), face_thumbnails]
 
-def process_video(video_path, operation_name, age_filters=[], gender_filters=[], progress=gr.Progress()):
-    """Process a video with face blurring"""
-    if video_path is None:
-        return None, "Please upload a video"
+# def process_video(video_path, operation_name, age_filters=[], gender_filters=[], progress=gr.Progress()):
+#     """Process a video with face blurring"""
+#     if video_path is None:
+#         return None, "Please upload a video"
 
-    # Get operation code
-    operation = OPERATION_OPTIONS.get(operation_name, 0)
+#     # Get operation code
+#     operation = OPERATION_OPTIONS.get(operation_name, 0)
 
-    # Create a temporary file for the output
-    output_path = tempfile.NamedTemporaryFile(suffix='.mp4', delete=False).name
+#     # Create a temporary file for the output
+#     output_path = tempfile.NamedTemporaryFile(suffix='.mp4', delete=False).name
 
-    # Open the video
-    cap = cv2.VideoCapture(video_path)
-    if not cap.isOpened():
-        return None, "Could not open video file"
+#     # Open the video
+#     cap = cv2.VideoCapture(video_path)
+#     if not cap.isOpened():
+#         return None, "Could not open video file"
 
-    # Get video properties
-    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
-    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
-    fps = cap.get(cv2.CAP_PROP_FPS)
-    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
+#     # Get video properties
+#     width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+#     height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+#     fps = cap.get(cv2.CAP_PROP_FPS)
+#     total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
 
-    # Determine frame skipping (process every nth frame for speed)
-    frame_skip = max(1, round(fps / 15)) # Process at most 15 fps
+#     # Determine frame skipping (process every nth frame for speed)
+#     frame_skip = max(1, round(fps / 15)) # Process at most 15 fps
 
-    # Create VideoWriter object
-    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
-    out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
+#     # Create VideoWriter object
+#     fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+#     out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
 
-    # Create filters dictionary
-    filters = {
-        "gender": gender_filters,
-        "age": age_filters
-    }
+#     # Create filters dictionary
+#     filters = {
+#         "gender": gender_filters,
+#         "age": age_filters
+#     }
 
-    # Process frames
-    frame_count = 0
-    face_count = 0
+#     # Process frames
+#     frame_count = 0
+#     face_count = 0
 
-    # Process limited frames to prevent timeout (Gradio has a 60s limit by default)
-    max_frames_to_process = min(300, total_frames) # Limit to 300 frames
+#     # Process limited frames to prevent timeout (Gradio has a 60s limit by default)
+#     max_frames_to_process = min(300, total_frames) # Limit to 300 frames
 
-    for _ in progress.tqdm(range(max_frames_to_process)):
-        ret, frame = cap.read()
-        if not ret:
-            break
+#     for _ in progress.tqdm(range(max_frames_to_process)):
+#         ret, frame = cap.read()
+#         if not ret:
+#             break
 
-        # Process every nth frame (for efficiency)
-        if frame_count % frame_skip == 0:
-            # Detect faces
-            loop = asyncio.new_event_loop()
-            asyncio.set_event_loop(loop)
-            predictions = loop.run_until_complete(detect_faces_frame(detector=detector, frame=frame))
-            loop.close()
+#         # Process every nth frame (for efficiency)
+#         if frame_count % frame_skip == 0:
+#             # Detect faces
+#             loop = asyncio.new_event_loop()
+#             asyncio.set_event_loop(loop)
+#             predictions = loop.run_until_complete(detect_faces_frame(detector=detector, frame=frame))
+#             loop.close()
 
-            face_count += len(predictions)
+#             face_count += len(predictions)
 
-            # Apply blur
-            loop = asyncio.new_event_loop()
-            asyncio.set_event_loop(loop)
-            processed_frame = loop.run_until_complete(
-                apply_blur(
-                    detected_faces=predictions,
-                    frame=frame,
-                    filters=filters,
-                    operation=operation
-                )
-            )
-            loop.close()
+#             # Apply blur
+#             loop = asyncio.new_event_loop()
+#             asyncio.set_event_loop(loop)
+#             processed_frame = loop.run_until_complete(
+#                 apply_blur(
+#                     detected_faces=predictions,
+#                     frame=frame,
+#                     filters=filters,
+#                     operation=operation
+#                 )
+#             )
+#             loop.close()
 
-            # Write processed frame
-            out.write(processed_frame)
-        else:
-            # Write original frame for skipped frames
-            out.write(frame)
+#             # Write processed frame
+#             out.write(processed_frame)
+#         else:
+#             # Write original frame for skipped frames
+#             out.write(frame)
 
-        frame_count += 1
+#         frame_count += 1
 
-    # Release resources
-    cap.release()
-    out.release()
+#     # Release resources
+#     cap.release()
+#     out.release()
 
-    # Summary message
-    summary = f"Processed {frame_count} frames, detected {face_count} faces"
-    if frame_count < total_frames:
-        summary += f" (limited to first {frame_count} frames out of {total_frames})"
+#     # Summary message
+#     summary = f"Processed {frame_count} frames, detected {face_count} faces"
+#     if frame_count < total_frames:
+#         summary += f" (limited to first {frame_count} frames out of {total_frames})"
 
-    return output_path, summary
+#     return output_path, summary
 
 # Create Gradio interface
 with gr.Blocks(title="Face Privacy Protection Tool") as demo:
@@ -279,37 +279,37 @@ with gr.Blocks(title="Face Privacy Protection Tool") as demo:
                 outputs=[image_with_boxes, image_output, json_output, face_gallery]
             )
 
-        with gr.TabItem("Video Processing"):
-            with gr.Row():
-                with gr.Column():
-                    video_input = gr.Video(label="Upload Video")
-                    video_operation = gr.Dropdown(
-                        choices=list(OPERATION_OPTIONS.keys()),
-                        value="Gaussian Blur",
-                        label="Blur Operation"
-                    )
+        # with gr.TabItem("Video Processing"):
+        #     with gr.Row():
+        #         with gr.Column():
+        #             video_input = gr.Video(label="Upload Video")
+        #             video_operation = gr.Dropdown(
+        #                 choices=list(OPERATION_OPTIONS.keys()),
+        #                 value="Gaussian Blur",
+        #                 label="Blur Operation"
+        #             )
 
-                    with gr.Accordion("Advanced Filtering", open=False):
-                        video_age_filter = gr.CheckboxGroup(
-                            choices=AGE_OPTIONS,
-                            label="Filter by Age (select to blur)"
-                        )
-                        video_gender_filter = gr.CheckboxGroup(
-                            choices=GENDER_OPTIONS,
-                            label="Filter by Gender (select to blur)"
-                        )
+        #             with gr.Accordion("Advanced Filtering", open=False):
+        #                 video_age_filter = gr.CheckboxGroup(
+        #                     choices=AGE_OPTIONS,
+        #                     label="Filter by Age (select to blur)"
+        #                 )
+        #                 video_gender_filter = gr.CheckboxGroup(
+        #                     choices=GENDER_OPTIONS,
+        #                     label="Filter by Gender (select to blur)"
+        #                 )
 
-                    video_button = gr.Button("Process Video")
+        #             video_button = gr.Button("Process Video")
 
-                with gr.Column():
-                    video_output = gr.Video(label="Processed Video")
-                    video_summary = gr.Textbox(label="Processing Summary")
+        #         with gr.Column():
+        #             video_output = gr.Video(label="Processed Video")
+        #             video_summary = gr.Textbox(label="Processing Summary")
 
-            video_button.click(
-                process_video,
-                inputs=[video_input, video_operation, video_age_filter, video_gender_filter],
-                outputs=[video_output, video_summary]
-            )
+        #     video_button.click(
+        #         process_video,
+        #         inputs=[video_input, video_operation, video_age_filter, video_gender_filter],
+        #         outputs=[video_output, video_summary]
+        #     )
 
     gr.Markdown("""
     ## How to Use