|
(shared_env) (base) com@COMV-7960 facefusion-next % grep -i "path" facefusion.ini |
|
|
|
[paths] |
|
temp_path = |
|
jobs_path = |
|
source_paths = |
|
target_path = |
|
output_path = |
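
For orientation, a minimal sketch of how this [paths] section might be filled in; the keys are exactly the ones listed above, while the values below are placeholders rather than shipped defaults:

[paths]
temp_path = /tmp
jobs_path = .jobs
source_paths = .assets/examples/source.jpg
target_path = .assets/examples/target.mp4
output_path = .assets/examples/output.mp4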
|
(shared_env) (base) com@COMV-7960 facefusion-next % grep -iR "path" facefusion/ |
|
|
|
facefusion//face_recognizer.py:from facefusion.filesystem import resolve_relative_path |
|
facefusion//face_recognizer.py: 'path': resolve_relative_path('../.assets/models/arcface_w600k_r50.hash') |
|
facefusion//face_recognizer.py: 'path': resolve_relative_path('../.assets/models/arcface_w600k_r50.onnx') |
|
facefusion//execution.py: 'trt_engine_cache_path': '.caches', |
|
facefusion//execution.py: 'trt_timing_cache_path': '.caches', |
|
facefusion//content_analyser.py:from facefusion.filesystem import resolve_relative_path |
|
facefusion//content_analyser.py: 'path': resolve_relative_path('../.assets/models/yolo_11m_nsfw.hash') |
|
facefusion//content_analyser.py: 'path': resolve_relative_path('../.assets/models/yolo_11m_nsfw.onnx') |
|
facefusion//content_analyser.py:def analyse_image(image_path : str) -> bool: |
|
facefusion//content_analyser.py: vision_frame = read_image(image_path) |
|
facefusion//content_analyser.py:def analyse_video(video_path : str, trim_frame_start : int, trim_frame_end : int) -> bool: |
|
facefusion//content_analyser.py: video_fps = detect_video_fps(video_path) |
|
facefusion//content_analyser.py: vision_frame = read_video_frame(video_path, frame_number) |
|
facefusion//config.py: CONFIG_PARSER.read(state_manager.get_item('config_path'), encoding = 'utf-8') |
|
facefusion//ffmpeg_builder.py:def set_input(input_path : str) -> Commands: |
|
facefusion//ffmpeg_builder.py: return [ '-i', input_path ] |
|
facefusion//ffmpeg_builder.py:def set_output(output_path : str) -> Commands: |
|
facefusion//ffmpeg_builder.py: return [ output_path ] |
|
facefusion//ffmpeg_builder.py:def force_output(output_path : str) -> Commands: |
|
facefusion//ffmpeg_builder.py: return [ '-y', output_path ] |
|
facefusion//ffmpeg_builder.py:def set_image_quality(image_path : str, image_quality : int) -> Commands: |
|
facefusion//ffmpeg_builder.py: if get_file_format(image_path) == 'webp': |
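
A rough sketch of how these builder helpers compose an ffmpeg invocation: each helper returns a flat list of arguments, so a full command is just list concatenation. Only set_input, set_output, force_output and set_image_quality are confirmed by the output above; the file names below are hypothetical.

# assumes: from facefusion.ffmpeg_builder import set_input, force_output
commands = []
commands += set_input('target.mp4')     # ['-i', 'target.mp4']
commands += force_output('output.mp4')  # ['-y', 'output.mp4']
# the resulting list would then be handed to ffmpeg, e.g. ['ffmpeg', '-i', 'target.mp4', '-y', 'output.mp4']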
|
facefusion//vision.py:def read_static_image(image_path : str) -> Optional[VisionFrame]: |
|
facefusion//vision.py: return read_image(image_path) |
|
facefusion//vision.py:def read_static_images(image_paths : List[str]) -> List[VisionFrame]: |
|
facefusion//vision.py: if image_paths: |
|
facefusion//vision.py: for image_path in image_paths: |
|
facefusion//vision.py: frames.append(read_static_image(image_path)) |
|
facefusion//vision.py:def read_image(image_path : str) -> Optional[VisionFrame]: |
|
facefusion//vision.py: if is_image(image_path): |
|
facefusion//vision.py: image_buffer = numpy.fromfile(image_path, dtype = numpy.uint8) |
|
facefusion//vision.py: return cv2.imread(image_path) |
|
facefusion//vision.py:def write_image(image_path : str, vision_frame : VisionFrame) -> bool: |
|
facefusion//vision.py: if image_path: |
|
facefusion//vision.py: image_file_extension = get_file_extension(image_path) |
|
facefusion//vision.py: vision_frame.tofile(image_path) |
|
facefusion//vision.py: return is_image(image_path) |
|
facefusion//vision.py: return cv2.imwrite(image_path, vision_frame) |
|
facefusion//vision.py:def detect_image_resolution(image_path : str) -> Optional[Resolution]: |
|
facefusion//vision.py: if is_image(image_path): |
|
facefusion//vision.py: image = read_image(image_path) |
|
facefusion//vision.py:def restrict_image_resolution(image_path : str, resolution : Resolution) -> Resolution: |
|
facefusion//vision.py: if is_image(image_path): |
|
facefusion//vision.py: image_resolution = detect_image_resolution(image_path) |
|
facefusion//vision.py:def read_video_frame(video_path : str, frame_number : int = 0) -> Optional[VisionFrame]: |
|
facefusion//vision.py: if is_video(video_path): |
|
facefusion//vision.py: video_capture = cv2.VideoCapture(video_path) |
|
facefusion//vision.py:def count_video_frame_total(video_path : str) -> int: |
|
facefusion//vision.py: if is_video(video_path): |
|
facefusion//vision.py: video_capture = cv2.VideoCapture(video_path) |
|
facefusion//vision.py:def predict_video_frame_total(video_path : str, fps : Fps, trim_frame_start : int, trim_frame_end : int) -> int: |
|
facefusion//vision.py: if is_video(video_path): |
|
facefusion//vision.py: target_video_fps = detect_video_fps(video_path) |
|
facefusion//vision.py: extract_frame_total = count_trim_frame_total(video_path, trim_frame_start, trim_frame_end) * fps / target_video_fps |
|
facefusion//vision.py:def detect_video_fps(video_path : str) -> Optional[float]: |
|
facefusion//vision.py: if is_video(video_path): |
|
facefusion//vision.py: video_capture = cv2.VideoCapture(video_path) |
|
facefusion//vision.py:def restrict_video_fps(video_path : str, fps : Fps) -> Fps: |
|
facefusion//vision.py: if is_video(video_path): |
|
facefusion//vision.py: video_fps = detect_video_fps(video_path) |
|
facefusion//vision.py:def detect_video_duration(video_path : str) -> Duration: |
|
facefusion//vision.py: video_frame_total = count_video_frame_total(video_path) |
|
facefusion//vision.py: video_fps = detect_video_fps(video_path) |
|
facefusion//vision.py:def count_trim_frame_total(video_path : str, trim_frame_start : Optional[int], trim_frame_end : Optional[int]) -> int: |
|
facefusion//vision.py: trim_frame_start, trim_frame_end = restrict_trim_frame(video_path, trim_frame_start, trim_frame_end) |
|
facefusion//vision.py:def restrict_trim_frame(video_path : str, trim_frame_start : Optional[int], trim_frame_end : Optional[int]) -> Tuple[int, int]: |
|
facefusion//vision.py: video_frame_total = count_video_frame_total(video_path) |
|
facefusion//vision.py:def detect_video_resolution(video_path : str) -> Optional[Resolution]: |
|
facefusion//vision.py: if is_video(video_path): |
|
facefusion//vision.py: video_capture = cv2.VideoCapture(video_path) |
|
facefusion//vision.py:def restrict_video_resolution(video_path : str, resolution : Resolution) -> Resolution: |
|
facefusion//vision.py: if is_video(video_path): |
|
facefusion//vision.py: video_resolution = detect_video_resolution(video_path) |
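
Taken together, the vision.py helpers guard every read with is_image/is_video before touching the file. A minimal usage sketch, assuming a hypothetical target video path:

# assumes: from facefusion.filesystem import is_video
# assumes: from facefusion.vision import detect_video_fps, detect_video_resolution, read_video_frame
video_path = '.assets/examples/target.mp4'  # hypothetical example path
if is_video(video_path):
    video_fps = detect_video_fps(video_path)                 # Optional[float]
    video_resolution = detect_video_resolution(video_path)   # Optional[Resolution]
    first_frame = read_video_frame(video_path, 0)            # Optional[VisionFrame]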
|
facefusion//wording.py: # paths |
|
facefusion//wording.py: 'config_path': 'choose the config file to override defaults', |
|
facefusion//wording.py: 'temp_path': 'specify the directory for the temporary resources', |
|
facefusion//wording.py: 'jobs_path': 'specify the directory to store jobs', |
|
facefusion//wording.py: 'source_paths': 'choose the image or audio paths', |
|
facefusion//wording.py: 'target_path': 'choose the image or video path', |
|
facefusion//wording.py: 'output_path': 'specify the image or video within a directory', |
|
facefusion//wording.py: 'output_path_textbox': 'OUTPUT PATH', |
|
facefusion//download.py:def conditional_download(download_directory_path : str, urls : List[str]) -> None: |
|
facefusion//download.py: download_file_name = os.path.basename(urlparse(url).path) |
|
facefusion//download.py: download_file_path = os.path.join(download_directory_path, download_file_name) |
|
facefusion//download.py: initial_size = get_file_size(download_file_path) |
|
facefusion//download.py: curl_builder.download(url, download_file_path), |
|
facefusion//download.py: if is_file(download_file_path): |
|
facefusion//download.py: current_size = get_file_size(download_file_path) |
|
facefusion//download.py: hash_paths = [ hash_set.get(hash_key).get('path') for hash_key in hash_set.keys() ] |
|
facefusion//download.py: _, invalid_hash_paths = validate_hash_paths(hash_paths) |
|
facefusion//download.py: if invalid_hash_paths: |
|
facefusion//download.py: if hash_set.get(index).get('path') in invalid_hash_paths: |
|
facefusion//download.py: download_directory_path = os.path.dirname(hash_set.get(index).get('path')) |
|
facefusion//download.py: conditional_download(download_directory_path, [ invalid_hash_url ]) |
|
facefusion//download.py: valid_hash_paths, invalid_hash_paths = validate_hash_paths(hash_paths) |
|
facefusion//download.py: for valid_hash_path in valid_hash_paths: |
|
facefusion//download.py: valid_hash_file_name = get_file_name(valid_hash_path) |
|
facefusion//download.py: for invalid_hash_path in invalid_hash_paths: |
|
facefusion//download.py: invalid_hash_file_name = get_file_name(invalid_hash_path) |
|
facefusion//download.py: if not invalid_hash_paths: |
|
facefusion//download.py: return not invalid_hash_paths |
|
facefusion//download.py: source_paths = [ source_set.get(source_key).get('path') for source_key in source_set.keys() ] |
|
facefusion//download.py: _, invalid_source_paths = validate_source_paths(source_paths) |
|
facefusion//download.py: if invalid_source_paths: |
|
facefusion//download.py: if source_set.get(index).get('path') in invalid_source_paths: |
|
facefusion//download.py: download_directory_path = os.path.dirname(source_set.get(index).get('path')) |
|
facefusion//download.py: conditional_download(download_directory_path, [ invalid_source_url ]) |
|
facefusion//download.py: valid_source_paths, invalid_source_paths = validate_source_paths(source_paths) |
|
facefusion//download.py: for valid_source_path in valid_source_paths: |
|
facefusion//download.py: valid_source_file_name = get_file_name(valid_source_path) |
|
facefusion//download.py: for invalid_source_path in invalid_source_paths: |
|
facefusion//download.py: invalid_source_file_name = get_file_name(invalid_source_path) |
|
facefusion//download.py: if remove_file(invalid_source_path): |
|
facefusion//download.py: if not invalid_source_paths: |
|
facefusion//download.py: return not invalid_source_paths |
|
facefusion//download.py:def validate_hash_paths(hash_paths : List[str]) -> Tuple[List[str], List[str]]: |
|
facefusion//download.py: valid_hash_paths = [] |
|
facefusion//download.py: invalid_hash_paths = [] |
|
facefusion//download.py: for hash_path in hash_paths: |
|
facefusion//download.py: if is_file(hash_path): |
|
facefusion//download.py: valid_hash_paths.append(hash_path) |
|
facefusion//download.py: invalid_hash_paths.append(hash_path) |
|
facefusion//download.py: return valid_hash_paths, invalid_hash_paths |
|
facefusion//download.py:def validate_source_paths(source_paths : List[str]) -> Tuple[List[str], List[str]]: |
|
facefusion//download.py: valid_source_paths = [] |
|
facefusion//download.py: invalid_source_paths = [] |
|
facefusion//download.py: for source_path in source_paths: |
|
facefusion//download.py: if validate_hash(source_path): |
|
facefusion//download.py: valid_source_paths.append(source_path) |
|
facefusion//download.py: invalid_source_paths.append(source_path) |
|
facefusion//download.py: return valid_source_paths, invalid_source_paths |
|
facefusion//download.py: return download_provider_url + download_provider_value.get('path').format(base_name = base_name, file_name = file_name) |
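
The download helpers above follow one pattern: validate_hash_paths / validate_source_paths split the given paths into valid and invalid lists, and conditional_download is then pointed at the parent directory of each invalid entry. A small usage sketch, assuming a hypothetical model hash file and a placeholder URL:

# assumes: import os
# assumes: from facefusion.download import conditional_download, validate_hash_paths
# assumes: from facefusion.filesystem import resolve_relative_path
hash_paths = [ resolve_relative_path('../.assets/models/arcface_w600k_r50.hash') ]
valid_hash_paths, invalid_hash_paths = validate_hash_paths(hash_paths)
for invalid_hash_path in invalid_hash_paths:
    download_directory_path = os.path.dirname(invalid_hash_path)
    # placeholder URL; the real one comes from the model's hash_set entry
    conditional_download(download_directory_path, [ 'https://example.com/arcface_w600k_r50.hash' ])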
|
facefusion//filesystem.py:def get_file_size(file_path : str) -> int: |
|
facefusion//filesystem.py: if is_file(file_path): |
|
facefusion//filesystem.py: return os.path.getsize(file_path) |
|
facefusion//filesystem.py:def get_file_name(file_path : str) -> Optional[str]: |
|
facefusion//filesystem.py: file_name, _ = os.path.splitext(os.path.basename(file_path)) |
|
facefusion//filesystem.py:def get_file_extension(file_path : str) -> Optional[str]: |
|
facefusion//filesystem.py: _, file_extension = os.path.splitext(file_path) |
|
facefusion//filesystem.py:def get_file_format(file_path : str) -> Optional[str]: |
|
facefusion//filesystem.py: file_extension = get_file_extension(file_path) |
|
facefusion//filesystem.py:def same_file_extension(first_file_path : str, second_file_path : str) -> bool: |
|
facefusion//filesystem.py: first_file_extension = get_file_extension(first_file_path) |
|
facefusion//filesystem.py: second_file_extension = get_file_extension(second_file_path) |
|
facefusion//filesystem.py: return get_file_extension(first_file_path) == get_file_extension(second_file_path) |
|
facefusion//filesystem.py:def is_file(file_path : str) -> bool: |
|
facefusion//filesystem.py: if file_path: |
|
facefusion//filesystem.py: return os.path.isfile(file_path) |
|
facefusion//filesystem.py:def is_audio(audio_path : str) -> bool: |
|
facefusion//filesystem.py: return is_file(audio_path) and get_file_format(audio_path) in facefusion.choices.audio_formats |
|
facefusion//filesystem.py:def has_audio(audio_paths : List[str]) -> bool: |
|
facefusion//filesystem.py: if audio_paths: |
|
facefusion//filesystem.py: return any(map(is_audio, audio_paths)) |
|
facefusion//filesystem.py:def are_audios(audio_paths : List[str]) -> bool: |
|
facefusion//filesystem.py: if audio_paths: |
|
facefusion//filesystem.py: return all(map(is_audio, audio_paths)) |
|
facefusion//filesystem.py:def is_image(image_path : str) -> bool: |
|
facefusion//filesystem.py: return is_file(image_path) and get_file_format(image_path) in facefusion.choices.image_formats |
|
facefusion//filesystem.py:def has_image(image_paths : List[str]) -> bool: |
|
facefusion//filesystem.py: if image_paths: |
|
facefusion//filesystem.py: return any(is_image(image_path) for image_path in image_paths) |
|
facefusion//filesystem.py:def are_images(image_paths : List[str]) -> bool: |
|
facefusion//filesystem.py: if image_paths: |
|
facefusion//filesystem.py: return all(map(is_image, image_paths)) |
|
facefusion//filesystem.py:def is_video(video_path : str) -> bool: |
|
facefusion//filesystem.py: return is_file(video_path) and get_file_format(video_path) in facefusion.choices.video_formats |
|
facefusion//filesystem.py:def has_video(video_paths : List[str]) -> bool: |
|
facefusion//filesystem.py: if video_paths: |
|
facefusion//filesystem.py: return any(map(is_video, video_paths)) |
|
facefusion//filesystem.py:def are_videos(video_paths : List[str]) -> bool: |
|
facefusion//filesystem.py: if video_paths: |
|
facefusion//filesystem.py: return any(map(is_video, video_paths)) |
|
facefusion//filesystem.py:def filter_audio_paths(paths : List[str]) -> List[str]: |
|
facefusion//filesystem.py: if paths: |
|
facefusion//filesystem.py: return [ path for path in paths if is_audio(path) ] |
|
facefusion//filesystem.py:def filter_image_paths(paths : List[str]) -> List[str]: |
|
facefusion//filesystem.py: if paths: |
|
facefusion//filesystem.py: return [ path for path in paths if is_image(path) ] |
|
facefusion//filesystem.py:def copy_file(file_path : str, move_path : str) -> bool: |
|
facefusion//filesystem.py: if is_file(file_path): |
|
facefusion//filesystem.py: shutil.copy(file_path, move_path) |
|
facefusion//filesystem.py: return is_file(move_path) |
|
facefusion//filesystem.py:def move_file(file_path : str, move_path : str) -> bool: |
|
facefusion//filesystem.py: if is_file(file_path): |
|
facefusion//filesystem.py: shutil.move(file_path, move_path) |
|
facefusion//filesystem.py: return not is_file(file_path) and is_file(move_path) |
|
facefusion//filesystem.py:def remove_file(file_path : str) -> bool: |
|
facefusion//filesystem.py: if is_file(file_path): |
|
facefusion//filesystem.py: os.remove(file_path) |
|
facefusion//filesystem.py: return not is_file(file_path) |
|
facefusion//filesystem.py:def resolve_file_paths(directory_path : str) -> List[str]: |
|
facefusion//filesystem.py: file_paths : List[str] = [] |
|
facefusion//filesystem.py: if is_directory(directory_path): |
|
facefusion//filesystem.py: file_names_and_extensions = sorted(os.listdir(directory_path)) |
|
facefusion//filesystem.py: file_path = os.path.join(directory_path, file_name_and_extension) |
|
facefusion//filesystem.py: file_paths.append(file_path) |
|
facefusion//filesystem.py: return file_paths |
|
facefusion//filesystem.py:def is_directory(directory_path : str) -> bool: |
|
facefusion//filesystem.py: if directory_path: |
|
facefusion//filesystem.py: return os.path.isdir(directory_path) |
|
facefusion//filesystem.py:def in_directory(file_path : str) -> bool: |
|
facefusion//filesystem.py: if file_path: |
|
facefusion//filesystem.py: directory_path = os.path.dirname(file_path) |
|
facefusion//filesystem.py: if directory_path: |
|
facefusion//filesystem.py: return not is_directory(file_path) and is_directory(directory_path) |
|
facefusion//filesystem.py:def create_directory(directory_path : str) -> bool: |
|
facefusion//filesystem.py: if directory_path and not is_file(directory_path): |
|
facefusion//filesystem.py: os.makedirs(directory_path, exist_ok = True) |
|
facefusion//filesystem.py: return is_directory(directory_path) |
|
facefusion//filesystem.py:def remove_directory(directory_path : str) -> bool: |
|
facefusion//filesystem.py: if is_directory(directory_path): |
|
facefusion//filesystem.py: shutil.rmtree(directory_path, ignore_errors = True) |
|
facefusion//filesystem.py: return not is_directory(directory_path) |
|
facefusion//filesystem.py:def resolve_relative_path(path : str) -> str: |
|
facefusion//filesystem.py: return os.path.abspath(os.path.join(os.path.dirname(__file__), path)) |
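
resolve_relative_path anchors a relative path to the directory of filesystem.py itself rather than the current working directory, which is why model entries like '../.assets/models/...' resolve into the repository's .assets folder no matter where the CLI is launched from. A quick illustration; the checkout location shown is hypothetical:

# assumes: from facefusion.filesystem import resolve_relative_path
resolve_relative_path('../.assets/models/arcface_w600k_r50.onnx')
# -> '/Users/com/facefusion-next/.assets/models/arcface_w600k_r50.onnx'  (example result)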
|
facefusion//face_landmarker.py:from facefusion.filesystem import resolve_relative_path |
|
facefusion//face_landmarker.py: 'path': resolve_relative_path('../.assets/models/2dfan4.hash') |
|
facefusion//face_landmarker.py: 'path': resolve_relative_path('../.assets/models/2dfan4.onnx') |
|
facefusion//face_landmarker.py: 'path': resolve_relative_path('../.assets/models/peppa_wutz.hash') |
|
facefusion//face_landmarker.py: 'path': resolve_relative_path('../.assets/models/peppa_wutz.onnx') |
|
facefusion//face_landmarker.py: 'path': resolve_relative_path('../.assets/models/fan_68_5.hash') |
|
facefusion//face_landmarker.py: 'path': resolve_relative_path('../.assets/models/fan_68_5.onnx') |
|
facefusion//ffmpeg.py:from facefusion.temp_helper import get_temp_file_path, get_temp_frames_pattern |
|
facefusion//ffmpeg.py:def extract_frames(target_path : str, temp_video_resolution : str, temp_video_fps : Fps, trim_frame_start : int, trim_frame_end : int) -> bool: |
|
facefusion//ffmpeg.py: extract_frame_total = predict_video_frame_total(target_path, temp_video_fps, trim_frame_start, trim_frame_end) |
|
facefusion//ffmpeg.py: temp_frames_pattern = get_temp_frames_pattern(target_path, '%08d') |
|
facefusion//ffmpeg.py: ffmpeg_builder.set_input(target_path), |
|
facefusion//ffmpeg.py:def copy_image(target_path : str, temp_image_resolution : str) -> bool: |
|
facefusion//ffmpeg.py: temp_file_path = get_temp_file_path(target_path) |
|
facefusion//ffmpeg.py: ffmpeg_builder.set_input(target_path), |
|
facefusion//ffmpeg.py: ffmpeg_builder.set_image_quality(target_path, 100), |
|
facefusion//ffmpeg.py: ffmpeg_builder.force_output(temp_file_path) |
|
facefusion//ffmpeg.py:def finalize_image(target_path : str, output_path : str, output_image_resolution : str) -> bool: |
|
facefusion//ffmpeg.py: temp_file_path = get_temp_file_path(target_path) |
|
facefusion//ffmpeg.py: ffmpeg_builder.set_input(temp_file_path), |
|
facefusion//ffmpeg.py: ffmpeg_builder.set_image_quality(target_path, output_image_quality), |
|
facefusion//ffmpeg.py: ffmpeg_builder.force_output(output_path) |
|
facefusion//ffmpeg.py:def read_audio_buffer(target_path : str, audio_sample_rate : int, audio_sample_size : int, audio_channel_total : int) -> Optional[AudioBuffer]: |
|
facefusion//ffmpeg.py: ffmpeg_builder.set_input(target_path), |
|
facefusion//ffmpeg.py:def restore_audio(target_path : str, output_path : str, trim_frame_start : int, trim_frame_end : int) -> bool: |
|
facefusion//ffmpeg.py: target_video_fps = detect_video_fps(target_path) |
|
facefusion//ffmpeg.py: temp_file_path = get_temp_file_path(target_path) |
|
facefusion//ffmpeg.py: temp_video_duration = detect_video_duration(temp_file_path) |
|
facefusion//ffmpeg.py: ffmpeg_builder.set_input(temp_file_path), |
|
facefusion//ffmpeg.py: ffmpeg_builder.set_input(target_path), |
|
facefusion//ffmpeg.py: ffmpeg_builder.force_output(output_path) |
|
facefusion//ffmpeg.py:def replace_audio(target_path : str, audio_path : str, output_path : str) -> bool: |
|
facefusion//ffmpeg.py: temp_file_path = get_temp_file_path(target_path) |
|
facefusion//ffmpeg.py: temp_video_duration = detect_video_duration(temp_file_path) |
|
facefusion//ffmpeg.py: ffmpeg_builder.set_input(temp_file_path), |
|
facefusion//ffmpeg.py: ffmpeg_builder.set_input(audio_path), |
|
facefusion//ffmpeg.py: ffmpeg_builder.force_output(output_path) |
|
facefusion//ffmpeg.py:def merge_video(target_path : str, temp_video_fps : Fps, output_video_resolution : str, output_video_fps : Fps, trim_frame_start : int, trim_frame_end : int) -> bool: |
|
facefusion//ffmpeg.py: merge_frame_total = predict_video_frame_total(target_path, output_video_fps, trim_frame_start, trim_frame_end) |
|
facefusion//ffmpeg.py: temp_file_path = get_temp_file_path(target_path) |
|
facefusion//ffmpeg.py: temp_frames_pattern = get_temp_frames_pattern(target_path, '%08d') |
|
facefusion//ffmpeg.py: if get_file_format(target_path) == 'webm': |
|
facefusion//ffmpeg.py: ffmpeg_builder.force_output(temp_file_path) |
|
facefusion//ffmpeg.py:def concat_video(output_path : str, temp_output_paths : List[str]) -> bool: |
|
facefusion//ffmpeg.py: concat_video_path = tempfile.mktemp() |
|
facefusion//ffmpeg.py: with open(concat_video_path, 'w') as concat_video_file: |
|
facefusion//ffmpeg.py: for temp_output_path in temp_output_paths: |
|
facefusion//ffmpeg.py: concat_video_file.write('file \'' + os.path.abspath(temp_output_path) + '\'' + os.linesep) |
|
facefusion//ffmpeg.py: output_path = os.path.abspath(output_path) |
|
facefusion//ffmpeg.py: ffmpeg_builder.force_output(output_path) |
|
facefusion//ffmpeg.py: remove_file(concat_video_path) |
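
concat_video writes a throwaway list file in the format consumed by ffmpeg's concat demuxer; based on the write call above, the file would look roughly like this (the paths are placeholders):

file '/abs/path/to/first_temp_output.mp4'
file '/abs/path/to/second_temp_output.mp4'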
|
facefusion//core.py:from facefusion.filesystem import filter_audio_paths, get_file_name, is_image, is_video, resolve_file_paths, resolve_file_pattern |
|
facefusion//core.py:from facefusion.temp_helper import clear_temp_directory, create_temp_directory, get_temp_file_path, move_temp_file, resolve_temp_frame_paths |
|
facefusion//core.py: if not job_manager.init_jobs(state_manager.get_item('jobs_path')): |
|
facefusion//core.py: if not job_manager.init_jobs(state_manager.get_item('jobs_path')): |
|
facefusion//core.py: if not job_manager.init_jobs(state_manager.get_item('jobs_path')): |
|
facefusion//core.py: if not job_manager.init_jobs(state_manager.get_item('jobs_path')): |
|
facefusion//core.py: available_processors = [ get_file_name(file_path) for file_path in resolve_file_paths('facefusion/processors/modules') ] |
|
facefusion//core.py: source_paths = resolve_file_pattern(job_args.get('source_pattern')) |
|
facefusion//core.py: target_paths = resolve_file_pattern(job_args.get('target_pattern')) |
|
facefusion//core.py: if source_paths and target_paths: |
|
facefusion//core.py: for index, (source_path, target_path) in enumerate(itertools.product(source_paths, target_paths)): |
|
facefusion//core.py: step_args['source_paths'] = [ source_path ] |
|
facefusion//core.py: step_args['target_path'] = target_path |
|
facefusion//core.py: step_args['output_path'] = job_args.get('output_pattern').format(index = index) |
|
facefusion//core.py: if not source_paths and target_paths: |
|
facefusion//core.py: for index, target_path in enumerate(target_paths): |
|
facefusion//core.py: step_args['target_path'] = target_path |
|
facefusion//core.py: step_args['output_path'] = job_args.get('output_pattern').format(index = index) |
|
facefusion//core.py: if is_image(state_manager.get_item('target_path')): |
|
facefusion//core.py: if is_video(state_manager.get_item('target_path')): |
|
facefusion//core.py: source_frames = read_static_images(state_manager.get_item('source_paths')) |
|
facefusion//core.py: if is_video(state_manager.get_item('target_path')): |
|
facefusion//core.py: reference_frame = read_video_frame(state_manager.get_item('target_path'), state_manager.get_item('reference_frame_number')) |
|
facefusion//core.py: reference_frame = read_image(state_manager.get_item('target_path')) |
|
facefusion//core.py: if analyse_image(state_manager.get_item('target_path')): |
|
facefusion//core.py: clear_temp_directory(state_manager.get_item('target_path')) |
|
facefusion//core.py: create_temp_directory(state_manager.get_item('target_path')) |
|
facefusion//core.py: temp_image_resolution = pack_resolution(restrict_image_resolution(state_manager.get_item('target_path'), unpack_resolution(state_manager.get_item('output_image_resolution')))) |
|
facefusion//core.py: if copy_image(state_manager.get_item('target_path'), temp_image_resolution): |
|
facefusion//core.py: temp_file_path = get_temp_file_path(state_manager.get_item('target_path')) |
|
facefusion//core.py: processor_module.process_image(state_manager.get_item('source_paths'), temp_file_path, temp_file_path) |
|
facefusion//core.py: if finalize_image(state_manager.get_item('target_path'), state_manager.get_item('output_path'), state_manager.get_item('output_image_resolution')): |
|
facefusion//core.py: clear_temp_directory(state_manager.get_item('target_path')) |
|
facefusion//core.py: if is_image(state_manager.get_item('output_path')): |
|
facefusion//core.py: trim_frame_start, trim_frame_end = restrict_trim_frame(state_manager.get_item('target_path'), state_manager.get_item('trim_frame_start'), state_manager.get_item('trim_frame_end')) |
|
facefusion//core.py: if analyse_video(state_manager.get_item('target_path'), trim_frame_start, trim_frame_end): |
|
facefusion//core.py: clear_temp_directory(state_manager.get_item('target_path')) |
|
facefusion//core.py: create_temp_directory(state_manager.get_item('target_path')) |
|
facefusion//core.py: temp_video_resolution = pack_resolution(restrict_video_resolution(state_manager.get_item('target_path'), unpack_resolution(state_manager.get_item('output_video_resolution')))) |
|
facefusion//core.py: temp_video_fps = restrict_video_fps(state_manager.get_item('target_path'), state_manager.get_item('output_video_fps')) |
|
facefusion//core.py: if extract_frames(state_manager.get_item('target_path'), temp_video_resolution, temp_video_fps, trim_frame_start, trim_frame_end): |
|
facefusion//core.py: temp_frame_paths = resolve_temp_frame_paths(state_manager.get_item('target_path')) |
|
facefusion//core.py: if temp_frame_paths: |
|
facefusion//core.py: processor_module.process_video(state_manager.get_item('source_paths'), temp_frame_paths) |
|
facefusion//core.py: if merge_video(state_manager.get_item('target_path'), temp_video_fps, state_manager.get_item('output_video_resolution'), state_manager.get_item('output_video_fps'), trim_frame_start, trim_frame_end): |
|
facefusion//core.py: move_temp_file(state_manager.get_item('target_path'), state_manager.get_item('output_path')) |
|
facefusion//core.py: source_audio_path = get_first(filter_audio_paths(state_manager.get_item('source_paths'))) |
|
facefusion//core.py: if source_audio_path: |
|
facefusion//core.py: if replace_audio(state_manager.get_item('target_path'), source_audio_path, state_manager.get_item('output_path')): |
|
facefusion//core.py: move_temp_file(state_manager.get_item('target_path'), state_manager.get_item('output_path')) |
|
facefusion//core.py: if restore_audio(state_manager.get_item('target_path'), state_manager.get_item('output_path'), trim_frame_start, trim_frame_end): |
|
facefusion//core.py: move_temp_file(state_manager.get_item('target_path'), state_manager.get_item('output_path')) |
|
facefusion//core.py: clear_temp_directory(state_manager.get_item('target_path')) |
|
facefusion//core.py: if is_video(state_manager.get_item('output_path')): |
|
facefusion//types.py: 'frame_path' : str |
|
facefusion//types.py: 'path' : str |
|
facefusion//types.py: 'path' : str |
|
facefusion//types.py: 'config_path', |
|
facefusion//types.py: 'temp_path', |
|
facefusion//types.py: 'jobs_path', |
|
facefusion//types.py: 'source_paths', |
|
facefusion//types.py: 'target_path', |
|
facefusion//types.py: 'output_path', |
|
facefusion//types.py: 'config_path' : str, |
|
facefusion//types.py: 'temp_path' : str, |
|
facefusion//types.py: 'jobs_path' : str, |
|
facefusion//types.py: 'source_paths' : List[str], |
|
facefusion//types.py: 'target_path' : str, |
|
facefusion//types.py: 'output_path' : str, |
|
facefusion//face_classifier.py:from facefusion.filesystem import resolve_relative_path |
|
facefusion//face_classifier.py: 'path': resolve_relative_path('../.assets/models/fairface.hash') |
|
facefusion//face_classifier.py: 'path': resolve_relative_path('../.assets/models/fairface.onnx') |
|
facefusion//installer.py: library_paths = [] |
|
facefusion//installer.py: if os.getenv('LD_LIBRARY_PATH'): |
|
facefusion//installer.py: library_paths = os.getenv('LD_LIBRARY_PATH').split(os.pathsep) |
|
facefusion//installer.py: library_paths.extend( |
|
facefusion//installer.py: os.path.join(os.getenv('CONDA_PREFIX'), 'lib'), |
|
facefusion//installer.py: os.path.join(os.getenv('CONDA_PREFIX'), 'lib', python_id, 'site-packages', 'tensorrt_libs') |
|
facefusion//installer.py: library_paths = list(dict.fromkeys([ library_path for library_path in library_paths if os.path.exists(library_path) ])) |
|
facefusion//installer.py: subprocess.call([ shutil.which('conda'), 'env', 'config', 'vars', 'set', 'LD_LIBRARY_PATH=' + os.pathsep.join(library_paths) ]) |
|
facefusion//installer.py: if os.getenv('PATH'): |
|
facefusion//installer.py: library_paths = os.getenv('PATH').split(os.pathsep) |
|
facefusion//installer.py: library_paths.extend( |
|
facefusion//installer.py: os.path.join(os.getenv('CONDA_PREFIX'), 'Lib'), |
|
facefusion//installer.py: os.path.join(os.getenv('CONDA_PREFIX'), 'Lib', 'site-packages', 'tensorrt_libs') |
|
facefusion//installer.py: library_paths = list(dict.fromkeys([ library_path for library_path in library_paths if os.path.exists(library_path) ])) |
|
facefusion//installer.py: subprocess.call([ shutil.which('conda'), 'env', 'config', 'vars', 'set', 'PATH=' + os.pathsep.join(library_paths) ]) |
|
facefusion//curl_builder.py:def download(url : str, download_file_path : str) -> Commands: |
|
facefusion//curl_builder.py: return [ '--create-dirs', '--continue-at', '-', '--output', download_file_path, url ] |
|
facefusion//temp_helper.py:def get_temp_file_path(file_path : str) -> str: |
|
facefusion//temp_helper.py: temp_directory_path = get_temp_directory_path(file_path) |
|
facefusion//temp_helper.py: temp_file_extension = get_file_extension(file_path) |
|
facefusion//temp_helper.py: return os.path.join(temp_directory_path, 'temp' + temp_file_extension) |
|
facefusion//temp_helper.py:def move_temp_file(file_path : str, move_path : str) -> bool: |
|
facefusion//temp_helper.py: temp_file_path = get_temp_file_path(file_path) |
|
facefusion//temp_helper.py: return move_file(temp_file_path, move_path) |
|
facefusion//temp_helper.py:def resolve_temp_frame_paths(target_path : str) -> List[str]: |
|
facefusion//temp_helper.py: temp_frames_pattern = get_temp_frames_pattern(target_path, '*') |
|
facefusion//temp_helper.py:def get_temp_frames_pattern(target_path : str, temp_frame_prefix : str) -> str: |
|
facefusion//temp_helper.py: temp_directory_path = get_temp_directory_path(target_path) |
|
facefusion//temp_helper.py: return os.path.join(temp_directory_path, temp_frame_prefix + '.' + state_manager.get_item('temp_frame_format')) |
|
facefusion//temp_helper.py:def get_temp_directory_path(file_path : str) -> str: |
|
facefusion//temp_helper.py: temp_file_name = get_file_name(file_path) |
|
facefusion//temp_helper.py: return os.path.join(state_manager.get_item('temp_path'), 'facefusion', temp_file_name) |
|
facefusion//temp_helper.py:def create_temp_directory(file_path : str) -> bool: |
|
facefusion//temp_helper.py: temp_directory_path = get_temp_directory_path(file_path) |
|
facefusion//temp_helper.py: return create_directory(temp_directory_path) |
|
facefusion//temp_helper.py:def clear_temp_directory(file_path : str) -> bool: |
|
facefusion//temp_helper.py: temp_directory_path = get_temp_directory_path(file_path) |
|
facefusion//temp_helper.py: return remove_directory(temp_directory_path) |
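
From these helpers, the temporary layout is <temp_path>/facefusion/<target file name>/, holding a temp.<ext> working copy plus the extracted frames named after temp_frame_format. A sketch of the resulting paths, assuming temp_path is /tmp, the target is /videos/clip.mp4 and temp_frame_format is 'png' (all hypothetical values):

# assumes: from facefusion.temp_helper import get_temp_directory_path, get_temp_file_path, get_temp_frames_pattern
get_temp_directory_path('/videos/clip.mp4')           # -> '/tmp/facefusion/clip'
get_temp_file_path('/videos/clip.mp4')                # -> '/tmp/facefusion/clip/temp.mp4'
get_temp_frames_pattern('/videos/clip.mp4', '%08d')   # -> '/tmp/facefusion/clip/%08d.png'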
|
facefusion//processors/core.py:def multi_process_frames(source_paths : List[str], temp_frame_paths : List[str], process_frames : ProcessFrames) -> None: |
|
facefusion//processors/core.py: queue_payloads = create_queue_payloads(temp_frame_paths) |
|
facefusion//processors/core.py: future = executor.submit(process_frames, source_paths, pick_queue(queue, queue_per_future), progress.update) |
|
facefusion//processors/core.py:def create_queue_payloads(temp_frame_paths : List[str]) -> List[QueuePayload]: |
|
facefusion//processors/core.py: temp_frame_paths = sorted(temp_frame_paths, key = os.path.basename) |
|
facefusion//processors/core.py: for frame_number, frame_path in enumerate(temp_frame_paths): |
|
facefusion//processors/core.py: 'frame_path': frame_path |
|
facefusion//processors/choices.py:from facefusion.filesystem import get_file_name, resolve_file_paths, resolve_relative_path |
|
facefusion//processors/choices.py:custom_model_file_paths = resolve_file_paths(resolve_relative_path('../.assets/models/custom')) |
|
facefusion//processors/choices.py:if custom_model_file_paths: |
|
facefusion//processors/choices.py: for model_file_path in custom_model_file_paths: |
|
facefusion//processors/choices.py: model_id = '/'.join([ 'custom', get_file_name(model_file_path) ]) |
|
facefusion//processors/modules/frame_enhancer.py:from facefusion.filesystem import in_directory, is_image, is_video, resolve_relative_path, same_file_extension |
|
facefusion//processors/modules/frame_enhancer.py: 'path': resolve_relative_path('../.assets/models/clear_reality_x4.hash') |
|
facefusion//processors/modules/frame_enhancer.py: 'path': resolve_relative_path('../.assets/models/clear_reality_x4.onnx') |
|
facefusion//processors/modules/frame_enhancer.py: 'path': resolve_relative_path('../.assets/models/lsdir_x4.hash') |
|
facefusion//processors/modules/frame_enhancer.py: 'path': resolve_relative_path('../.assets/models/lsdir_x4.onnx') |
|
facefusion//processors/modules/frame_enhancer.py: 'path': resolve_relative_path('../.assets/models/nomos8k_sc_x4.hash') |
|
facefusion//processors/modules/frame_enhancer.py: 'path': resolve_relative_path('../.assets/models/nomos8k_sc_x4.onnx') |
|
facefusion//processors/modules/frame_enhancer.py: 'path': resolve_relative_path('../.assets/models/real_esrgan_x2.hash') |
|
facefusion//processors/modules/frame_enhancer.py: 'path': resolve_relative_path('../.assets/models/real_esrgan_x2.onnx') |
|
facefusion//processors/modules/frame_enhancer.py: 'path': resolve_relative_path('../.assets/models/real_esrgan_x2_fp16.hash') |
|
facefusion//processors/modules/frame_enhancer.py: 'path': resolve_relative_path('../.assets/models/real_esrgan_x2_fp16.onnx') |
|
facefusion//processors/modules/frame_enhancer.py: 'path': resolve_relative_path('../.assets/models/real_esrgan_x4.hash') |
|
facefusion//processors/modules/frame_enhancer.py: 'path': resolve_relative_path('../.assets/models/real_esrgan_x4.onnx') |
|
facefusion//processors/modules/frame_enhancer.py: 'path': resolve_relative_path('../.assets/models/real_esrgan_x4_fp16.hash') |
|
facefusion//processors/modules/frame_enhancer.py: 'path': resolve_relative_path('../.assets/models/real_esrgan_x4_fp16.onnx') |
|
facefusion//processors/modules/frame_enhancer.py: 'path': resolve_relative_path('../.assets/models/real_esrgan_x8.hash') |
|
facefusion//processors/modules/frame_enhancer.py: 'path': resolve_relative_path('../.assets/models/real_esrgan_x8.onnx') |
|
facefusion//processors/modules/frame_enhancer.py: 'path': resolve_relative_path('../.assets/models/real_esrgan_x8_fp16.hash') |
|
facefusion//processors/modules/frame_enhancer.py: 'path': resolve_relative_path('../.assets/models/real_esrgan_x8_fp16.onnx') |
|
facefusion//processors/modules/frame_enhancer.py: 'path': resolve_relative_path('../.assets/models/real_hatgan_x4.hash') |
|
facefusion//processors/modules/frame_enhancer.py: 'path': resolve_relative_path('../.assets/models/real_hatgan_x4.onnx') |
|
facefusion//processors/modules/frame_enhancer.py: 'path': resolve_relative_path('../.assets/models/real_web_photo_x4.hash') |
|
facefusion//processors/modules/frame_enhancer.py: 'path': resolve_relative_path('../.assets/models/real_web_photo_x4.onnx') |
|
facefusion//processors/modules/frame_enhancer.py: 'path': resolve_relative_path('../.assets/models/realistic_rescaler_x4.hash') |
|
facefusion//processors/modules/frame_enhancer.py: 'path': resolve_relative_path('../.assets/models/realistic_rescaler_x4.onnx') |
|
facefusion//processors/modules/frame_enhancer.py: 'path': resolve_relative_path('../.assets/models/remacri_x4.hash') |
|
facefusion//processors/modules/frame_enhancer.py: 'path': resolve_relative_path('../.assets/models/remacri_x4.onnx') |
|
facefusion//processors/modules/frame_enhancer.py: 'path': resolve_relative_path('../.assets/models/siax_x4.hash') |
|
facefusion//processors/modules/frame_enhancer.py: 'path': resolve_relative_path('../.assets/models/siax_x4.onnx') |
|
facefusion//processors/modules/frame_enhancer.py: 'path': resolve_relative_path('../.assets/models/span_kendata_x4.hash') |
|
facefusion//processors/modules/frame_enhancer.py: 'path': resolve_relative_path('../.assets/models/span_kendata_x4.onnx') |
|
facefusion//processors/modules/frame_enhancer.py: 'path': resolve_relative_path('../.assets/models/swin2_sr_x4.hash') |
|
facefusion//processors/modules/frame_enhancer.py: 'path': resolve_relative_path('../.assets/models/swin2_sr_x4.onnx') |
|
facefusion//processors/modules/frame_enhancer.py: 'path': resolve_relative_path('../.assets/models/ultra_sharp_x4.hash') |
|
facefusion//processors/modules/frame_enhancer.py: 'path': resolve_relative_path('../.assets/models/ultra_sharp_x4.onnx') |
|
facefusion//processors/modules/frame_enhancer.py: if mode in [ 'output', 'preview' ] and not is_image(state_manager.get_item('target_path')) and not is_video(state_manager.get_item('target_path')): |
|
facefusion//processors/modules/frame_enhancer.py: if mode == 'output' and not in_directory(state_manager.get_item('output_path')): |
|
facefusion//processors/modules/frame_enhancer.py: if mode == 'output' and not same_file_extension(state_manager.get_item('target_path'), state_manager.get_item('output_path')): |
|
facefusion//processors/modules/frame_enhancer.py:def process_frames(source_paths : List[str], queue_payloads : List[QueuePayload], update_progress : UpdateProgress) -> None: |
|
facefusion//processors/modules/frame_enhancer.py: target_vision_path = queue_payload['frame_path'] |
|
facefusion//processors/modules/frame_enhancer.py: target_vision_frame = read_image(target_vision_path) |
|
facefusion//processors/modules/frame_enhancer.py: write_image(target_vision_path, output_vision_frame) |
|
facefusion//processors/modules/frame_enhancer.py:def process_image(source_paths : List[str], target_path : str, output_path : str) -> None: |
|
facefusion//processors/modules/frame_enhancer.py: target_vision_frame = read_static_image(target_path) |
|
facefusion//processors/modules/frame_enhancer.py: write_image(output_path, output_vision_frame) |
|
facefusion//processors/modules/frame_enhancer.py:def process_video(source_paths : List[str], temp_frame_paths : List[str]) -> None: |
|
facefusion//processors/modules/frame_enhancer.py: processors.multi_process_frames(None, temp_frame_paths, process_frames) |
|
facefusion//processors/modules/age_modifier.py:from facefusion.filesystem import in_directory, is_image, is_video, resolve_relative_path, same_file_extension |
|
facefusion//processors/modules/age_modifier.py: 'path': resolve_relative_path('../.assets/models/styleganex_age.hash') |
|
facefusion//processors/modules/age_modifier.py: 'path': resolve_relative_path('../.assets/models/styleganex_age.onnx') |
|
facefusion//processors/modules/age_modifier.py: if mode in [ 'output', 'preview' ] and not is_image(state_manager.get_item('target_path')) and not is_video(state_manager.get_item('target_path')): |
|
facefusion//processors/modules/age_modifier.py: if mode == 'output' and not in_directory(state_manager.get_item('output_path')): |
|
facefusion//processors/modules/age_modifier.py: if mode == 'output' and not same_file_extension(state_manager.get_item('target_path'), state_manager.get_item('output_path')): |
|
facefusion//processors/modules/age_modifier.py:def process_frames(source_path : List[str], queue_payloads : List[QueuePayload], update_progress : UpdateProgress) -> None: |
|
facefusion//processors/modules/age_modifier.py: target_vision_path = queue_payload['frame_path'] |
|
facefusion//processors/modules/age_modifier.py: target_vision_frame = read_image(target_vision_path) |
|
facefusion//processors/modules/age_modifier.py: write_image(target_vision_path, output_vision_frame) |
|
facefusion//processors/modules/age_modifier.py:def process_image(source_path : str, target_path : str, output_path : str) -> None: |
|
facefusion//processors/modules/age_modifier.py: target_vision_frame = read_static_image(target_path) |
|
facefusion//processors/modules/age_modifier.py: write_image(output_path, output_vision_frame) |
|
facefusion//processors/modules/age_modifier.py:def process_video(source_paths : List[str], temp_frame_paths : List[str]) -> None: |
|
facefusion//processors/modules/age_modifier.py: processors.multi_process_frames(None, temp_frame_paths, process_frames) |
|
facefusion//processors/modules/lip_syncer.py:from facefusion.filesystem import filter_audio_paths, has_audio, in_directory, is_image, is_video, resolve_relative_path, same_file_extension |
|
facefusion//processors/modules/lip_syncer.py: 'path': resolve_relative_path('../.assets/models/wav2lip_96.hash') |
|
facefusion//processors/modules/lip_syncer.py: 'path': resolve_relative_path('../.assets/models/wav2lip_96.onnx') |
|
facefusion//processors/modules/lip_syncer.py: 'path': resolve_relative_path('../.assets/models/wav2lip_gan_96.hash') |
|
facefusion//processors/modules/lip_syncer.py: 'path': resolve_relative_path('../.assets/models/wav2lip_gan_96.onnx') |
|
facefusion//processors/modules/lip_syncer.py: if not has_audio(state_manager.get_item('source_paths')): |
|
facefusion//processors/modules/lip_syncer.py: if mode in [ 'output', 'preview' ] and not is_image(state_manager.get_item('target_path')) and not is_video(state_manager.get_item('target_path')): |
|
facefusion//processors/modules/lip_syncer.py: if mode == 'output' and not in_directory(state_manager.get_item('output_path')): |
|
facefusion//processors/modules/lip_syncer.py: if mode == 'output' and not same_file_extension(state_manager.get_item('target_path'), state_manager.get_item('output_path')): |
|
facefusion//processors/modules/lip_syncer.py:def process_frames(source_paths : List[str], queue_payloads : List[QueuePayload], update_progress : UpdateProgress) -> None: |
|
facefusion//processors/modules/lip_syncer.py: source_audio_path = get_first(filter_audio_paths(source_paths)) |
|
facefusion//processors/modules/lip_syncer.py: temp_video_fps = restrict_video_fps(state_manager.get_item('target_path'), state_manager.get_item('output_video_fps')) |
|
facefusion//processors/modules/lip_syncer.py: target_vision_path = queue_payload.get('frame_path') |
|
facefusion//processors/modules/lip_syncer.py: source_audio_frame = get_voice_frame(source_audio_path, temp_video_fps, frame_number) |
|
facefusion//processors/modules/lip_syncer.py: target_vision_frame = read_image(target_vision_path) |
|
facefusion//processors/modules/lip_syncer.py: write_image(target_vision_path, output_vision_frame) |
|
facefusion//processors/modules/lip_syncer.py:def process_image(source_paths : List[str], target_path : str, output_path : str) -> None: |
|
facefusion//processors/modules/lip_syncer.py: target_vision_frame = read_static_image(target_path) |
|
facefusion//processors/modules/lip_syncer.py: write_image(output_path, output_vision_frame) |
|
facefusion//processors/modules/lip_syncer.py:def process_video(source_paths : List[str], temp_frame_paths : List[str]) -> None: |
|
facefusion//processors/modules/lip_syncer.py: source_audio_paths = filter_audio_paths(state_manager.get_item('source_paths')) |
|
facefusion//processors/modules/lip_syncer.py: temp_video_fps = restrict_video_fps(state_manager.get_item('target_path'), state_manager.get_item('output_video_fps')) |
|
facefusion//processors/modules/lip_syncer.py: for source_audio_path in source_audio_paths: |
|
facefusion//processors/modules/lip_syncer.py: read_static_voice(source_audio_path, temp_video_fps) |
|
facefusion//processors/modules/lip_syncer.py: processors.multi_process_frames(source_paths, temp_frame_paths, process_frames) |
|
facefusion//processors/modules/deep_swapper.py:from facefusion.filesystem import get_file_name, in_directory, is_image, is_video, resolve_file_paths, resolve_relative_path, same_file_extension |
|
facefusion//processors/modules/deep_swapper.py: 'path': resolve_relative_path('../.assets/models/' + model_scope + '/' + model_name + '.hash') |
|
facefusion//processors/modules/deep_swapper.py: 'path': resolve_relative_path('../.assets/models/' + model_scope + '/' + model_name + '.dfm') |
|
facefusion//processors/modules/deep_swapper.py: custom_model_file_paths = resolve_file_paths(resolve_relative_path('../.assets/models/custom')) |
|
facefusion//processors/modules/deep_swapper.py: if custom_model_file_paths: |
|
facefusion//processors/modules/deep_swapper.py: for model_file_path in custom_model_file_paths: |
|
facefusion//processors/modules/deep_swapper.py: model_id = '/'.join([ 'custom', get_file_name(model_file_path) ]) |
|
facefusion//processors/modules/deep_swapper.py: 'path': resolve_relative_path(model_file_path) |
|
facefusion//processors/modules/deep_swapper.py: if mode in [ 'output', 'preview' ] and not is_image(state_manager.get_item('target_path')) and not is_video(state_manager.get_item('target_path')): |
|
facefusion//processors/modules/deep_swapper.py: if mode == 'output' and not in_directory(state_manager.get_item('output_path')): |
|
facefusion//processors/modules/deep_swapper.py: if mode == 'output' and not same_file_extension(state_manager.get_item('target_path'), state_manager.get_item('output_path')): |
|
facefusion//processors/modules/deep_swapper.py:def process_frames(source_path : List[str], queue_payloads : List[QueuePayload], update_progress : UpdateProgress) -> None: |
|
facefusion//processors/modules/deep_swapper.py: target_vision_path = queue_payload['frame_path'] |
|
facefusion//processors/modules/deep_swapper.py: target_vision_frame = read_image(target_vision_path) |
|
facefusion//processors/modules/deep_swapper.py: write_image(target_vision_path, output_vision_frame) |
|
facefusion//processors/modules/deep_swapper.py:def process_image(source_path : str, target_path : str, output_path : str) -> None: |
|
facefusion//processors/modules/deep_swapper.py: target_vision_frame = read_static_image(target_path) |
|
facefusion//processors/modules/deep_swapper.py: write_image(output_path, output_vision_frame) |
|
facefusion//processors/modules/deep_swapper.py:def process_video(source_paths : List[str], temp_frame_paths : List[str]) -> None: |
|
facefusion//processors/modules/deep_swapper.py: processors.multi_process_frames(None, temp_frame_paths, process_frames) |
|
facefusion//processors/modules/face_enhancer.py:from facefusion.filesystem import in_directory, is_image, is_video, resolve_relative_path, same_file_extension |
|
facefusion//processors/modules/face_enhancer.py: 'path': resolve_relative_path('../.assets/models/codeformer.hash') |
|
facefusion//processors/modules/face_enhancer.py: 'path': resolve_relative_path('../.assets/models/codeformer.onnx') |
|
facefusion//processors/modules/face_enhancer.py: 'path': resolve_relative_path('../.assets/models/gfpgan_1.2.hash') |
|
facefusion//processors/modules/face_enhancer.py: 'path': resolve_relative_path('../.assets/models/gfpgan_1.2.onnx') |
|
facefusion//processors/modules/face_enhancer.py: 'path': resolve_relative_path('../.assets/models/gfpgan_1.3.hash') |
|
facefusion//processors/modules/face_enhancer.py: 'path': resolve_relative_path('../.assets/models/gfpgan_1.3.onnx') |
|
facefusion//processors/modules/face_enhancer.py: 'path': resolve_relative_path('../.assets/models/gfpgan_1.4.hash') |
|
facefusion//processors/modules/face_enhancer.py: 'path': resolve_relative_path('../.assets/models/gfpgan_1.4.onnx') |
|
facefusion//processors/modules/face_enhancer.py: 'path': resolve_relative_path('../.assets/models/gpen_bfr_256.hash') |
|
facefusion//processors/modules/face_enhancer.py: 'path': resolve_relative_path('../.assets/models/gpen_bfr_256.onnx') |
|
facefusion//processors/modules/face_enhancer.py: 'path': resolve_relative_path('../.assets/models/gpen_bfr_512.hash') |
|
facefusion//processors/modules/face_enhancer.py: 'path': resolve_relative_path('../.assets/models/gpen_bfr_512.onnx') |
|
facefusion//processors/modules/face_enhancer.py: 'path': resolve_relative_path('../.assets/models/gpen_bfr_1024.hash') |
|
facefusion//processors/modules/face_enhancer.py: 'path': resolve_relative_path('../.assets/models/gpen_bfr_1024.onnx') |
|
facefusion//processors/modules/face_enhancer.py: 'path': resolve_relative_path('../.assets/models/gpen_bfr_2048.hash') |
|
facefusion//processors/modules/face_enhancer.py: 'path': resolve_relative_path('../.assets/models/gpen_bfr_2048.onnx') |
|
facefusion//processors/modules/face_enhancer.py: 'path': resolve_relative_path('../.assets/models/restoreformer_plus_plus.hash') |
|
facefusion//processors/modules/face_enhancer.py: 'path': resolve_relative_path('../.assets/models/restoreformer_plus_plus.onnx') |
|
facefusion//processors/modules/face_enhancer.py: if mode in [ 'output', 'preview' ] and not is_image(state_manager.get_item('target_path')) and not is_video(state_manager.get_item('target_path')): |
|
facefusion//processors/modules/face_enhancer.py: if mode == 'output' and not in_directory(state_manager.get_item('output_path')): |
|
facefusion//processors/modules/face_enhancer.py: if mode == 'output' and not same_file_extension(state_manager.get_item('target_path'), state_manager.get_item('output_path')): |
|
facefusion//processors/modules/face_enhancer.py:def process_frames(source_path : List[str], queue_payloads : List[QueuePayload], update_progress : UpdateProgress) -> None: |
|
facefusion//processors/modules/face_enhancer.py: target_vision_path = queue_payload['frame_path'] |
|
facefusion//processors/modules/face_enhancer.py: target_vision_frame = read_image(target_vision_path) |
|
facefusion//processors/modules/face_enhancer.py: write_image(target_vision_path, output_vision_frame) |
|
facefusion//processors/modules/face_enhancer.py:def process_image(source_path : str, target_path : str, output_path : str) -> None: |
|
facefusion//processors/modules/face_enhancer.py: target_vision_frame = read_static_image(target_path) |
|
facefusion//processors/modules/face_enhancer.py: write_image(output_path, output_vision_frame) |
|
facefusion//processors/modules/face_enhancer.py:def process_video(source_paths : List[str], temp_frame_paths : List[str]) -> None: |
|
facefusion//processors/modules/face_enhancer.py: processors.multi_process_frames(None, temp_frame_paths, process_frames) |
|
facefusion//processors/modules/face_debugger.py: if mode == 'output' and not in_directory(state_manager.get_item('output_path')): |
|
facefusion//processors/modules/face_debugger.py: if mode == 'output' and not same_file_extension(state_manager.get_item('target_path'), state_manager.get_item('output_path')): |
|
facefusion//processors/modules/face_debugger.py:def process_frames(source_paths : List[str], queue_payloads : List[QueuePayload], update_progress : UpdateProgress) -> None: |
|
facefusion//processors/modules/face_debugger.py: target_vision_path = queue_payload['frame_path'] |
|
facefusion//processors/modules/face_debugger.py: target_vision_frame = read_image(target_vision_path) |
|
facefusion//processors/modules/face_debugger.py: write_image(target_vision_path, output_vision_frame) |
|
facefusion//processors/modules/face_debugger.py:def process_image(source_paths : List[str], target_path : str, output_path : str) -> None: |
|
facefusion//processors/modules/face_debugger.py: target_vision_frame = read_static_image(target_path) |
|
facefusion//processors/modules/face_debugger.py: write_image(output_path, output_vision_frame) |
|
facefusion//processors/modules/face_debugger.py:def process_video(source_paths : List[str], temp_frame_paths : List[str]) -> None: |
|
facefusion//processors/modules/face_debugger.py: processors.multi_process_frames(source_paths, temp_frame_paths, process_frames) |
|
facefusion//processors/modules/expression_restorer.py:from facefusion.filesystem import in_directory, is_image, is_video, resolve_relative_path, same_file_extension |
|
facefusion//processors/modules/expression_restorer.py: 'path': resolve_relative_path('../.assets/models/live_portrait_feature_extractor.hash') |
|
facefusion//processors/modules/expression_restorer.py: 'path': resolve_relative_path('../.assets/models/live_portrait_motion_extractor.hash') |
|
facefusion//processors/modules/expression_restorer.py: 'path': resolve_relative_path('../.assets/models/live_portrait_generator.hash') |
|
facefusion//processors/modules/expression_restorer.py: 'path': resolve_relative_path('../.assets/models/live_portrait_feature_extractor.onnx') |
|
facefusion//processors/modules/expression_restorer.py: 'path': resolve_relative_path('../.assets/models/live_portrait_motion_extractor.onnx') |
|
facefusion//processors/modules/expression_restorer.py: 'path': resolve_relative_path('../.assets/models/live_portrait_generator.onnx') |
|
facefusion//processors/modules/expression_restorer.py: if mode in [ 'output', 'preview' ] and not is_image(state_manager.get_item('target_path')) and not is_video(state_manager.get_item('target_path')): |
|
facefusion//processors/modules/expression_restorer.py: if mode == 'output' and not in_directory(state_manager.get_item('output_path')): |
|
facefusion//processors/modules/expression_restorer.py: if mode == 'output' and not same_file_extension(state_manager.get_item('target_path'), state_manager.get_item('output_path')): |
|
facefusion//processors/modules/expression_restorer.py:def process_frames(source_path : List[str], queue_payloads : List[QueuePayload], update_progress : UpdateProgress) -> None: |
|
facefusion//processors/modules/expression_restorer.py: source_vision_frame = read_video_frame(state_manager.get_item('target_path'), frame_number) |
|
facefusion//processors/modules/expression_restorer.py: target_vision_path = queue_payload.get('frame_path') |
|
facefusion//processors/modules/expression_restorer.py: target_vision_frame = read_image(target_vision_path) |
|
facefusion//processors/modules/expression_restorer.py: write_image(target_vision_path, output_vision_frame) |
|
facefusion//processors/modules/expression_restorer.py:def process_image(source_path : str, target_path : str, output_path : str) -> None: |
|
facefusion//processors/modules/expression_restorer.py: source_vision_frame = read_static_image(state_manager.get_item('target_path')) |
|
facefusion//processors/modules/expression_restorer.py: target_vision_frame = read_static_image(target_path) |
|
facefusion//processors/modules/expression_restorer.py: write_image(output_path, output_vision_frame) |
|
facefusion//processors/modules/expression_restorer.py:def process_video(source_paths : List[str], temp_frame_paths : List[str]) -> None: |
|
facefusion//processors/modules/expression_restorer.py: processors.multi_process_frames(None, temp_frame_paths, process_frames) |
|
facefusion//processors/modules/frame_colorizer.py:from facefusion.filesystem import in_directory, is_image, is_video, resolve_relative_path, same_file_extension |
|
facefusion//processors/modules/frame_colorizer.py: 'path': resolve_relative_path('../.assets/models/ddcolor.hash') |
|
facefusion//processors/modules/frame_colorizer.py: 'path': resolve_relative_path('../.assets/models/ddcolor.onnx') |
|
facefusion//processors/modules/frame_colorizer.py: 'path': resolve_relative_path('../.assets/models/ddcolor_artistic.hash') |
|
facefusion//processors/modules/frame_colorizer.py: 'path': resolve_relative_path('../.assets/models/ddcolor_artistic.onnx') |
|
facefusion//processors/modules/frame_colorizer.py: 'path': resolve_relative_path('../.assets/models/deoldify.hash') |
|
facefusion//processors/modules/frame_colorizer.py: 'path': resolve_relative_path('../.assets/models/deoldify.onnx') |
|
facefusion//processors/modules/frame_colorizer.py: 'path': resolve_relative_path('../.assets/models/deoldify_artistic.hash') |
|
facefusion//processors/modules/frame_colorizer.py: 'path': resolve_relative_path('../.assets/models/deoldify_artistic.onnx') |
|
facefusion//processors/modules/frame_colorizer.py: 'path': resolve_relative_path('../.assets/models/deoldify_stable.hash') |
|
facefusion//processors/modules/frame_colorizer.py: 'path': resolve_relative_path('../.assets/models/deoldify_stable.onnx') |
|
facefusion//processors/modules/frame_colorizer.py: if mode in [ 'output', 'preview' ] and not is_image(state_manager.get_item('target_path')) and not is_video(state_manager.get_item('target_path')): |
|
facefusion//processors/modules/frame_colorizer.py: if mode == 'output' and not in_directory(state_manager.get_item('output_path')): |
|
facefusion//processors/modules/frame_colorizer.py: if mode == 'output' and not same_file_extension(state_manager.get_item('target_path'), state_manager.get_item('output_path')): |
|
facefusion//processors/modules/frame_colorizer.py:def process_frames(source_paths : List[str], queue_payloads : List[QueuePayload], update_progress : UpdateProgress) -> None: |
|
facefusion//processors/modules/frame_colorizer.py: target_vision_path = queue_payload['frame_path'] |
|
facefusion//processors/modules/frame_colorizer.py: target_vision_frame = read_image(target_vision_path) |
|
facefusion//processors/modules/frame_colorizer.py: write_image(target_vision_path, output_vision_frame) |
|
facefusion//processors/modules/frame_colorizer.py:def process_image(source_paths : List[str], target_path : str, output_path : str) -> None: |
|
facefusion//processors/modules/frame_colorizer.py: target_vision_frame = read_static_image(target_path) |
|
facefusion//processors/modules/frame_colorizer.py: write_image(output_path, output_vision_frame) |
|
facefusion//processors/modules/frame_colorizer.py:def process_video(source_paths : List[str], temp_frame_paths : List[str]) -> None: |
|
facefusion//processors/modules/frame_colorizer.py: processors.multi_process_frames(None, temp_frame_paths, process_frames) |
|
facefusion//processors/modules/face_editor.py:from facefusion.filesystem import in_directory, is_image, is_video, resolve_relative_path, same_file_extension |
|
facefusion//processors/modules/face_editor.py: 'path': resolve_relative_path('../.assets/models/live_portrait_feature_extractor.hash') |
|
facefusion//processors/modules/face_editor.py: 'path': resolve_relative_path('../.assets/models/live_portrait_motion_extractor.hash') |
|
facefusion//processors/modules/face_editor.py: 'path': resolve_relative_path('../.assets/models/live_portrait_eye_retargeter.hash') |
|
facefusion//processors/modules/face_editor.py: 'path': resolve_relative_path('../.assets/models/live_portrait_lip_retargeter.hash') |
|
facefusion//processors/modules/face_editor.py: 'path': resolve_relative_path('../.assets/models/live_portrait_stitcher.hash') |
|
facefusion//processors/modules/face_editor.py: 'path': resolve_relative_path('../.assets/models/live_portrait_generator.hash') |
|
facefusion//processors/modules/face_editor.py: 'path': resolve_relative_path('../.assets/models/live_portrait_feature_extractor.onnx') |
|
facefusion//processors/modules/face_editor.py: 'path': resolve_relative_path('../.assets/models/live_portrait_motion_extractor.onnx') |
|
facefusion//processors/modules/face_editor.py: 'path': resolve_relative_path('../.assets/models/live_portrait_eye_retargeter.onnx') |
|
facefusion//processors/modules/face_editor.py: 'path': resolve_relative_path('../.assets/models/live_portrait_lip_retargeter.onnx') |
|
facefusion//processors/modules/face_editor.py: 'path': resolve_relative_path('../.assets/models/live_portrait_stitcher.onnx') |
|
facefusion//processors/modules/face_editor.py: 'path': resolve_relative_path('../.assets/models/live_portrait_generator.onnx') |
|
facefusion//processors/modules/face_editor.py: if mode in [ 'output', 'preview' ] and not is_image(state_manager.get_item('target_path')) and not is_video(state_manager.get_item('target_path')): |
|
facefusion//processors/modules/face_editor.py: if mode == 'output' and not in_directory(state_manager.get_item('output_path')): |
|
facefusion//processors/modules/face_editor.py: if mode == 'output' and not same_file_extension(state_manager.get_item('target_path'), state_manager.get_item('output_path')): |
|
facefusion//processors/modules/face_editor.py:def process_frames(source_path : List[str], queue_payloads : List[QueuePayload], update_progress : UpdateProgress) -> None: |
|
facefusion//processors/modules/face_editor.py: target_vision_path = queue_payload['frame_path'] |
|
facefusion//processors/modules/face_editor.py: target_vision_frame = read_image(target_vision_path) |
|
facefusion//processors/modules/face_editor.py: write_image(target_vision_path, output_vision_frame) |
|
facefusion//processors/modules/face_editor.py:def process_image(source_path : str, target_path : str, output_path : str) -> None: |
|
facefusion//processors/modules/face_editor.py: target_vision_frame = read_static_image(target_path) |
|
facefusion//processors/modules/face_editor.py: write_image(output_path, output_vision_frame) |
|
facefusion//processors/modules/face_editor.py:def process_video(source_paths : List[str], temp_frame_paths : List[str]) -> None: |
|
facefusion//processors/modules/face_editor.py: processors.multi_process_frames(None, temp_frame_paths, process_frames) |
|
facefusion//processors/modules/face_swapper.py:from facefusion.filesystem import filter_image_paths, has_image, in_directory, is_image, is_video, resolve_relative_path, same_file_extension |
|
facefusion//processors/modules/face_swapper.py: 'path': resolve_relative_path('../.assets/models/blendswap_256.hash') |
|
facefusion//processors/modules/face_swapper.py: 'path': resolve_relative_path('../.assets/models/blendswap_256.onnx') |
|
facefusion//processors/modules/face_swapper.py: 'path': resolve_relative_path('../.assets/models/ghost_1_256.hash') |
|
facefusion//processors/modules/face_swapper.py: 'path': resolve_relative_path('../.assets/models/arcface_converter_ghost.hash') |
|
facefusion//processors/modules/face_swapper.py: 'path': resolve_relative_path('../.assets/models/ghost_1_256.onnx') |
|
facefusion//processors/modules/face_swapper.py: 'path': resolve_relative_path('../.assets/models/arcface_converter_ghost.onnx') |
|
facefusion//processors/modules/face_swapper.py: 'path': resolve_relative_path('../.assets/models/ghost_2_256.hash') |
|
facefusion//processors/modules/face_swapper.py: 'path': resolve_relative_path('../.assets/models/arcface_converter_ghost.hash') |
|
facefusion//processors/modules/face_swapper.py: 'path': resolve_relative_path('../.assets/models/ghost_2_256.onnx') |
|
facefusion//processors/modules/face_swapper.py: 'path': resolve_relative_path('../.assets/models/arcface_converter_ghost.onnx') |
|
facefusion//processors/modules/face_swapper.py: 'path': resolve_relative_path('../.assets/models/ghost_3_256.hash') |
|
facefusion//processors/modules/face_swapper.py: 'path': resolve_relative_path('../.assets/models/arcface_converter_ghost.hash') |
|
facefusion//processors/modules/face_swapper.py: 'path': resolve_relative_path('../.assets/models/ghost_3_256.onnx') |
|
facefusion//processors/modules/face_swapper.py: 'path': resolve_relative_path('../.assets/models/arcface_converter_ghost.onnx') |
|
facefusion//processors/modules/face_swapper.py: 'path': resolve_relative_path('../.assets/models/hififace_unofficial_256.hash') |
|
facefusion//processors/modules/face_swapper.py: 'path': resolve_relative_path('../.assets/models/arcface_converter_hififace.hash') |
|
facefusion//processors/modules/face_swapper.py: 'path': resolve_relative_path('../.assets/models/hififace_unofficial_256.onnx') |
|
facefusion//processors/modules/face_swapper.py: 'path': resolve_relative_path('../.assets/models/arcface_converter_hififace.onnx') |
|
facefusion//processors/modules/face_swapper.py: 'path': resolve_relative_path('../.assets/models/inswapper_128.hash') |
|
facefusion//processors/modules/face_swapper.py: 'path': resolve_relative_path('../.assets/models/inswapper_128.onnx') |
|
facefusion//processors/modules/face_swapper.py: 'path': resolve_relative_path('../.assets/models/inswapper_128_fp16.hash') |
|
facefusion//processors/modules/face_swapper.py: 'path': resolve_relative_path('../.assets/models/inswapper_128_fp16.onnx') |
|
facefusion//processors/modules/face_swapper.py: 'path': resolve_relative_path('../.assets/models/simswap_256.hash') |
|
facefusion//processors/modules/face_swapper.py: 'path': resolve_relative_path('../.assets/models/arcface_converter_simswap.hash') |
|
facefusion//processors/modules/face_swapper.py: 'path': resolve_relative_path('../.assets/models/simswap_256.onnx') |
|
facefusion//processors/modules/face_swapper.py: 'path': resolve_relative_path('../.assets/models/arcface_converter_simswap.onnx') |
|
facefusion//processors/modules/face_swapper.py: 'path': resolve_relative_path('../.assets/models/simswap_unofficial_512.hash') |
|
facefusion//processors/modules/face_swapper.py: 'path': resolve_relative_path('../.assets/models/arcface_converter_simswap.hash') |
|
facefusion//processors/modules/face_swapper.py: 'path': resolve_relative_path('../.assets/models/simswap_unofficial_512.onnx') |
|
facefusion//processors/modules/face_swapper.py: 'path': resolve_relative_path('../.assets/models/arcface_converter_simswap.onnx') |
|
facefusion//processors/modules/face_swapper.py: 'path': resolve_relative_path('../.assets/models/uniface_256.hash') |
|
facefusion//processors/modules/face_swapper.py: 'path': resolve_relative_path('../.assets/models/uniface_256.onnx') |
|
facefusion//processors/modules/face_swapper.py: if not has_image(state_manager.get_item('source_paths')): |
|
facefusion//processors/modules/face_swapper.py: source_image_paths = filter_image_paths(state_manager.get_item('source_paths')) |
|
facefusion//processors/modules/face_swapper.py: source_frames = read_static_images(source_image_paths) |
|
facefusion//processors/modules/face_swapper.py: if mode in [ 'output', 'preview' ] and not is_image(state_manager.get_item('target_path')) and not is_video(state_manager.get_item('target_path')): |
|
facefusion//processors/modules/face_swapper.py: if mode == 'output' and not in_directory(state_manager.get_item('output_path')): |
|
facefusion//processors/modules/face_swapper.py: if mode == 'output' and not same_file_extension(state_manager.get_item('target_path'), state_manager.get_item('output_path')): |
|
facefusion//processors/modules/face_swapper.py: source_vision_frame = read_static_image(get_first(state_manager.get_item('source_paths'))) |
|
facefusion//processors/modules/face_swapper.py: model_path = get_model_options().get('sources').get('face_swapper').get('path') |
|
facefusion//processors/modules/face_swapper.py: model_initializer = get_static_model_initializer(model_path) |
|
facefusion//processors/modules/face_swapper.py:def process_frames(source_paths : List[str], queue_payloads : List[QueuePayload], update_progress : UpdateProgress) -> None: |
|
facefusion//processors/modules/face_swapper.py: source_frames = read_static_images(source_paths) |
|
facefusion//processors/modules/face_swapper.py: target_vision_path = queue_payload['frame_path'] |
|
facefusion//processors/modules/face_swapper.py: target_vision_frame = read_image(target_vision_path) |
|
facefusion//processors/modules/face_swapper.py: write_image(target_vision_path, output_vision_frame) |
|
facefusion//processors/modules/face_swapper.py:def process_image(source_paths : List[str], target_path : str, output_path : str) -> None: |
|
facefusion//processors/modules/face_swapper.py: source_frames = read_static_images(source_paths) |
|
facefusion//processors/modules/face_swapper.py: target_vision_frame = read_static_image(target_path) |
|
facefusion//processors/modules/face_swapper.py: write_image(output_path, output_vision_frame) |
|
facefusion//processors/modules/face_swapper.py:def process_video(source_paths : List[str], temp_frame_paths : List[str]) -> None: |
|
facefusion//processors/modules/face_swapper.py: processors.multi_process_frames(source_paths, temp_frame_paths, process_frames) |
|
facefusion//inference_manager.py: model_path = model_source_set.get(model_name).get('path') |
|
facefusion//inference_manager.py: if is_file(model_path): |
|
facefusion//inference_manager.py: inference_pool[model_name] = create_inference_session(model_path, execution_device_id, execution_providers) |
|
facefusion//inference_manager.py:def create_inference_session(model_path : str, execution_device_id : str, execution_providers : List[ExecutionProvider]) -> InferenceSession: |
|
facefusion//inference_manager.py: return InferenceSession(model_path, providers = inference_session_providers) |
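
Downstream, every resolved model 'path' ends up in inference_manager, which only builds an ONNX Runtime session once the file exists on disk. A rough sketch of that flow; load_inference_pool is a hypothetical wrapper name, os.path.isfile stands in for facefusion's is_file(), and the provider list is a deliberate simplification of how execution_providers and execution_device_id are really mapped:

    # Rough sketch of how a resolved model path becomes an ONNX Runtime session.
    import os
    from onnxruntime import InferenceSession

    def load_inference_pool(model_source_set, execution_device_id, execution_providers):
        inference_pool = {}
        for model_name in model_source_set:
            model_path = model_source_set.get(model_name).get('path')
            if os.path.isfile(model_path):  # missing models are skipped until downloaded into .assets/models
                inference_pool[model_name] = create_inference_session(model_path, execution_device_id, execution_providers)
        return inference_pool

    def create_inference_session(model_path, execution_device_id, execution_providers):
        inference_session_providers = [ 'CPUExecutionProvider' ]  # placeholder mapping of execution_providers / device id
        return InferenceSession(model_path, providers = inference_session_providers)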
|
facefusion//model_helper.py:def get_static_model_initializer(model_path : str) -> ModelInitializer: |
|
facefusion//model_helper.py: model = onnx.load(model_path) |
|
facefusion//args.py:from facefusion.filesystem import get_file_name, is_image, is_video, resolve_file_paths |
|
facefusion//args.py: # paths |
|
facefusion//args.py: apply_state_item('temp_path', args.get('temp_path')) |
|
facefusion//args.py: apply_state_item('jobs_path', args.get('jobs_path')) |
|
facefusion//args.py: apply_state_item('source_paths', args.get('source_paths')) |
|
facefusion//args.py: apply_state_item('target_path', args.get('target_path')) |
|
facefusion//args.py: apply_state_item('output_path', args.get('output_path')) |
|
facefusion//args.py: if is_image(args.get('target_path')): |
|
facefusion//args.py: output_image_resolution = detect_image_resolution(args.get('target_path')) |
|
facefusion//args.py: if is_video(args.get('target_path')): |
|
facefusion//args.py: output_video_resolution = detect_video_resolution(args.get('target_path')) |
|
facefusion//args.py: if args.get('output_video_fps') or is_video(args.get('target_path')): |
|
facefusion//args.py: output_video_fps = normalize_fps(args.get('output_video_fps')) or detect_video_fps(args.get('target_path')) |
|
facefusion//args.py: available_processors = [ get_file_name(file_path) for file_path in resolve_file_paths('facefusion/processors/modules') ] |
|
facefusion//program.py:from facefusion.filesystem import get_file_name, resolve_file_paths |
|
facefusion//program.py:def create_config_path_program() -> ArgumentParser: |
|
facefusion//program.py: group_paths = program.add_argument_group('paths') |
|
facefusion//program.py: group_paths.add_argument('--config-path', help = wording.get('help.config_path'), default = 'facefusion.ini') |
|
facefusion//program.py: job_store.register_job_keys([ 'config_path' ]) |
|
facefusion//program.py: apply_config_path(program) |
|
facefusion//program.py:def create_temp_path_program() -> ArgumentParser: |
|
facefusion//program.py: group_paths = program.add_argument_group('paths') |
|
facefusion//program.py: group_paths.add_argument('--temp-path', help = wording.get('help.temp_path'), default = config.get_str_value('paths', 'temp_path', tempfile.gettempdir())) |
|
facefusion//program.py: job_store.register_job_keys([ 'temp_path' ]) |
|
facefusion//program.py:def create_jobs_path_program() -> ArgumentParser: |
|
facefusion//program.py: group_paths = program.add_argument_group('paths') |
|
facefusion//program.py: group_paths.add_argument('--jobs-path', help = wording.get('help.jobs_path'), default = config.get_str_value('paths', 'jobs_path', '.jobs')) |
|
facefusion//program.py: job_store.register_job_keys([ 'jobs_path' ]) |
|
facefusion//program.py:def create_source_paths_program() -> ArgumentParser: |
|
facefusion//program.py: group_paths = program.add_argument_group('paths') |
|
facefusion//program.py: group_paths.add_argument('-s', '--source-paths', help = wording.get('help.source_paths'), default = config.get_str_list('paths', 'source_paths'), nargs = '+') |
|
facefusion//program.py: job_store.register_step_keys([ 'source_paths' ]) |
|
facefusion//program.py:def create_target_path_program() -> ArgumentParser: |
|
facefusion//program.py: group_paths = program.add_argument_group('paths') |
|
facefusion//program.py: group_paths.add_argument('-t', '--target-path', help = wording.get('help.target_path'), default = config.get_str_value('paths', 'target_path')) |
|
facefusion//program.py: job_store.register_step_keys([ 'target_path' ]) |
|
facefusion//program.py:def create_output_path_program() -> ArgumentParser: |
|
facefusion//program.py: group_paths = program.add_argument_group('paths') |
|
facefusion//program.py: group_paths.add_argument('-o', '--output-path', help = wording.get('help.output_path'), default = config.get_str_value('paths', 'output_path')) |
|
facefusion//program.py: job_store.register_step_keys([ 'output_path' ]) |
|
facefusion//program.py: available_processors = [ get_file_name(file_path) for file_path in resolve_file_paths('facefusion/processors/modules') ] |
|
facefusion//program.py: available_ui_layouts = [ get_file_name(file_path) for file_path in resolve_file_paths('facefusion/uis/layouts') ] |
|
facefusion//program.py: sub_program.add_parser('run', help = wording.get('help.run'), parents = [ create_config_path_program(), create_temp_path_program(), create_jobs_path_program(), create_source_paths_program(), create_target_path_program(), create_output_path_program(), collect_step_program(), create_uis_program(), collect_job_program() ], formatter_class = create_help_formatter_large) |
|
facefusion//program.py: sub_program.add_parser('headless-run', help = wording.get('help.headless_run'), parents = [ create_config_path_program(), create_temp_path_program(), create_jobs_path_program(), create_source_paths_program(), create_target_path_program(), create_output_path_program(), collect_step_program(), collect_job_program() ], formatter_class = create_help_formatter_large) |
|
facefusion//program.py: sub_program.add_parser('batch-run', help = wording.get('help.batch_run'), parents = [ create_config_path_program(), create_temp_path_program(), create_jobs_path_program(), create_source_pattern_program(), create_target_pattern_program(), create_output_pattern_program(), collect_step_program(), collect_job_program() ], formatter_class = create_help_formatter_large) |
|
facefusion//program.py: sub_program.add_parser('job-list', help = wording.get('help.job_list'), parents = [ create_job_status_program(), create_jobs_path_program(), create_log_level_program() ], formatter_class = create_help_formatter_large) |
|
facefusion//program.py: sub_program.add_parser('job-create', help = wording.get('help.job_create'), parents = [ create_job_id_program(), create_jobs_path_program(), create_log_level_program() ], formatter_class = create_help_formatter_large) |
|
facefusion//program.py: sub_program.add_parser('job-submit', help = wording.get('help.job_submit'), parents = [ create_job_id_program(), create_jobs_path_program(), create_log_level_program() ], formatter_class = create_help_formatter_large) |
|
facefusion//program.py: sub_program.add_parser('job-submit-all', help = wording.get('help.job_submit_all'), parents = [ create_jobs_path_program(), create_log_level_program(), create_halt_on_error_program() ], formatter_class = create_help_formatter_large) |
|
facefusion//program.py: sub_program.add_parser('job-delete', help = wording.get('help.job_delete'), parents = [ create_job_id_program(), create_jobs_path_program(), create_log_level_program() ], formatter_class = create_help_formatter_large) |
|
facefusion//program.py: sub_program.add_parser('job-delete-all', help = wording.get('help.job_delete_all'), parents = [ create_jobs_path_program(), create_log_level_program(), create_halt_on_error_program() ], formatter_class = create_help_formatter_large) |
|
facefusion//program.py: sub_program.add_parser('job-add-step', help = wording.get('help.job_add_step'), parents = [ create_job_id_program(), create_config_path_program(), create_jobs_path_program(), create_source_paths_program(), create_target_path_program(), create_output_path_program(), collect_step_program(), create_log_level_program() ], formatter_class = create_help_formatter_large) |
|
facefusion//program.py: sub_program.add_parser('job-remix-step', help = wording.get('help.job_remix_step'), parents = [ create_job_id_program(), create_step_index_program(), create_config_path_program(), create_jobs_path_program(), create_source_paths_program(), create_output_path_program(), collect_step_program(), create_log_level_program() ], formatter_class = create_help_formatter_large) |
|
facefusion//program.py: sub_program.add_parser('job-insert-step', help = wording.get('help.job_insert_step'), parents = [ create_job_id_program(), create_step_index_program(), create_config_path_program(), create_jobs_path_program(), create_source_paths_program(), create_target_path_program(), create_output_path_program(), collect_step_program(), create_log_level_program() ], formatter_class = create_help_formatter_large) |
|
facefusion//program.py: sub_program.add_parser('job-remove-step', help = wording.get('help.job_remove_step'), parents = [ create_job_id_program(), create_step_index_program(), create_jobs_path_program(), create_log_level_program() ], formatter_class = create_help_formatter_large) |
|
facefusion//program.py: sub_program.add_parser('job-run', help = wording.get('help.job_run'), parents = [ create_job_id_program(), create_config_path_program(), create_temp_path_program(), create_jobs_path_program(), collect_job_program() ], formatter_class = create_help_formatter_large) |
|
facefusion//program.py: sub_program.add_parser('job-run-all', help = wording.get('help.job_run_all'), parents = [ create_config_path_program(), create_temp_path_program(), create_jobs_path_program(), collect_job_program(), create_halt_on_error_program() ], formatter_class = create_help_formatter_large) |
|
facefusion//program.py: sub_program.add_parser('job-retry', help = wording.get('help.job_retry'), parents = [ create_job_id_program(), create_config_path_program(), create_temp_path_program(), create_jobs_path_program(), collect_job_program() ], formatter_class = create_help_formatter_large) |
|
facefusion//program.py: sub_program.add_parser('job-retry-all', help = wording.get('help.job_retry_all'), parents = [ create_config_path_program(), create_temp_path_program(), create_jobs_path_program(), collect_job_program(), create_halt_on_error_program() ], formatter_class = create_help_formatter_large) |
|
facefusion//program.py:def apply_config_path(program : ArgumentParser) -> None: |
|
facefusion//program.py: state_manager.init_item('config_path', known_args.config_path) |
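
This is why the blank values under [paths] in facefusion.ini are harmless: each --*-path argument takes its argparse default from that section, and the builders above fall back to tempfile.gettempdir() for temp_path and '.jobs' for jobs_path when the key is empty (source/target/output simply stay unset). A condensed sketch of one builder, assuming config.get_str_value() returns its third argument for a blank key and that the parent parsers are created with add_help disabled:

    # Condensed sketch of one [paths] argument builder. add_help = False and the
    # import form of config / wording are assumptions; the default expression
    # matches the line shown above.
    import tempfile
    from argparse import ArgumentParser
    from facefusion import config, wording

    def create_temp_path_program() -> ArgumentParser:
        program = ArgumentParser(add_help = False)
        group_paths = program.add_argument_group('paths')
        # a blank temp_path in facefusion.ini falls through to tempfile.gettempdir()
        group_paths.add_argument('--temp-path', help = wording.get('help.temp_path'), default = config.get_str_value('paths', 'temp_path', tempfile.gettempdir()))
        return program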
|
facefusion//voice_extractor.py:from facefusion.filesystem import resolve_relative_path |
|
facefusion//voice_extractor.py: 'path': resolve_relative_path('../.assets/models/kim_vocal_2.hash') |
|
facefusion//voice_extractor.py: 'path': resolve_relative_path('../.assets/models/kim_vocal_2.onnx') |
|
facefusion//audio.py:def read_static_audio(audio_path : str, fps : Fps) -> Optional[List[AudioFrame]]: |
|
facefusion//audio.py: return read_audio(audio_path, fps) |
|
facefusion//audio.py:def read_audio(audio_path : str, fps : Fps) -> Optional[List[AudioFrame]]: |
|
facefusion//audio.py: if is_audio(audio_path): |
|
facefusion//audio.py: audio_buffer = read_audio_buffer(audio_path, audio_sample_rate, audio_sample_size, audio_channel_total) |
|
facefusion//audio.py:def read_static_voice(audio_path : str, fps : Fps) -> Optional[List[AudioFrame]]: |
|
facefusion//audio.py: return read_voice(audio_path, fps) |
|
facefusion//audio.py:def read_voice(audio_path : str, fps : Fps) -> Optional[List[AudioFrame]]: |
|
facefusion//audio.py: if is_audio(audio_path): |
|
facefusion//audio.py: audio_buffer = read_audio_buffer(audio_path, voice_sample_rate, voice_sample_size, voice_channel_total) |
|
facefusion//audio.py:def get_audio_frame(audio_path : str, fps : Fps, frame_number : int = 0) -> Optional[AudioFrame]: |
|
facefusion//audio.py: if is_audio(audio_path): |
|
facefusion//audio.py: audio_frames = read_static_audio(audio_path, fps) |
|
facefusion//audio.py:def get_voice_frame(audio_path : str, fps : Fps, frame_number : int = 0) -> Optional[AudioFrame]: |
|
facefusion//audio.py: if is_audio(audio_path): |
|
facefusion//audio.py: voice_frames = read_static_voice(audio_path, fps) |
|
facefusion//hash_helper.py:def validate_hash(validate_path : str) -> bool: |
|
facefusion//hash_helper.py: hash_path = get_hash_path(validate_path) |
|
facefusion//hash_helper.py: if is_file(hash_path): |
|
facefusion//hash_helper.py: with open(hash_path) as hash_file: |
|
facefusion//hash_helper.py: with open(validate_path, 'rb') as validate_file: |
|
facefusion//hash_helper.py:def get_hash_path(validate_path : str) -> Optional[str]: |
|
facefusion//hash_helper.py: if is_file(validate_path): |
|
facefusion//hash_helper.py: validate_directory_path, file_name_and_extension = os.path.split(validate_path) |
|
facefusion//hash_helper.py: return os.path.join(validate_directory_path, validate_file_name + '.hash') |
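
So each downloaded model has a .hash sidecar sitting next to it: get_hash_path() derives the sidecar path from the model path, and validate_hash() then compares the stored digest against the model file (the digest algorithm itself is not visible in this output). A reconstruction of the path half, with os.path helpers standing in for facefusion's filesystem wrappers:

    # Reconstruction of the sidecar path lookup; os.path helpers stand in for
    # facefusion's filesystem wrappers (is_file, get_file_name).
    import os
    from typing import Optional

    def get_hash_path(validate_path : str) -> Optional[str]:
        if os.path.isfile(validate_path):
            validate_directory_path, file_name_and_extension = os.path.split(validate_path)
            validate_file_name, _ = os.path.splitext(file_name_and_extension)
            return os.path.join(validate_directory_path, validate_file_name + '.hash')
        return None

For example, get_hash_path('.assets/models/inswapper_128.onnx') would yield '.assets/models/inswapper_128.hash'.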
|
facefusion//choices.py: 'path': '/facefusion/facefusion-assets/releases/download/{base_name}/{file_name}' |
|
facefusion//choices.py: 'path': '/facefusion/{base_name}/resolve/main/{file_name}' |
|
facefusion//jobs/job_helper.py:def get_step_output_path(job_id : str, step_index : int, output_path : str) -> Optional[str]: |
|
facefusion//jobs/job_helper.py: if output_path: |
|
facefusion//jobs/job_helper.py: output_directory_path, _ = os.path.split(output_path) |
|
facefusion//jobs/job_helper.py: return os.path.join(output_directory_path, output_file_name + '-' + job_id + '-' + str(step_index) + output_file_extension) |
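
get_step_output_path() appears to rename each step's result to <name>-<job_id>-<step_index><extension> inside the directory of the requested output_path, which is what lets job_runner later move or concat the per-step files back onto the final output_path. A reconstruction from the fragments above; the real code derives the name and extension through facefusion's filesystem helpers rather than os.path.splitext:

    # Reconstruction of get_step_output_path (illustrative).
    import os
    from typing import Optional

    def get_step_output_path(job_id : str, step_index : int, output_path : str) -> Optional[str]:
        if output_path:
            output_directory_path, file_name_and_extension = os.path.split(output_path)
            output_file_name, output_file_extension = os.path.splitext(file_name_and_extension)
            return os.path.join(output_directory_path, output_file_name + '-' + job_id + '-' + str(step_index) + output_file_extension)
        return None

For example, get_step_output_path('my-job', 0, '/tmp/output.mp4') would yield '/tmp/output-my-job-0.mp4'.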
|
facefusion//jobs/job_manager.py:from facefusion.jobs.job_helper import get_step_output_path |
|
facefusion//jobs/job_manager.py:JOBS_PATH : Optional[str] = None |
|
facefusion//jobs/job_manager.py:def init_jobs(jobs_path : str) -> bool: |
|
facefusion//jobs/job_manager.py: global JOBS_PATH |
|
facefusion//jobs/job_manager.py: JOBS_PATH = jobs_path |
|
facefusion//jobs/job_manager.py: job_status_paths = [ os.path.join(JOBS_PATH, job_status) for job_status in facefusion.choices.job_statuses ] |
|
facefusion//jobs/job_manager.py: for job_status_path in job_status_paths: |
|
facefusion//jobs/job_manager.py: create_directory(job_status_path) |
|
facefusion//jobs/job_manager.py: return all(is_directory(status_path) for status_path in job_status_paths) |
|
facefusion//jobs/job_manager.py:def clear_jobs(jobs_path : str) -> bool: |
|
facefusion//jobs/job_manager.py: return remove_directory(jobs_path) |
|
facefusion//jobs/job_manager.py: job_pattern = os.path.join(JOBS_PATH, job_status, '*.json') |
|
facefusion//jobs/job_manager.py: job_paths = resolve_file_pattern(job_pattern) |
|
facefusion//jobs/job_manager.py: job_paths.sort(key = os.path.getmtime) |
|
facefusion//jobs/job_manager.py: for job_path in job_paths: |
|
facefusion//jobs/job_manager.py: job_id = get_file_name(job_path) |
|
facefusion//jobs/job_manager.py: output_path = steps[step_index].get('args').get('output_path') |
|
facefusion//jobs/job_manager.py: step_args['target_path'] = get_step_output_path(job_id, step_index, output_path) |
|
facefusion//jobs/job_manager.py: job_path = find_job_path(job_id) |
|
facefusion//jobs/job_manager.py: return read_json(job_path) #type:ignore[return-value] |
|
facefusion//jobs/job_manager.py: job_path = find_job_path(job_id) |
|
facefusion//jobs/job_manager.py: if not is_file(job_path): |
|
facefusion//jobs/job_manager.py: job_create_path = suggest_job_path(job_id, 'drafted') |
|
facefusion//jobs/job_manager.py: return write_json(job_create_path, job) #type:ignore[arg-type] |
|
facefusion//jobs/job_manager.py: job_path = find_job_path(job_id) |
|
facefusion//jobs/job_manager.py: if is_file(job_path): |
|
facefusion//jobs/job_manager.py: return write_json(job_path, job) #type:ignore[arg-type] |
|
facefusion//jobs/job_manager.py: job_path = find_job_path(job_id) |
|
facefusion//jobs/job_manager.py: job_move_path = suggest_job_path(job_id, job_status) |
|
facefusion//jobs/job_manager.py: return move_file(job_path, job_move_path) |
|
facefusion//jobs/job_manager.py: job_path = find_job_path(job_id) |
|
facefusion//jobs/job_manager.py: return remove_file(job_path) |
|
facefusion//jobs/job_manager.py:def suggest_job_path(job_id : str, job_status : JobStatus) -> Optional[str]: |
|
facefusion//jobs/job_manager.py: return os.path.join(JOBS_PATH, job_status, job_file_name) |
|
facefusion//jobs/job_manager.py:def find_job_path(job_id : str) -> Optional[str]: |
|
facefusion//jobs/job_manager.py: job_pattern = os.path.join(JOBS_PATH, job_status, job_file_name) |
|
facefusion//jobs/job_manager.py: job_paths = resolve_file_pattern(job_pattern) |
|
facefusion//jobs/job_manager.py: for job_path in job_paths: |
|
facefusion//jobs/job_manager.py: return job_path |
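
The jobs_path (default '.jobs') is laid out as one subdirectory per job status, each holding <job_id>.json files; init_jobs() creates those subdirectories and find_job_path() globs across them. A condensed sketch under that layout; the status names are assumptions, and glob/os.makedirs stand in for resolve_file_pattern()/create_directory():

    # Condensed sketch of the jobs directory layout handling (illustrative).
    import glob
    import os
    from typing import Optional

    JOB_STATUSES = [ 'drafted', 'queued', 'completed', 'failed' ]  # assumed contents of facefusion.choices.job_statuses

    def init_jobs(jobs_path : str) -> bool:
        job_status_paths = [ os.path.join(jobs_path, job_status) for job_status in JOB_STATUSES ]
        for job_status_path in job_status_paths:
            os.makedirs(job_status_path, exist_ok = True)  # stands in for create_directory()
        return all(os.path.isdir(job_status_path) for job_status_path in job_status_paths)

    def find_job_path(jobs_path : str, job_id : str) -> Optional[str]:
        for job_status in JOB_STATUSES:
            for job_path in glob.glob(os.path.join(jobs_path, job_status, job_id + '.json')):
                return job_path
        return None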
|
facefusion//jobs/job_runner.py: output_path = step_args.get('output_path') |
|
facefusion//jobs/job_runner.py: step_output_path = job_helper.get_step_output_path(job_id, step_index, output_path) |
|
facefusion//jobs/job_runner.py: return move_file(output_path, step_output_path) and job_manager.set_step_status(job_id, step_index, 'completed') |
|
facefusion//jobs/job_runner.py: for output_path, temp_output_paths in output_set.items(): |
|
facefusion//jobs/job_runner.py: if are_videos(temp_output_paths): |
|
facefusion//jobs/job_runner.py: if not concat_video(output_path, temp_output_paths): |
|
facefusion//jobs/job_runner.py: if are_images(temp_output_paths): |
|
facefusion//jobs/job_runner.py: for temp_output_path in temp_output_paths: |
|
facefusion//jobs/job_runner.py: if not move_file(temp_output_path, output_path): |
|
facefusion//jobs/job_runner.py: for temp_output_paths in output_set.values(): |
|
facefusion//jobs/job_runner.py: for temp_output_path in temp_output_paths: |
|
facefusion//jobs/job_runner.py: if not remove_file(temp_output_path): |
|
facefusion//jobs/job_runner.py: output_path = step.get('args').get('output_path') |
|
facefusion//jobs/job_runner.py: if output_path: |
|
facefusion//jobs/job_runner.py: step_output_path = job_manager.get_step_output_path(job_id, index, output_path) |
|
facefusion//jobs/job_runner.py: job_output_set.setdefault(output_path, []).append(step_output_path) |
|
facefusion//exit_helper.py: if state_manager.get_item('target_path'): |
|
facefusion//exit_helper.py: clear_temp_directory(state_manager.get_item('target_path')) |
|
facefusion//uis/ui_helper.py:def suggest_output_path(output_directory_path : str, target_path : str) -> Optional[str]: |
|
facefusion//uis/ui_helper.py: if is_image(target_path) or is_video(target_path): |
|
facefusion//uis/ui_helper.py: target_file_extension = get_file_extension(target_path) |
|
facefusion//uis/ui_helper.py: return os.path.join(output_directory_path, output_file_name + target_file_extension) |
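
When the output textbox holds a directory rather than a file, the instant_runner and job_manager components below route it through suggest_output_path(), which appends a generated file name carrying the target's extension. A sketch with a placeholder base name, since the generated name is not visible in this output:

    # Sketch of suggest_output_path; the generated base name is a placeholder.
    import os
    from typing import Optional
    from facefusion.filesystem import is_image, is_video

    def suggest_output_path(output_directory_path : str, target_path : str) -> Optional[str]:
        if is_image(target_path) or is_video(target_path):
            _, target_file_extension = os.path.splitext(target_path)  # stands in for get_file_extension()
            output_file_name = 'output'  # placeholder; the real code generates a unique name
            return os.path.join(output_directory_path, output_file_name + target_file_extension)
        return None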
|
facefusion//uis/core.py:from facefusion.filesystem import resolve_relative_path |
|
facefusion//uis/core.py: os.environ['GRADIO_TEMP_DIR'] = os.path.join(state_manager.get_item('temp_path'), 'gradio') |
|
facefusion//uis/core.py: overrides_css_path = resolve_relative_path('uis/assets/overrides.css') |
|
facefusion//uis/core.py: return open(overrides_css_path).read() |
|
facefusion//uis/overrides.py:from facefusion.temp_helper import create_temp_directory, get_temp_file_path |
|
facefusion//uis/overrides.py:def convert_video_to_playable_mp4(video_path : str) -> str: |
|
facefusion//uis/overrides.py: video_file_size = get_file_size(video_path) |
|
facefusion//uis/overrides.py: create_temp_directory(video_path) |
|
facefusion//uis/overrides.py: temp_video_path = get_temp_file_path(video_path) |
|
facefusion//uis/overrides.py: ffmpeg_builder.set_input(video_path), |
|
facefusion//uis/overrides.py: ffmpeg_builder.force_output(temp_video_path) |
|
facefusion//uis/overrides.py: return temp_video_path |
|
facefusion//uis/overrides.py: if not video_is_playable(video_path): |
|
facefusion//uis/overrides.py: create_temp_directory(video_path) |
|
facefusion//uis/overrides.py: temp_video_path = get_temp_file_path(video_path) |
|
facefusion//uis/overrides.py: ffmpeg_builder.set_input(video_path), |
|
facefusion//uis/overrides.py: ffmpeg_builder.force_output(temp_video_path) |
|
facefusion//uis/overrides.py: return temp_video_path |
|
facefusion//uis/overrides.py: return video_path |
|
facefusion//uis/overrides.py:def check_allowed(path : str, check_in_upload_folder : bool) -> None: |
|
facefusion//uis/components/preview.py:from facefusion.filesystem import filter_audio_paths, is_image, is_video |
|
facefusion//uis/components/preview.py: source_frames = read_static_images(state_manager.get_item('source_paths')) |
|
facefusion//uis/components/preview.py: source_audio_path = get_first(filter_audio_paths(state_manager.get_item('source_paths'))) |
|
facefusion//uis/components/preview.py: if source_audio_path and state_manager.get_item('output_video_fps') and state_manager.get_item('reference_frame_number'): |
|
facefusion//uis/components/preview.py: temp_audio_frame = get_audio_frame(source_audio_path, state_manager.get_item('output_video_fps'), state_manager.get_item('reference_frame_number')) |
|
facefusion//uis/components/preview.py: if is_image(state_manager.get_item('target_path')): |
|
facefusion//uis/components/preview.py: target_vision_frame = read_static_image(state_manager.get_item('target_path')) |
|
facefusion//uis/components/preview.py: if is_video(state_manager.get_item('target_path')): |
|
facefusion//uis/components/preview.py: temp_vision_frame = read_video_frame(state_manager.get_item('target_path'), state_manager.get_item('reference_frame_number')) |
|
facefusion//uis/components/preview.py: preview_frame_slider_options['maximum'] = count_video_frame_total(state_manager.get_item('target_path')) |
|
facefusion//uis/components/preview.py: if is_video(state_manager.get_item('target_path')): |
|
facefusion//uis/components/preview.py: preview_vision_frame = normalize_frame_color(read_video_frame(state_manager.get_item('target_path'), frame_number)) |
|
facefusion//uis/components/preview.py: source_frames = read_static_images(state_manager.get_item('source_paths')) |
|
facefusion//uis/components/preview.py: source_audio_path = get_first(filter_audio_paths(state_manager.get_item('source_paths'))) |
|
facefusion//uis/components/preview.py: if source_audio_path and state_manager.get_item('output_video_fps') and state_manager.get_item('reference_frame_number'): |
|
facefusion//uis/components/preview.py: temp_audio_frame = get_audio_frame(source_audio_path, state_manager.get_item('output_video_fps'), reference_audio_frame_number) |
|
facefusion//uis/components/preview.py: if is_image(state_manager.get_item('target_path')): |
|
facefusion//uis/components/preview.py: target_vision_frame = read_static_image(state_manager.get_item('target_path')) |
|
facefusion//uis/components/preview.py: if is_video(state_manager.get_item('target_path')): |
|
facefusion//uis/components/preview.py: temp_vision_frame = read_video_frame(state_manager.get_item('target_path'), frame_number) |
|
facefusion//uis/components/preview.py: if is_video(state_manager.get_item('target_path')): |
|
facefusion//uis/components/preview.py: video_frame_total = count_video_frame_total(state_manager.get_item('target_path')) |
|
facefusion//uis/components/execution.py:from facefusion.filesystem import get_file_name, resolve_file_paths |
|
facefusion//uis/components/execution.py: available_processors = [ get_file_name(file_path) for file_path in resolve_file_paths('facefusion/processors/modules') ] |
|
facefusion//uis/components/benchmark.py: 'target_path', |
|
facefusion//uis/components/benchmark.py:def suggest_output_path(target_path : str) -> Optional[str]: |
|
facefusion//uis/components/benchmark.py: if is_video(target_path): |
|
facefusion//uis/components/benchmark.py: target_file_extension = get_file_extension(target_path) |
|
facefusion//uis/components/benchmark.py: return os.path.join(tempfile.gettempdir(), hashlib.sha1().hexdigest()[:8] + target_file_extension) |
|
facefusion//uis/components/benchmark.py: state_manager.init_item('source_paths', [ '.assets/examples/source.jpg', '.assets/examples/source.mp3' ]) |
|
facefusion//uis/components/benchmark.py: target_paths = [ BENCHMARKS[benchmark_run] for benchmark_run in benchmark_runs if benchmark_run in BENCHMARKS ] |
|
facefusion//uis/components/benchmark.py: if target_paths: |
|
facefusion//uis/components/benchmark.py: for target_path in target_paths: |
|
facefusion//uis/components/benchmark.py: state_manager.init_item('target_path', target_path) |
|
facefusion//uis/components/benchmark.py: state_manager.init_item('output_path', suggest_output_path(state_manager.get_item('target_path'))) |
|
facefusion//uis/components/benchmark.py: video_frame_total = count_video_frame_total(state_manager.get_item('target_path')) |
|
facefusion//uis/components/benchmark.py: output_video_resolution = detect_video_resolution(state_manager.get_item('target_path')) |
|
facefusion//uis/components/benchmark.py: state_manager.init_item('output_video_fps', detect_video_fps(state_manager.get_item('target_path'))) |
|
facefusion//uis/components/benchmark.py: state_manager.get_item('target_path'), |
|
facefusion//uis/components/download.py:from facefusion.filesystem import get_file_name, resolve_file_paths |
|
facefusion//uis/components/download.py: available_processors = [ get_file_name(file_path) for file_path in resolve_file_paths('facefusion/processors/modules') ] |
|
facefusion//uis/components/trim_frame.py: if is_video(state_manager.get_item('target_path')): |
|
facefusion//uis/components/trim_frame.py: video_frame_total = count_video_frame_total(state_manager.get_item('target_path')) |
|
facefusion//uis/components/trim_frame.py: if is_video(state_manager.get_item('target_path')): |
|
facefusion//uis/components/trim_frame.py: video_frame_total = count_video_frame_total(state_manager.get_item('target_path')) |
|
facefusion//uis/components/trim_frame.py: video_frame_total = count_video_frame_total(state_manager.get_item('target_path')) |
|
facefusion//uis/components/job_manager.py:from facefusion.uis.ui_helper import convert_int_none, convert_str_none, suggest_output_path |
|
facefusion//uis/components/job_manager.py: if job_manager.init_jobs(state_manager.get_item('jobs_path')): |
|
facefusion//uis/components/job_manager.py: output_path = step_args.get('output_path') |
|
facefusion//uis/components/job_manager.py: if is_directory(step_args.get('output_path')): |
|
facefusion//uis/components/job_manager.py: step_args['output_path'] = suggest_output_path(step_args.get('output_path'), state_manager.get_item('target_path')) |
|
facefusion//uis/components/job_manager.py: state_manager.set_item('output_path', output_path) |
|
facefusion//uis/components/job_manager.py: state_manager.set_item('output_path', output_path) |
|
facefusion//uis/components/job_manager.py: state_manager.set_item('output_path', output_path) |
|
facefusion//uis/components/job_manager.py: state_manager.set_item('output_path', output_path) |
|
facefusion//uis/components/job_manager.py: state_manager.set_item('output_path', output_path) |
|
facefusion//uis/components/job_manager.py: state_manager.set_item('output_path', output_path) |
|
facefusion//uis/components/webcam.py:from facefusion.filesystem import filter_image_paths, is_directory |
|
facefusion//uis/components/webcam.py: source_image_paths = filter_image_paths(state_manager.get_item('source_paths')) |
|
facefusion//uis/components/webcam.py: source_frames = read_static_images(source_image_paths) |
|
facefusion//uis/components/webcam.py: device_directory_path = '/sys/devices/virtual/video4linux' |
|
facefusion//uis/components/webcam.py: if is_directory(device_directory_path): |
|
facefusion//uis/components/webcam.py: device_names = os.listdir(device_directory_path) |
|
facefusion//uis/components/webcam.py: device_path = '/dev/' + device_name |
|
facefusion//uis/components/webcam.py: commands.extend(ffmpeg_builder.set_output(device_path)) |
|
facefusion//uis/components/face_selector.py: if is_image(state_manager.get_item('target_path')): |
|
facefusion//uis/components/face_selector.py: reference_frame = read_static_image(state_manager.get_item('target_path')) |
|
facefusion//uis/components/face_selector.py: if is_video(state_manager.get_item('target_path')): |
|
facefusion//uis/components/face_selector.py: reference_frame = read_video_frame(state_manager.get_item('target_path'), state_manager.get_item('reference_frame_number')) |
|
facefusion//uis/components/face_selector.py: if is_image(state_manager.get_item('target_path')): |
|
facefusion//uis/components/face_selector.py: temp_vision_frame = read_static_image(state_manager.get_item('target_path')) |
|
facefusion//uis/components/face_selector.py: if is_video(state_manager.get_item('target_path')): |
|
facefusion//uis/components/face_selector.py: temp_vision_frame = read_video_frame(state_manager.get_item('target_path'), state_manager.get_item('reference_frame_number')) |
|
facefusion//uis/components/job_list_options.py: if job_manager.init_jobs(state_manager.get_item('jobs_path')): |
|
facefusion//uis/components/target.py: is_target_image = is_image(state_manager.get_item('target_path')) |
|
facefusion//uis/components/target.py: is_target_video = is_video(state_manager.get_item('target_path')) |
|
facefusion//uis/components/target.py: value = state_manager.get_item('target_path') if is_target_image or is_target_video else None |
|
facefusion//uis/components/target.py: target_image_options['value'] = TARGET_FILE.value.get('path') |
|
facefusion//uis/components/target.py: target_video_options['value'] = TARGET_FILE.value.get('path') |
|
facefusion//uis/components/target.py: state_manager.set_item('target_path', file.name) |
|
facefusion//uis/components/target.py: state_manager.set_item('target_path', file.name) |
|
facefusion//uis/components/target.py: state_manager.clear_item('target_path') |
|
facefusion//uis/components/temp_frame.py: visible = is_video(state_manager.get_item('target_path')) |
|
facefusion//uis/components/temp_frame.py: if is_video(state_manager.get_item('target_path')): |
|
facefusion//uis/components/processors.py:from facefusion.filesystem import get_file_name, resolve_file_paths |
|
facefusion//uis/components/processors.py: available_processors = [ get_file_name(file_path) for file_path in resolve_file_paths('facefusion/processors/modules') ] |
|
facefusion//uis/components/job_runner.py: if job_manager.init_jobs(state_manager.get_item('jobs_path')): |
|
facefusion//uis/components/source.py:from facefusion.filesystem import filter_audio_paths, filter_image_paths, has_audio, has_image |
|
facefusion//uis/components/source.py: has_source_audio = has_audio(state_manager.get_item('source_paths')) |
|
facefusion//uis/components/source.py: has_source_image = has_image(state_manager.get_item('source_paths')) |
|
facefusion//uis/components/source.py: value = state_manager.get_item('source_paths') if has_source_audio or has_source_image else None |
|
facefusion//uis/components/source.py: source_file_names = [ source_file_value.get('path') for source_file_value in SOURCE_FILE.value ] if SOURCE_FILE.value else None |
|
facefusion//uis/components/source.py: source_audio_path = get_first(filter_audio_paths(source_file_names)) |
|
facefusion//uis/components/source.py: source_image_path = get_first(filter_image_paths(source_file_names)) |
|
facefusion//uis/components/source.py: value = source_audio_path if has_source_audio else None, |
|
facefusion//uis/components/source.py: value = source_image_path if has_source_image else None, |
|
facefusion//uis/components/source.py: source_audio_path = get_first(filter_audio_paths(file_names)) |
|
facefusion//uis/components/source.py: source_image_path = get_first(filter_image_paths(file_names)) |
|
facefusion//uis/components/source.py: state_manager.set_item('source_paths', file_names) |
|
facefusion//uis/components/source.py: return gradio.Audio(value = source_audio_path, visible = has_source_audio), gradio.Image(value = source_image_path, visible = has_source_image) |
|
facefusion//uis/components/source.py: state_manager.clear_item('source_paths') |
|
facefusion//uis/components/output_options.py: if is_image(state_manager.get_item('target_path')): |
|
facefusion//uis/components/output_options.py: output_image_resolution = detect_image_resolution(state_manager.get_item('target_path')) |
|
facefusion//uis/components/output_options.py: if is_video(state_manager.get_item('target_path')): |
|
facefusion//uis/components/output_options.py: output_video_resolution = detect_video_resolution(state_manager.get_item('target_path')) |
|
facefusion//uis/components/output_options.py: visible = is_image(state_manager.get_item('target_path')) |
|
facefusion//uis/components/output_options.py: visible = is_image(state_manager.get_item('target_path')) |
|
facefusion//uis/components/output_options.py: visible = is_video(state_manager.get_item('target_path')) |
|
facefusion//uis/components/output_options.py: visible = is_video(state_manager.get_item('target_path')) |
|
facefusion//uis/components/output_options.py: visible = is_video(state_manager.get_item('target_path')) |
|
facefusion//uis/components/output_options.py: visible = is_video(state_manager.get_item('target_path')) |
|
facefusion//uis/components/output_options.py: visible = is_video(state_manager.get_item('target_path')) |
|
facefusion//uis/components/output_options.py: visible = is_video(state_manager.get_item('target_path')) |
|
facefusion//uis/components/output_options.py: visible = is_video(state_manager.get_item('target_path')) |
|
facefusion//uis/components/output_options.py: visible = is_video(state_manager.get_item('target_path')) |
|
facefusion//uis/components/output_options.py: if is_image(state_manager.get_item('target_path')): |
|
facefusion//uis/components/output_options.py: output_image_resolution = detect_image_resolution(state_manager.get_item('target_path')) |
|
facefusion//uis/components/output_options.py: if is_video(state_manager.get_item('target_path')): |
|
facefusion//uis/components/output_options.py: output_video_resolution = detect_video_resolution(state_manager.get_item('target_path')) |
|
facefusion//uis/components/output_options.py: state_manager.set_item('output_video_fps', detect_video_fps(state_manager.get_item('target_path'))) |
|
facefusion//uis/components/instant_runner.py:from facefusion.uis.ui_helper import suggest_output_path |
|
facefusion//uis/components/instant_runner.py: if job_manager.init_jobs(state_manager.get_item('jobs_path')): |
|
facefusion//uis/components/instant_runner.py: output_path = step_args.get('output_path') |
|
facefusion//uis/components/instant_runner.py: if is_directory(step_args.get('output_path')): |
|
facefusion//uis/components/instant_runner.py: step_args['output_path'] = suggest_output_path(step_args.get('output_path'), state_manager.get_item('target_path')) |
|
facefusion//uis/components/instant_runner.py: if job_manager.init_jobs(state_manager.get_item('jobs_path')): |
|
facefusion//uis/components/instant_runner.py: state_manager.set_item('output_path', output_path) |
|
facefusion//uis/components/instant_runner.py: if is_image(step_args.get('output_path')): |
|
facefusion//uis/components/instant_runner.py: return gradio.Button(visible = True), gradio.Button(visible = False), gradio.Image(value = step_args.get('output_path'), visible = True), gradio.Video(value = None, visible = False) |
|
facefusion//uis/components/instant_runner.py: if is_video(step_args.get('output_path')): |
|
facefusion//uis/components/instant_runner.py: return gradio.Button(visible = True), gradio.Button(visible = False), gradio.Image(value = None, visible = False), gradio.Video(value = step_args.get('output_path'), visible = True) |
|
facefusion//uis/components/instant_runner.py: if state_manager.get_item('target_path'): |
|
facefusion//uis/components/instant_runner.py: clear_temp_directory(state_manager.get_item('target_path')) |
|
facefusion//uis/components/output.py:OUTPUT_PATH_TEXTBOX : Optional[gradio.Textbox] = None |
|
facefusion//uis/components/output.py: global OUTPUT_PATH_TEXTBOX |
|
facefusion//uis/components/output.py: if not state_manager.get_item('output_path'): |
|
facefusion//uis/components/output.py: state_manager.set_item('output_path', tempfile.gettempdir()) |
|
facefusion//uis/components/output.py: OUTPUT_PATH_TEXTBOX = gradio.Textbox( |
|
facefusion//uis/components/output.py: label = wording.get('uis.output_path_textbox'), |
|
facefusion//uis/components/output.py: value = state_manager.get_item('output_path'), |
|
facefusion//uis/components/output.py: OUTPUT_PATH_TEXTBOX.change(update_output_path, inputs = OUTPUT_PATH_TEXTBOX) |
|
facefusion//uis/components/output.py:def update_output_path(output_path : str) -> None: |
|
facefusion//uis/components/output.py: state_manager.set_item('output_path', output_path) |
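
On the UI side a blank output_path is tolerated the same way: the output component seeds it with tempfile.gettempdir() and writes any edit straight back into state. A sketch of that wiring, with the Gradio render/bind split simplified and the import form of state_manager and wording assumed:

    # Sketch of the output path textbox fallback (Gradio wiring simplified).
    import tempfile
    import gradio
    from facefusion import state_manager, wording

    def render() -> None:
        if not state_manager.get_item('output_path'):
            state_manager.set_item('output_path', tempfile.gettempdir())  # blank INI value -> system temp dir
        output_path_textbox = gradio.Textbox(
            label = wording.get('uis.output_path_textbox'),
            value = state_manager.get_item('output_path')
        )
        output_path_textbox.change(update_output_path, inputs = output_path_textbox)

    def update_output_path(output_path : str) -> None:
        state_manager.set_item('output_path', output_path)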
|
facefusion//uis/components/job_list.py: if job_manager.init_jobs(state_manager.get_item('jobs_path')): |
|
facefusion//uis/layouts/benchmark.py: ui.launch(favicon_path = 'facefusion.ico', inbrowser = state_manager.get_item('open_browser')) |
|
facefusion//uis/layouts/jobs.py: ui.launch(favicon_path = 'facefusion.ico', inbrowser = state_manager.get_item('open_browser')) |
|
facefusion//uis/layouts/default.py: ui.launch(favicon_path = 'facefusion.ico', inbrowser = state_manager.get_item('open_browser')) |
|
facefusion//uis/layouts/webcam.py: ui.launch(favicon_path = 'facefusion.ico', inbrowser = state_manager.get_item('open_browser')) |
|
facefusion//app_context.py: if os.path.join('facefusion', 'jobs') in frame.f_code.co_filename: |
|
facefusion//app_context.py: if os.path.join('facefusion', 'uis') in frame.f_code.co_filename: |
|
facefusion//json.py:def read_json(json_path : str) -> Optional[Content]: |
|
facefusion//json.py: if is_file(json_path): |
|
facefusion//json.py: with open(json_path) as json_file: |
|
facefusion//json.py:def write_json(json_path : str, content : Content) -> bool: |
|
facefusion//json.py: with open(json_path, 'w') as json_file: |
|
facefusion//json.py: return is_file(json_path) |
|
facefusion//face_masker.py:from facefusion.filesystem import resolve_relative_path |
|
facefusion//face_masker.py: 'path': resolve_relative_path('../.assets/models/xseg_1.hash') |
|
facefusion//face_masker.py: 'path': resolve_relative_path('../.assets/models/xseg_1.onnx') |
|
facefusion//face_masker.py: 'path': resolve_relative_path('../.assets/models/xseg_2.hash') |
|
facefusion//face_masker.py: 'path': resolve_relative_path('../.assets/models/xseg_2.onnx') |
|
facefusion//face_masker.py: 'path': resolve_relative_path('../.assets/models/xseg_3.hash') |
|
facefusion//face_masker.py: 'path': resolve_relative_path('../.assets/models/xseg_3.onnx') |
|
facefusion//face_masker.py: 'path': resolve_relative_path('../.assets/models/bisenet_resnet_18.hash') |
|
facefusion//face_masker.py: 'path': resolve_relative_path('../.assets/models/bisenet_resnet_18.onnx') |
|
facefusion//face_masker.py: 'path': resolve_relative_path('../.assets/models/bisenet_resnet_34.hash') |
|
facefusion//face_masker.py: 'path': resolve_relative_path('../.assets/models/bisenet_resnet_34.onnx') |
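
None of these model 'path' entries depend on the [paths] section: resolve_relative_path() presumably anchors '../.assets/models/...' to the facefusion package directory, so the models land under .assets/models in the repository root. A guess at that helper, stated as an assumption rather than the actual implementation:

    # Assumed behavior of resolve_relative_path (not visible in this grep output):
    # resolve the given path against the directory of the defining module.
    import os

    def resolve_relative_path(path : str) -> str:
        return os.path.abspath(os.path.join(os.path.dirname(__file__), path))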
|
facefusion//face_detector.py:from facefusion.filesystem import resolve_relative_path |
|
facefusion//face_detector.py: 'path': resolve_relative_path('../.assets/models/retinaface_10g.hash') |
|
facefusion//face_detector.py: 'path': resolve_relative_path('../.assets/models/retinaface_10g.onnx') |
|
facefusion//face_detector.py: 'path': resolve_relative_path('../.assets/models/scrfd_2.5g.hash') |
|
facefusion//face_detector.py: 'path': resolve_relative_path('../.assets/models/scrfd_2.5g.onnx') |
|
facefusion//face_detector.py: 'path': resolve_relative_path('../.assets/models/yoloface_8n.hash') |
|
facefusion//face_detector.py: 'path': resolve_relative_path('../.assets/models/yoloface_8n.onnx') |