import os
import shutil
import subprocess
import tempfile
import zipfile
import requests
import rarfile
import tarfile
from flask import Flask, render_template, request, jsonify, send_file
from urllib.parse import urlparse
from werkzeug.utils import secure_filename
import json
from pathlib import Path
import threading
import time

app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = 'temp_repos'
app.config['OUTPUT_FOLDER'] = 'outputs'
app.config['MAX_CONTENT_LENGTH'] = 500 * 1024 * 1024

ALLOWED_EXTENSIONS = {
    'zip', 'rar', '7z', 'tar', 'tar.gz', 'tgz', 'tar.bz2', 'tar.xz',
    'py', 'js', 'html', 'css', 'java', 'cpp', 'c', 'h', 'php', 'rb',
    'go', 'rs', 'ts', 'vue', 'jsx', 'tsx', 'md', 'txt', 'json', 'xml',
    'yaml', 'yml', 'ini', 'cfg', 'conf', 'sh', 'bat', 'ps1', 'sql'
}

os.makedirs(app.config['UPLOAD_FOLDER'], exist_ok=True)
os.makedirs(app.config['OUTPUT_FOLDER'], exist_ok=True)

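# In-memory progress map keyed by session_id; state lives in this process only,
# so it is lost on restart and is not shared across multiple worker processes.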
download_progress = {}

def allowed_file(filename):
    """Check whether an uploaded file is allowed."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    # Multi-part tar suffixes are accepted explicitly, since rsplit only sees the last part.
    if filename.lower().endswith(('.tar.gz', '.tar.bz2', '.tar.xz')):
        return True
    return extension in ALLOWED_EXTENSIONS

def extract_archive(file_path, extract_to, session_id):
    """Extract an archive of any supported format into extract_to."""
    try:
        download_progress[session_id] = {'status': 'extracting', 'progress': 30}

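        # Caution: tarfile.extractall trusts member paths by default; Python 3.12+
        # supports extraction filters (e.g. extractall(path, filter='data')) to block
        # path traversal when handling untrusted archives.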
        if file_path.lower().endswith('.zip'):
            with zipfile.ZipFile(file_path, 'r') as zip_ref:
                zip_ref.extractall(extract_to)

        elif file_path.lower().endswith('.rar'):
            with rarfile.RarFile(file_path, 'r') as rar_ref:
                rar_ref.extractall(extract_to)

        elif file_path.lower().endswith(('.tar', '.tar.gz', '.tgz', '.tar.bz2', '.tar.xz')):
            mode = 'r'
            if file_path.lower().endswith(('.tar.gz', '.tgz')):
                mode = 'r:gz'
            elif file_path.lower().endswith('.tar.bz2'):
                mode = 'r:bz2'
            elif file_path.lower().endswith('.tar.xz'):
                mode = 'r:xz'

            with tarfile.open(file_path, mode) as tar_ref:
                tar_ref.extractall(extract_to)

        elif file_path.lower().endswith('.7z'):
            # 7z extraction is delegated to the external 7zip command-line tool.
            try:
                subprocess.run(['7z', 'x', file_path, f'-o{extract_to}'], check=True, capture_output=True)
            except (subprocess.CalledProcessError, FileNotFoundError):
                raise Exception("Extracting .7z files requires the 7zip command-line tool")
        else:
            raise Exception(f"Unsupported archive format: {file_path}")

        download_progress[session_id] = {'status': 'completed', 'progress': 100}
        return True

    except Exception as e:
        download_progress[session_id] = {'status': 'error', 'message': f'Extraction failed: {str(e)}'}
        return False

def process_uploaded_file(file_path, session_id):
    """Process an uploaded file: extract archives, or copy single files as-is."""
    try:
        download_progress[session_id] = {'status': 'processing', 'progress': 10}

        filename = os.path.basename(file_path)
        extract_dir = os.path.join(app.config['UPLOAD_FOLDER'], f"{session_id}_uploaded")

        if os.path.exists(extract_dir):
            shutil.rmtree(extract_dir)
        os.makedirs(extract_dir)

        if any(filename.lower().endswith(ext) for ext in ['.zip', '.rar', '.7z', '.tar', '.tar.gz', '.tgz', '.tar.bz2', '.tar.xz']):
            success = extract_archive(file_path, extract_dir, session_id)
            if not success:
                return None
        else:
            download_progress[session_id] = {'status': 'copying', 'progress': 50}
            shutil.copy2(file_path, extract_dir)
            download_progress[session_id] = {'status': 'completed', 'progress': 100}

        return extract_dir

    except Exception as e:
        download_progress[session_id] = {'status': 'error', 'message': str(e)}
        return None


def extract_github_info(url):
    """Extract the username and repository name from a GitHub URL."""
    parsed = urlparse(url)
    path_parts = parsed.path.strip('/').split('/')
    if len(path_parts) >= 2:
        return path_parts[0], path_parts[1]
    return None, None

def download_github_repo(url, session_id):
    """Download a GitHub repository."""
    try:
        download_progress[session_id] = {'status': 'starting', 'progress': 0}

        username, repo_name = extract_github_info(url)
        if not username or not repo_name:
            download_progress[session_id] = {'status': 'error', 'message': 'Invalid GitHub URL'}
            return None

        temp_dir = os.path.join(app.config['UPLOAD_FOLDER'], f"{session_id}_{repo_name}")
        if os.path.exists(temp_dir):
            shutil.rmtree(temp_dir)

        download_progress[session_id] = {'status': 'downloading', 'progress': 20}

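        # GitHub serves branch snapshots at /archive/refs/heads/<branch>.zip.
        # Only 'main' and then 'master' are tried below, so repositories whose
        # default branch has a different name will not be found.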
        zip_url = f"https://github.com/{username}/{repo_name}/archive/refs/heads/main.zip"

        response = requests.get(zip_url, stream=True)
        if response.status_code != 200:
            zip_url = f"https://github.com/{username}/{repo_name}/archive/refs/heads/master.zip"
            response = requests.get(zip_url, stream=True)

        if response.status_code != 200:
            download_progress[session_id] = {'status': 'error', 'message': 'Could not download the repository; please check that the URL is correct'}
            return None

        download_progress[session_id] = {'status': 'downloading', 'progress': 50}

        zip_path = os.path.join(app.config['UPLOAD_FOLDER'], f"{session_id}_{repo_name}.zip")
        with open(zip_path, 'wb') as f:
            for chunk in response.iter_content(chunk_size=8192):
                f.write(chunk)

        download_progress[session_id] = {'status': 'extracting', 'progress': 70}

        with zipfile.ZipFile(zip_path, 'r') as zip_ref:
            zip_ref.extractall(app.config['UPLOAD_FOLDER'])

        # GitHub archives unpack into a '<repo>-<branch>' folder; rename it to the session directory.
        extracted_folders = [f for f in os.listdir(app.config['UPLOAD_FOLDER'])
                             if f.startswith(f"{repo_name}-") and os.path.isdir(os.path.join(app.config['UPLOAD_FOLDER'], f))]

        if extracted_folders:
            extracted_path = os.path.join(app.config['UPLOAD_FOLDER'], extracted_folders[0])
            os.rename(extracted_path, temp_dir)

        os.remove(zip_path)

        download_progress[session_id] = {'status': 'completed', 'progress': 100}
        return temp_dir

    except Exception as e:
        download_progress[session_id] = {'status': 'error', 'message': str(e)}
        return None

def get_file_tree(directory, ignore_dirs=None):
    """Build the file tree structure for a directory."""
    if ignore_dirs is None:
        ignore_dirs = set()

    def should_ignore(path):
        # Directory and file names are matched as plain substrings of the path;
        # compiled-bytecode files are filtered by suffix since glob patterns are not expanded here.
        if os.path.basename(path).endswith(('.pyc', '.pyo', '.pyd')):
            return True
        return any(ignore_pattern in path for ignore_pattern in [
            'node_modules', '__pycache__', '.git', '.idea', 'venv', 'env',
            '.DS_Store', 'Thumbs.db'
        ])

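    # Entries are flat dicts: {'type': 'folder' | 'file', 'name', 'path', 'level', 'size' (files only)};
    # a client can rebuild the hierarchy from the 'level' values.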
    tree = []

    try:
        for root, dirs, files in os.walk(directory):
            # Prune ignored directories in place so os.walk does not descend into them.
            dirs[:] = [d for d in dirs if not should_ignore(os.path.join(root, d))]

            level = root.replace(directory, '').count(os.sep)

            folder_name = os.path.basename(root)
            if level > 0:
                tree.append({
                    'type': 'folder',
                    'name': folder_name,
                    'path': root,
                    'level': level
                })

            for file in files:
                if not should_ignore(file):
                    file_path = os.path.join(root, file)
                    tree.append({
                        'type': 'file',
                        'name': file,
                        'path': file_path,
                        'level': level + 1,
                        'size': os.path.getsize(file_path) if os.path.exists(file_path) else 0
                    })
    except Exception as e:
        print(f"Error generating file tree: {e}")

    return tree

def copy_selected_files_to_txt(source_dir, output_file, selected_files):
    """Copy the contents of the selected files into a single txt file."""
    try:
        with open(output_file, 'w', encoding='utf-8') as outfile:
            for file_path in selected_files:
                if os.path.exists(file_path) and os.path.isfile(file_path):
                    relative_path = os.path.relpath(file_path, source_dir)

                    outfile.write(f"{'='*50}\n")
                    outfile.write(f"File path: {relative_path}\n")
                    outfile.write(f"{'='*50}\n\n")

                    # Read as UTF-8 first, fall back to GBK, then mark undecodable/binary content.
                    try:
                        with open(file_path, 'r', encoding='utf-8') as infile:
                            content = infile.read()
                            outfile.write(content)
                    except UnicodeDecodeError:
                        try:
                            with open(file_path, 'r', encoding='gbk') as infile:
                                content = infile.read()
                                outfile.write(content)
                        except Exception:
                            outfile.write("[Binary file or encoding error; content cannot be displayed]\n")
                    except Exception as e:
                        outfile.write(f"[Error reading file: {str(e)}]\n")

                    outfile.write("\n\n")
        return True
    except Exception as e:
        print(f"Error copying files: {e}")
        return False

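# ---- Flask routes ----
# All endpoints below return JSON, except the index page (HTML) and the merged-file download.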
@app.route('/')
def index():
    return render_template('index.html')

@app.route('/upload', methods=['POST'])
def upload_file():
    if 'file' not in request.files:
        return jsonify({'error': 'No file selected'}), 400

    file = request.files['file']
    if file.filename == '':
        return jsonify({'error': 'No file selected'}), 400

    if not allowed_file(file.filename):
        return jsonify({'error': 'Unsupported file format'}), 400

    session_id = str(int(time.time()))

    try:
        filename = secure_filename(file.filename)
        file_path = os.path.join(app.config['UPLOAD_FOLDER'], f"{session_id}_{filename}")
        file.save(file_path)

        # Process the upload in a background thread so the request returns immediately;
        # the client polls /progress/<session_id> for status.
        thread = threading.Thread(target=process_uploaded_file, args=(file_path, session_id))
        thread.start()

        return jsonify({'session_id': session_id, 'filename': filename})

    except Exception as e:
        return jsonify({'error': f'Upload failed: {str(e)}'}), 500

@app.route('/download', methods=['POST'])  # NOTE: route path assumed; point the frontend's repo-download request here
def download_repo():
    data = request.get_json()
    github_url = data.get('url')
    session_id = data.get('session_id', str(int(time.time())))

    if not github_url:
        return jsonify({'error': 'Please provide a GitHub URL'}), 400

    # Download the repository in a background thread; progress is reported via download_progress.
    thread = threading.Thread(target=download_github_repo, args=(github_url, session_id))
    thread.start()

    return jsonify({'session_id': session_id})

@app.route('/progress/<session_id>')
def get_progress(session_id):
    return jsonify(download_progress.get(session_id, {'status': 'unknown'}))

@app.route('/files/<session_id>')
def get_files(session_id):
    # Locate the extracted directory for this session under the upload folder.
    for item in os.listdir(app.config['UPLOAD_FOLDER']):
        if item.startswith(session_id) and os.path.isdir(os.path.join(app.config['UPLOAD_FOLDER'], item)):
            repo_path = os.path.join(app.config['UPLOAD_FOLDER'], item)
            file_tree = get_file_tree(repo_path)
            return jsonify({'files': file_tree, 'repo_path': repo_path})

    return jsonify({'error': 'Repository files not found'}), 404

@app.route('/merge', methods=['POST'])
def merge_files():
    data = request.get_json()
    session_id = data.get('session_id')
    selected_files = data.get('selected_files', [])

    if not session_id or not selected_files:
        return jsonify({'error': 'Missing required parameters'}), 400

    repo_path = None
    for item in os.listdir(app.config['UPLOAD_FOLDER']):
        if item.startswith(session_id) and os.path.isdir(os.path.join(app.config['UPLOAD_FOLDER'], item)):
            repo_path = os.path.join(app.config['UPLOAD_FOLDER'], item)
            break

    if not repo_path:
        return jsonify({'error': 'Repository not found'}), 404

    output_filename = f"merged_{session_id}.txt"
    output_path = os.path.join(app.config['OUTPUT_FOLDER'], output_filename)

    success = copy_selected_files_to_txt(repo_path, output_path, selected_files)

    if success:
        return jsonify({'download_url': f'/download_result/{output_filename}'})
    else:
        return jsonify({'error': 'Error while merging files'}), 500

@app.route('/download_result/<filename>')
def download_result(filename):
    # Sanitize the requested name so only files generated into OUTPUT_FOLDER can be served.
    filename = secure_filename(filename)
    file_path = os.path.join(app.config['OUTPUT_FOLDER'], filename)
    if os.path.exists(file_path):
        return send_file(file_path, as_attachment=True, download_name=filename)
    return "File not found", 404

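# Flask's built-in development server; debug=True enables the interactive debugger
# and auto-reload, so it should not be exposed like this in production.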
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=7860, debug=True)