|
import re |
|
import string |
|
|
|
import requests |
|
from langchain.callbacks import get_openai_callback |
|
from langchain_anthropic import ChatAnthropic |
|
|
|
from translator.prompt_glossary import PROMPT_WITH_GLOSSARY |
|
|
|
|
|
def get_content(filepath: str, timeout: float = 30.0) -> str:
    """Fetch the raw text of a file from the ``huggingface/transformers`` main branch.

    Args:
        filepath: Path of the file inside the repository
            (e.g. ``"docs/source/en/index.md"``).
        timeout: Seconds to wait for the HTTP response. A finite default
            prevents the request from hanging indefinitely (the original
            call had no timeout at all).

    Returns:
        The file content as text.

    Raises:
        ValueError: If ``filepath`` is empty, or the server did not answer
            with HTTP 200.
        requests.exceptions.RequestException: On network-level failures
            (connection errors, timeout).
    """
    if not filepath:
        raise ValueError("No files selected for translation.")

    url = string.Template(
        "https://raw.githubusercontent.com/huggingface/" "transformers/main/$filepath"
    ).safe_substitute(filepath=filepath)
    # Explicit timeout: requests.get blocks forever by default.
    response = requests.get(url, timeout=timeout)
    if response.status_code == 200:
        return response.text
    raise ValueError("Failed to retrieve content from the URL.", url)
|
|
|
|
|
def preprocess_content(content: str) -> str:
    """Trim everything before the first Markdown heading and normalize blank lines.

    Args:
        content: Raw markdown document (typically with a license header
            before the first ``#`` heading).

    Returns:
        The document from its first ``#`` onward, with runs of blank lines
        collapsed to a single blank line so paragraph splitting on
        ``"\\n\\n"`` is stable. If there is no ``#`` at all, the whole
        document is returned unchanged (previously ``find`` returning -1
        made ``content[-1:]`` keep only the final character — a bug).
    """
    start = content.find("#")
    # find() == -1 means "no heading": translate the whole document instead
    # of slicing from the last character.
    to_translate = content if start == -1 else content[start:]
    to_translate = re.sub(r"\n\n+", "\n\n", to_translate)
    return to_translate
|
|
|
|
|
def get_full_prompt(language: str, to_translate: str, additional_instruction: str = "") -> str:
    """Build the complete LLM prompt requesting a translation of a markdown file.

    Args:
        language: Target language name, substituted into the base prompt.
        to_translate: Preprocessed markdown content to translate.
        additional_instruction: Optional extra instruction appended at the end.

    Returns:
        The prompt: instructions, the markdown wrapped in a ```md fence,
        the shared glossary prompt, and any additional instruction.
    """
    # Fixed: the original adjacent string literals ran together as
    # "...content intactNo explanations..." and left a stray unmatched "**"
    # after "file"; a period/space separator and the removed "**" restore
    # the intended sentence.
    base_prompt = string.Template(
        "What do these sentences about Hugging Face Transformers "
        "(a machine learning library) mean in $language? "
        "Please do not translate the word after a 🤗 emoji "
        "as it is a product name. Output the complete markdown file, "
        "with prose translated and all other content intact. "
        "No explanations or extras—only the translated markdown. "
        "Also translate all comments within code blocks as well."
    ).safe_substitute(language=language)

    # Open the fenced block; the closing fence is joined in below.
    base_prompt += "\n\n```md"

    full_prompt = "\n".join([base_prompt, to_translate.strip(), "```", PROMPT_WITH_GLOSSARY])

    if additional_instruction.strip():
        full_prompt += f"\n\n🗒️ Additional instructions: {additional_instruction.strip()}"

    return full_prompt
|
|
|
|
|
def split_markdown_sections(markdown: str) -> list:
    """Split markdown at ATX headers, keeping header marker and title apart.

    Returns a flat list shaped like ``[marker, title, body, marker, title,
    body, ...]`` — one (marker, title, body) triple per section. Any text
    before the first header is discarded.
    """
    header_pattern = re.compile(r"^(#+\s+)(.*)$", re.MULTILINE)
    pieces = header_pattern.split(markdown)
    # pieces[0] is the chunk preceding the first header; drop it.
    return pieces[1:]
|
|
|
|
|
|
|
def get_anchors(divided: list) -> list:
    """Derive ``[[anchor]]`` tags from section titles.

    ``divided`` is the flat ``[marker, title, body, ...]`` list produced by
    ``split_markdown_sections``, so titles sit at indices 1, 4, 7, ...
    Each anchor is the title lowercased, stripped of non-alphanumeric
    characters, with whitespace collapsed and replaced by hyphens.
    """
    anchors = []
    for heading in divided[1::3]:
        slug = re.sub(r"[^a-z0-9\s]+", "", heading.lower())
        slug = re.sub(r"\s{2,}", " ", slug.strip())
        anchors.append(f"[[{slug.replace(' ', '-')}]]")
    return anchors
|
|
|
|
|
def make_scaffold(content: str, to_translate: str) -> string.Template:
    """Swap each translatable paragraph in ``content`` for a numbered placeholder.

    Returns a ``string.Template`` whose ``$hf_i18n_placeholderN`` slots can
    later be filled with translated paragraphs (see ``fill_scaffold``).
    """
    scaffold = content
    paragraphs = to_translate.split("\n\n")
    for index, paragraph in enumerate(paragraphs):
        # count=1: only the first occurrence, so repeated paragraphs map to
        # distinct placeholders.
        scaffold = scaffold.replace(paragraph, f"$hf_i18n_placeholder{index}", 1)
    print("inner scaffold:")
    print(scaffold)
    return string.Template(scaffold)
|
|
|
|
|
def is_in_code_block(text: str, position: int) -> bool:
    """Return True when ``position`` falls inside a fenced ``` code block.

    An odd number of ``` fences before ``position`` means the last fence was
    an opener that has not been closed yet — i.e. the position is inside a
    code block.
    """
    fences_before = text[:position].count("```")
    return bool(fences_before % 2)
|
|
|
|
|
def fill_scaffold(content: str, to_translate: str, translated: str) -> str:
    """Re-insert translated paragraphs into the original document scaffold.

    Builds a placeholder scaffold from ``content``/``to_translate``, appends
    the original sections' anchors to the translated headers, then substitutes
    the translated paragraphs back into the scaffold.

    Args:
        content: The original full document (used to build the scaffold).
        to_translate: The preprocessed source text that was sent for translation.
        translated: The LLM's translated markdown.

    Returns:
        The reassembled translated document, or an ``"Error: ..."`` string on
        a section-count mismatch (callers must check for that prefix; this
        function does not raise on mismatch).
    """
    scaffold = make_scaffold(content, to_translate)
    # Debug output retained from the original tool.
    print("scaffold:")
    print(scaffold.template)

    # Only used for the diagnostic length print further below.
    original_sections = to_translate.split("\n\n")

    # Flat [marker, title, body, ...] triples of the source text.
    divided = split_markdown_sections(to_translate)
    print("divided:")
    print(divided)
    # Anchors are derived from the ORIGINAL titles so links keep working
    # after the headers themselves are translated.
    anchors = get_anchors(divided)

    translated_divided = split_markdown_sections(translated)
    print("translated divided:")
    print(translated_divided)

    # [1::3] selects the title element of each (marker, title, body) triple.
    # Reconcile anchor count with the translated header count: truncate or
    # pad with empty strings so the zip-like loop below stays in bounds.
    if len(translated_divided[1::3]) != len(anchors):
        print(f"Warning: Header count mismatch. Original: {len(anchors)}, Translated: {len(translated_divided[1::3])}")
        if len(translated_divided[1::3]) < len(anchors):
            anchors = anchors[:len(translated_divided[1::3])]
        else:
            anchors.extend([""] * (len(translated_divided[1::3]) - len(anchors)))

    # Append each anchor to its translated header — but only when the header
    # text occurs in prose, not inside a fenced code block (a "#" line in a
    # code sample must not receive an anchor).
    # NOTE(review): variable name suggests Korean is the expected target
    # language; the logic itself is language-agnostic.
    for i, korean_title in enumerate(translated_divided[1::3]):
        if i < len(anchors):
            header_pos = translated.find(korean_title.strip())
            if header_pos != -1 and not is_in_code_block(translated, header_pos):
                # index 1 + i*3 is the title slot of the i-th triple.
                translated_divided[1 + i * 3] = f"{korean_title} {anchors[i]}"
            else:
                translated_divided[1 + i * 3] = korean_title

    # Re-join the (marker, title, body) triples into one document string.
    reconstructed_translated = "".join([
        "".join(translated_divided[i * 3 : i * 3 + 3])
        for i in range(len(translated_divided) // 3)
    ])

    # Paragraph-level sections that will fill the scaffold placeholders.
    translated_sections = reconstructed_translated.split("\n\n")

    print("scaffold template count:")
    print(scaffold.template.count("$hf_i18n_placeholder"))
    print("original sections length:")
    print(len(original_sections))
    print("translated sections length:")
    print(len(translated_sections))

    placeholder_count = scaffold.template.count("$hf_i18n_placeholder")

    # Force the section count to match the placeholder count: pad with empty
    # strings or drop the surplus.
    if len(translated_sections) < placeholder_count:
        translated_sections.extend([""] * (placeholder_count - len(translated_sections)))
    elif len(translated_sections) > placeholder_count:
        translated_sections = translated_sections[:placeholder_count]

    # NOTE(review): after the padding/truncation above this branch looks
    # unreachable; kept as a defensive guard.
    if len(translated_sections) != placeholder_count:
        return f"Error: Section count mismatch. Expected: {placeholder_count}, Got: {len(translated_sections)}"

    # safe_substitute leaves any unmatched placeholder literal rather than
    # raising KeyError.
    translated_doc = scaffold.safe_substitute(
        {f"hf_i18n_placeholder{i}": text for i, text in enumerate(translated_sections)}
    )
    return translated_doc
|
|
|
|
|
def llm_translate(to_translate: str) -> tuple[str, str]:
    """Send the prepared prompt to Claude and return usage info plus the reply.

    Args:
        to_translate: The full prompt string (built by ``get_full_prompt``).

    Returns:
        A ``(cb, content)`` pair. NOTE(review): despite the declared
        ``tuple[str, str]``, ``cb`` is the callback handler object (token and
        cost statistics), not a str — callers presumably stringify it; confirm
        before tightening the annotation.
    """
    # NOTE(review): get_openai_callback is designed for OpenAI models; its
    # token/cost counters may stay at zero for ChatAnthropic — confirm.
    with get_openai_callback() as cb:
        model = ChatAnthropic(
            model="claude-sonnet-4-20250514", max_tokens=64000, streaming=True
        )
        ai_message = model.invoke(to_translate)
        print("cb:", cb)
        return cb, ai_message.content
|
|