import re
import traceback
from typing import Any, Dict, List

import language_tool_python

from text_utils import convert_markdown_to_plain_text

|
def perform_language_checks(markdown_text_from_filtered_pdf: str) -> Dict[str, Any]:
    """
    Run LanguageTool checks on plain text derived from font-filtered Markdown.

    Reported issues are filtered to those that fall between the first
    occurrence of "abstract" and the first occurrence of "references" or
    "bibliography" within this specific text.
    """
    if not markdown_text_from_filtered_pdf or not markdown_text_from_filtered_pdf.strip():
        print("LT_Checker: Input Markdown text is empty.")
        return {"total_issues": 0, "issues_list": [], "text_used_for_analysis": ""}
|
    # Convert the Markdown to plain text and collapse all whitespace so that
    # LanguageTool offsets index into a single normalized string.
    plain_text_from_markdown = convert_markdown_to_plain_text(markdown_text_from_filtered_pdf)
    text_for_lt_analysis = plain_text_from_markdown.replace('\n', ' ')
    text_for_lt_analysis = re.sub(r'\s+', ' ', text_for_lt_analysis).strip()

    if not text_for_lt_analysis:
        print("LT_Checker: Plain text derived from Markdown is empty after cleaning.")
        return {"total_issues": 0, "issues_list": [], "text_used_for_analysis": ""}

    text_for_lt_analysis_lower = text_for_lt_analysis.lower()
|
    # The content range starts at the first occurrence of "abstract", if present.
    abstract_match = re.search(r'\babstract\b', text_for_lt_analysis_lower)
    content_start_index = abstract_match.start() if abstract_match else 0
    if abstract_match:
        print(f"LT_Checker: Found 'abstract' at index {content_start_index} in its text.")
    else:
        print("LT_Checker: Did not find 'abstract'; LT analysis starts from index 0 of its text.")
|
    # The content range ends at the earliest occurrence of "references" or
    # "bibliography"; if neither appears, it runs to the end of the text.
    references_match = re.search(r'\breferences\b', text_for_lt_analysis_lower)
    bibliography_match = re.search(r'\bbibliography\b', text_for_lt_analysis_lower)
    content_end_index = len(text_for_lt_analysis)

    if references_match and bibliography_match:
        content_end_index = min(references_match.start(), bibliography_match.start())
        print(f"LT_Checker: Found 'references' at {references_match.start()} and 'bibliography' at {bibliography_match.start()}. Using {content_end_index} as end boundary.")
    elif references_match:
        content_end_index = references_match.start()
        print(f"LT_Checker: Found 'references' at {content_end_index}. Using it as end boundary.")
    elif bibliography_match:
        content_end_index = bibliography_match.start()
        print(f"LT_Checker: Found 'bibliography' at {content_end_index}. Using it as end boundary.")
    else:
        print(f"LT_Checker: Did not find 'references' or 'bibliography'. LT analysis runs to the end of its text (index {content_end_index}).")

    if content_start_index >= content_end_index:
        print(f"LT_Checker: Warning: content start index ({content_start_index}) is not before end index ({content_end_index}); no LT issues will be reported from this range.")
|
    tool = None
    processed_lt_issues: List[Dict[str, Any]] = []
    try:
        tool = language_tool_python.LanguageTool('en-US')
        raw_lt_matches = tool.check(text_for_lt_analysis)

        lt_issues_in_range = 0
        for idx, match in enumerate(raw_lt_matches):
            # Skip hyphen-split-word matches; on PDF-derived text these are
            # usually line-break hyphenation artifacts rather than real errors.
            if match.ruleId == "EN_SPLIT_WORDS_HYPHEN":
                continue

            # Keep only issues inside the [abstract, references) content range.
            if not (content_start_index <= match.offset < content_end_index):
                continue
            lt_issues_in_range += 1
|
            error_text_verbatim = match.matchedText

            # Build a short context snippet: up to `words_around` words on
            # either side of the matched text.
            words_around = 1

            pre_error_text = text_for_lt_analysis[:match.offset]
            words_before = pre_error_text.split()[-words_around:]

            post_error_text = text_for_lt_analysis[match.offset + match.errorLength:]
            words_after = post_error_text.split()[:words_around]

            context_parts = []
            if words_before:
                context_parts.append(" ".join(words_before))
            context_parts.append(error_text_verbatim)
            if words_after:
                context_parts.append(" ".join(words_after))

            wider_context_str = " ".join(context_parts)
|
            processed_lt_issues.append({
                '_internal_id': f"lt_{idx}",
                'ruleId': match.ruleId,
                'message': match.message,
                'context_text': wider_context_str,
                'error_text_verbatim': error_text_verbatim,
                'offset_in_text': match.offset,
                'error_length': match.errorLength,
                'replacements_suggestion': match.replacements[:3] if match.replacements else [],
                'category_name': match.category,
                'source_check_type': 'LanguageTool',
                # PDF-mapping fields default to "not mapped"; a downstream
                # step may fill them in.
                'is_mapped_to_pdf': False,
                'pdf_coordinates_list': [],
                'mapped_page_number': -1
            })
        print(f"LT_Checker: LanguageTool found {len(raw_lt_matches)} raw issues; {lt_issues_in_range} fall within the defined content range of its text.")

        return {
            "total_issues": len(processed_lt_issues),
            "issues_list": processed_lt_issues,
            "text_used_for_analysis": text_for_lt_analysis
        }
|
    except Exception as e:
        print(f"Error in perform_language_checks: {e}\n{traceback.format_exc()}")
        return {"error": str(e), "total_issues": 0, "issues_list": [], "text_used_for_analysis": text_for_lt_analysis}
    finally:
        # Shut down the LanguageTool server process if one was started.
        if tool:
            tool.close()
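

# Minimal usage sketch. It assumes `text_utils` is importable and that Java is
# available for language_tool_python; the Markdown snippet below is a
# hypothetical example, not project data.
if __name__ == "__main__":
    sample_markdown = (
        "# A Hypothetical Paper\n\n"
        "Abstract\n\n"
        "This are a sentence with an obvious agreement error.\n\n"
        "References\n\n"
        "[1] A. Author. Some citation. 2024.\n"
    )
    results = perform_language_checks(sample_markdown)
    print(f"Total issues found: {results['total_issues']}")
    for issue in results["issues_list"]:
        print(f"- [{issue['ruleId']}] {issue['message']} | context: {issue['context_text']}")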