from transformers import AutoTokenizer, AutoModelForCausalLM
from bs4 import BeautifulSoup, Tag
import datetime
import requests
import torch
import re

NoisePatterns = {
	'(No)Script':          r'<[ ]*(script|noscript)[^>]*?>.*?<\/[ ]*\1[ ]*>',
	'Style':               r'<[ ]*(style)[^>]*?>.*?<\/[ ]*\1[ ]*>',
	'Svg':                 r'<[ ]*(svg)[^>]*?>.*?<\/[ ]*\1[ ]*>',

	'Meta+Link':           r'<[ ]*(meta|link)[^>]*?[\/]?[ ]*>',
	'Comment':             r'<[ ]*!--.*?--[ ]*>',
	'Base64Img':           r'<[ ]*img[^>]+src="data:image\/[^;]+;base64,[^"]+"[^>]*[\/]?[ ]*>',
	'DocType':             r'<!(DOCTYPE|doctype)[ ]*[a-z]*>',

	'DataAttributes':      r'[ ]+data-[\w-]+="[^"]*"',
	'Classes':             r'[ ]+class="[^"]*"',
	'EmptyAttributes':     r'[ ]+[a-z-]+=""',
	'DateTime':            r'[ ]+datetime="[^"]*"',

	'EmptyTags':           r'(?:<[ ]*([a-z]{1,10})[^>]*>[ \t\r\n]*){1,5}(?:<\/[ ]*\1[ ]*>){1,5}',
	'EmptyLines':          r'^[ \t]*\r?\n',
}

def RemoveNoise(RawHtml: str) -> str:
	'''Remove noise from HTML content.
	Args:
		RawHtml (str): The raw HTML content.
	Returns:
		str: Cleaned HTML content without noise.
	'''
	CleanedHtml = RawHtml
	for PatternName, Pattern in NoisePatterns.items():
		if PatternName in ['EmptyLines', 'EmptyTags']: # These patterns are line-based
			CleanedHtml = re.sub(Pattern, '', CleanedHtml, flags=re.MULTILINE)
		else:
			CleanedHtml = re.sub(Pattern, '', CleanedHtml, flags=re.DOTALL | re.IGNORECASE | re.MULTILINE)
	return CleanedHtml
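
# Illustrative example (not executed, just a sketch of what RemoveNoise does):
# Input:  '<div class="hero" data-id="1"><script>track()</script>Hello</div>'
# Output: '<div>Hello</div>'
# The script block is stripped first, then the data-* attribute, then the class attribute.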

def FetchHtmlContent(Url: str) -> str | int:
	'''Fetch HTML content from a URL.
	Args:
		Url (str): The URL to fetch HTML content from.
	Returns:
		str | int: The raw HTML content on success, or the HTTP status code on failure.
	'''
	Headers = {
		'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'
	}
	Response = requests.get(Url, headers=Headers, timeout=30) # Timeout prevents the request from hanging indefinitely
	if Response.status_code == 200:
		return Response.text
	else:
		return Response.status_code
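
# Callers should branch on the return type: a str is the page body, an int is the
# failing HTTP status code (see the isinstance check in PurifyHtml below).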

def PurifyHtml(Url: str) -> str | None:
	Start = datetime.datetime.now()
	RawHtml = FetchHtmlContent(Url)
	if isinstance(RawHtml, str):
		RawCharCount = len(RawHtml)

		Soup = BeautifulSoup(RawHtml, 'html.parser')
		PrettifiedHtml = str(Soup.prettify())

		Title = Soup.title.string if Soup.title else 'No title found'
		MetaDesc = Soup.find('meta', attrs={'name': 'description'})
		Description = MetaDesc.get('content', 'No description found') if isinstance(MetaDesc, Tag) else 'No description found'

		CleanedHtml = RemoveNoise(PrettifiedHtml)

		CleanedCharCount = len(CleanedHtml)
		Ratio = CleanedCharCount / RawCharCount if RawCharCount > 0 else 0

		Summary = [
			'<!-- --- Purification Summary ---',
			f'URL: {Url}',
			f'Title: {Title}',
			f'Description: {Description}',
			f'Time of Fetch: {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")} (Took {datetime.datetime.now() - Start})',
			f'Retained Size: {Ratio:.2%} of original (lower means more noise was removed)',
			f'Characters: {RawCharCount} -> {CleanedCharCount} ({RawCharCount - CleanedCharCount} characters removed)',
			'----------------------------- -->'
		]
		for Line in Summary:
			print(Line)

		# Load ReaderLM-v2 (Jina AI's HTML-to-markdown model) on CPU; note this reloads the weights on every call
		Tokenizer = AutoTokenizer.from_pretrained('jinaai/ReaderLM-v2')
		Model = AutoModelForCausalLM.from_pretrained('jinaai/ReaderLM-v2', torch_dtype=torch.float32, device_map='cpu')

		Prompt = f'Convert this HTML to markdown:\n\n{CleanedHtml}'
		Inputs = Tokenizer(Prompt, return_tensors='pt', truncation=True, max_length=8192)
		Outputs = Model.generate(Inputs.input_ids, max_new_tokens=8192, do_sample=False)
		GeneratedTokens = Outputs[0][Inputs.input_ids.shape[1]:] # Decode only the newly generated tokens; slicing the decoded string by len(Prompt) is unreliable after the tokenization round-trip
		SummaryOutput = Tokenizer.decode(GeneratedTokens, skip_special_tokens=True)

		return SummaryOutput.strip()

	else:
		print(f'Failed to fetch HTML content. Status code: {RawHtml}')
		return None
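
# Minimal usage sketch. The URL below is only a placeholder; swap in any page you
# want to convert. Assumes network access and that the jinaai/ReaderLM-v2 weights
# can be downloaded by transformers.
if __name__ == '__main__':
	Markdown = PurifyHtml('https://example.com')
	if Markdown is not None:
		print(Markdown)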