import re
import warnings
import requests
from bs4 import BeautifulSoup
import urllib.parse
import time
import random
from urllib.parse import urlparse, parse_qs
from concurrent.futures import ThreadPoolExecutor, as_completed

warnings.simplefilter('ignore', requests.packages.urllib3.exceptions.InsecureRequestWarning)

class WebSearch:
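    """DuckDuckGo-based web and image search helper.

    Runs an HTML search, filters out ad/tracking/redirect URLs, fetches the top
    result pages concurrently, and returns their extracted text; search_images
    scrapes candidate image URLs from Google, Bing, and DuckDuckGo.
    """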
    def __init__(self, num_results=4, max_chars_per_page=6000, max_images=10):
        self.num_results = num_results
        self.max_chars_per_page = max_chars_per_page
        self.reference = []
        self.results = []
        self.max_images = max_images
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'en-US,en;q=0.5',
            'Accept-Encoding': 'gzip, deflate',
            'DNT': '1',
            'Connection': 'keep-alive',
        }
        # Common domains for direct content
        self.content_domains = [
            "wikipedia.org", "webmd.com", "mayoclinic.org", "healthline.com", "nih.gov", 
            "clevelandclinic.org", "nhs.uk", "cdc.gov", "medlineplus.gov", "hopkinsmedicine.org"
        ]
        # Ad and tracking domains to filter out
        self.blocked_domains = [
            "ad.doubleclick.net", "googleadservices.com", "bing.com/aclick", "duckduckgo.com/y.js",
            "amazon.com/s", "ads.google.com", "analytics", "tracker", "pixel", "adservice"
        ]

    def is_valid_url(self, url):
        """Check if URL is valid and not an ad/tracking URL"""
        if not url or len(url) < 10:
            return False
            
        try:
            parsed = urlparse(url)
            
            # Check if URL has a valid scheme and netloc
            if not all([parsed.scheme, parsed.netloc]):
                return False
                
            # Filter out ad/tracking URLs
            domain = parsed.netloc.lower()
            path = parsed.path.lower()
            query = parsed.query.lower()
            
            # Block URLs containing ad-related indicators
            for blocked in self.blocked_domains:
                if blocked in domain or blocked in path:
                    return False
                    
            # Block URLs with ad-related query parameters
            if any(param in query for param in ["ad", "click", "track", "clkid", "msclkid"]):
                return False
                
            # Extra check for redirect URLs
            if "redirect" in path or "goto" in path or "go.php" in path:
                return False
                
            # Reject extremely long URLs (often tracking)
            if len(url) > 500:
                return False
                
            return True
            
        except Exception:
            return False

    def clean_url(self, url):
        """Clean the URL by removing tracking parameters"""
        try:
            parsed = urlparse(url)
            
            # List of known tracking parameters to remove
            tracking_params = [
                'utm_', 'ref_', 'ref=', 'refid', 'fbclid', 'gclid', 'msclkid', 'dclid',
                'zanpid', 'icid', 'igshid', 'mc_eid', '_hsenc', 'mkt_tok', 'yclid'
            ]
            
            # Parse query parameters
            query_params = parse_qs(parsed.query)
            
            # Remove tracking parameters
            filtered_params = {
                k: v for k, v in query_params.items() 
                if not any(tracker in k.lower() for tracker in tracking_params)
            }
            
            # Rebuild query string
            clean_query = urllib.parse.urlencode(filtered_params, doseq=True) if filtered_params else ""
            
            # Reconstruct URL
            clean_url = urllib.parse.urlunparse((
                parsed.scheme,
                parsed.netloc,
                parsed.path,
                parsed.params,
                clean_query,
                ""  # Remove fragment
            ))
            
            return clean_url
            
        except Exception:
            # If any error occurs, return the original URL
            return url

    def extract_real_url_from_redirect(self, url):
        """Extract the actual URL from a redirect URL"""
        try:
            parsed = urlparse(url)
            
            # Handle DuckDuckGo redirects
            if "duckduckgo.com" in parsed.netloc and "u3=" in parsed.query:
                params = parse_qs(parsed.query)
                if "u3" in params and params["u3"]:
                    redirect_url = params["u3"][0]
                    # Handle nested redirects (like Bing redirects inside DuckDuckGo)
                    if "bing.com/aclick" in redirect_url:
                        bing_parsed = urlparse(redirect_url)
                        bing_params = parse_qs(bing_parsed.query)
                        if "u" in bing_params and bing_params["u"]:
                            decoded_url = urllib.parse.unquote(bing_params["u"][0])
                            return self.clean_url(decoded_url)
                    return self.clean_url(redirect_url)
            
            # Handle Bing redirects
            if "bing.com/aclick" in url:
                params = parse_qs(parsed.query)
                if "u" in params and params["u"]:
                    return self.clean_url(urllib.parse.unquote(params["u"][0]))
                    
            return url
            
        except Exception:
            return url

    def extract_text_from_webpage(self, html_content):
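        """Strip non-content tags from raw HTML and return whitespace-normalized text."""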
        soup = BeautifulSoup(html_content, "html.parser")
        
        # Remove non-content elements
        for tag in soup(["script", "style", "header", "footer", "nav", "form", "svg", 
                         "aside", "iframe", "noscript", "img", "figure", "button"]):
            tag.extract()
            
        # Extract text and normalize spacing
        text = ' '.join(soup.stripped_strings)
        text = re.sub(r'\s+', ' ', text).strip()
        
        return text

    def search(self, query):
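        """Run a DuckDuckGo HTML search for `query` and return a list of dicts
        with 'link', 'title', and 'text' for the top result pages."""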
        results = []
        encoded_query = urllib.parse.quote(query)
        url = f'https://html.duckduckgo.com/html/?q={encoded_query}'

        try:
            with requests.Session() as session:
                session.headers.update(self.headers)

                response = session.get(url, timeout=10)
                soup = BeautifulSoup(response.text, 'html.parser')
                
                # Getting more results than needed to account for filtering
                search_results = soup.find_all('div', class_='result')[:self.num_results * 2]
                links = []
                
                # Extract and process links
                for result in search_results:
                    link_tag = result.find('a', class_='result__a')
                    if not link_tag or not link_tag.get('href'):
                        continue
                        
                    original_link = link_tag['href']
                    
                    # Process link to get the actual URL
                    clean_link = self.extract_real_url_from_redirect(original_link)
                    
                    # Validate the URL
                    if self.is_valid_url(clean_link):
                        links.append(clean_link)
                
                # Prioritize content domains
                prioritized_links = []
                other_links = []
                
                for link in links:
                    if any(domain in link for domain in self.content_domains):
                        prioritized_links.append(link)
                    else:
                        other_links.append(link)
                
                # Combine prioritized links first, then others
                final_links = prioritized_links + other_links
                
                # Limit to unique links up to num_results
                unique_links = []
                seen_domains = set()
                
                for link in final_links:
                    domain = urlparse(link).netloc
                    if domain not in seen_domains and len(unique_links) < self.num_results:
                        unique_links.append(link)
                        seen_domains.add(domain)

                def fetch_page(link):
                    try:
                        # Random delay to avoid being blocked
                        time.sleep(random.uniform(0.5, 1.5))
                        
                        # Set a longer timeout for reliable fetching
                        page_response = session.get(link, timeout=10, verify=False)
                        
                        # Only process HTML content
                        if 'text/html' not in page_response.headers.get('Content-Type', ''):
                            return None
                            
                        page_soup = BeautifulSoup(page_response.text, 'lxml')
                        
                        # Remove non-content elements
                        for tag in page_soup(['script', 'style', 'header', 'footer',
                                              'nav', 'form', 'iframe', 'noscript']):
                            tag.decompose()
                        
                        # Extract text with better formatting
                        text = ' '.join(page_soup.stripped_strings)
                        text = re.sub(r'\s+', ' ', text).strip()
                        
                        title = page_soup.title.string.strip() if page_soup.title and page_soup.title.string else "Untitled Page"
                        
                        return {
                            'link': link,
                            'title': title,
                            'text': text[:self.max_chars_per_page]
                        }
                    except Exception as e:
                        print(f"Error fetching {link}: {str(e)}")
                        return None

                # Guard against an empty link list: ThreadPoolExecutor requires max_workers >= 1
                with ThreadPoolExecutor(max_workers=max(1, min(len(unique_links), 4))) as executor:
                    future_to_url = {executor.submit(fetch_page, link): link for link in unique_links}

                    for future in as_completed(future_to_url):
                        result = future.result()
                        if result:
                            results.append(result)

                return results

        except Exception as e:
            print(f"Search error: {str(e)}")
            return []

    def search_images(self, query):
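        """Scrape candidate image URLs for `query` from Google, Bing, and DuckDuckGo image search pages."""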
        images = []
        encoded_query = urllib.parse.quote(query)

        # Reuse the shared request headers, adding the extra field used for image searches
        headers = {**self.headers, 'Upgrade-Insecure-Requests': '1'}

        # Try multiple sources for better results
        image_sources = [
            f"https://www.google.com/search?q={encoded_query}&tbm=isch&hl=en",
            f"https://www.bing.com/images/search?q={encoded_query}&form=HDRSC2&first=1",
            f"https://duckduckgo.com/?q={encoded_query}&iar=images&iax=images&ia=images"
        ]
        
        for source_url in image_sources:
            try:
                time.sleep(random.uniform(0.5, 1.0))  # Polite delay
                response = requests.get(source_url, headers=headers, verify=False, timeout=10)
                soup = BeautifulSoup(response.text, 'html.parser')
                
                # Extract image URLs from img tags
                for img in soup.find_all('img'):
                    src = img.get('src', '')
                    if src and src.startswith('http') and self.is_image_url(src):
                        cleaned_url = self.clean_url(src)
                        if self.is_valid_image(cleaned_url):
                            images.append(cleaned_url)
                
                # Extract image URLs from scripts (useful for Google Images)
                for script in soup.find_all('script'):
                    if script.string:
                        urls = re.findall(r'https?://[^\s<>"\']+?(?:\.(?:jpg|jpeg|png|gif|bmp|webp))', script.string)
                        for url in urls:
                            cleaned_url = self.clean_url(url)
                            if self.is_valid_image(cleaned_url):
                                images.append(cleaned_url)
                
            except Exception as e:
                print(f"Error searching images at {source_url}: {str(e)}")
                continue

        # Remove duplicates while preserving order
        unique_images = list(dict.fromkeys(images))
        
        # Filter out small images and suspicious URLs
        filtered_images = [img for img in unique_images if self.is_valid_image(img)]
        
        return filtered_images[:self.max_images]

    def is_image_url(self, url):
        """Check if URL points to an image file"""
        image_extensions = ('.jpg', '.jpeg', '.png', '.gif', '.bmp', '.webp')
        return any(url.lower().endswith(ext) for ext in image_extensions)

    def is_valid_image(self, url):
        """Additional validation for image URLs"""
        try:
            # Reject tiny images (often icons) and tracking pixels
            if re.search(r'(?:icon|pixel|tracker|thumb|logo|button)\d*\.(?:jpg|png|gif)', url.lower()):
                return False
                
            # Avoid suspicious domains for images
            parsed = urlparse(url)
            if any(bad in parsed.netloc.lower() for bad in ["tracker", "pixel", "counter", "ad."]):
                return False
                
            # Avoid very short URLs (likely not valid images)
            if len(url) < 30:
                return False
                
            return True
        except Exception:
            return False
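

# Minimal usage sketch, not part of the class above. Assumptions: network access is
# available and the lxml parser used in fetch_page is installed. The query strings
# below are illustrative placeholders.
if __name__ == "__main__":
    searcher = WebSearch(num_results=3, max_chars_per_page=2000, max_images=5)

    # Text search: each result is a dict with 'link', 'title', and 'text' keys.
    for page in searcher.search("symptoms of seasonal allergies"):
        print(page['title'], '-', page['link'])
        print(page['text'][:200])

    # Image search: returns a de-duplicated, filtered list of image URLs.
    for image_url in searcher.search_images("seasonal allergy infographic"):
        print(image_url)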