File size: 13,966 Bytes
519c06d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
"""

ArXiv Fetcher Component

Fetches and processes research papers from ArXiv

"""

import re
import time
import requests
from typing import List, Dict, Optional, Any
from datetime import datetime, timedelta
import arxiv


class ArxivFetcher:
    """Fetch research papers from ArXiv.

    Wraps the ``arxiv`` client library to provide search, metadata
    extraction, PDF download, and simple keyword-based recommendations.
    All public methods are best-effort: errors are printed and an empty
    result is returned rather than raising to the caller.
    """

    # Common English stop words filtered out during keyword extraction.
    # Defined once at class level so it is not rebuilt on every call.
    _STOP_WORDS = {'the', 'and', 'or', 'but', 'in', 'on', 'at', 'to', 'for', 'of', 'with', 'by', 'a', 'an', 'as', 'is', 'was', 'are', 'were', 'be', 'been', 'have', 'has', 'had', 'do', 'does', 'did', 'will', 'would', 'could', 'should', 'may', 'might', 'must', 'can', 'this', 'that', 'these', 'those', 'we', 'us', 'our', 'you', 'your', 'he', 'him', 'his', 'she', 'her', 'it', 'its', 'they', 'them', 'their'}

    def __init__(self, config=None):
        """Create a fetcher.

        Args:
            config: Optional configuration object. When omitted, a
                ``Config`` from the local package is used if importable,
                otherwise ``self.config`` is left as ``None``.
        """
        if config is None:
            try:
                # Imported lazily to avoid a hard dependency on the package
                # config when this module is used standalone.
                from .config import Config
                self.config = Config()
            except ImportError:
                self.config = None
        else:
            self.config = config
        self.client = arxiv.Client()

    def search_papers(self,
                      query: str,
                      max_results: int = 10,
                      sort_by: str = "relevance",
                      category: Optional[str] = None,
                      date_range: Optional[int] = None) -> List[Dict[str, Any]]:
        """Search for papers on ArXiv.

        Args:
            query: Search query.
            max_results: Maximum number of results.
            sort_by: Sort criteria ('relevance', 'lastUpdatedDate',
                'submittedDate'); unknown values fall back to relevance.
            category: ArXiv category filter (e.g. 'cs.AI', 'cs.LG').
            date_range: If given, keep only papers published within this
                many days of now.

        Returns:
            List of paper dictionaries (empty on error).
        """
        try:
            print(f"Searching ArXiv for: '{query}'")

            # Prepend the category constraint when one was requested.
            search_query = f"cat:{category} AND {query}" if category else query

            sort_criteria = {
                "relevance": arxiv.SortCriterion.Relevance,
                "lastUpdatedDate": arxiv.SortCriterion.LastUpdatedDate,
                "submittedDate": arxiv.SortCriterion.SubmittedDate,
            }.get(sort_by, arxiv.SortCriterion.Relevance)

            search = arxiv.Search(
                query=search_query,
                max_results=max_results,
                sort_by=sort_criteria,
                sort_order=arxiv.SortOrder.Descending,
            )

            # The cutoff is loop-invariant: compute it once, not per result.
            cutoff_date = (datetime.now() - timedelta(days=date_range)
                           if date_range else None)

            papers = []
            for result in self.client.results(search):
                # ArXiv timestamps are timezone-aware; drop tzinfo so the
                # comparison against the naive cutoff is valid.
                if cutoff_date is not None and result.published.replace(tzinfo=None) < cutoff_date:
                    continue
                papers.append(self._extract_paper_info(result))

            print(f"Found {len(papers)} papers")
            return papers

        except Exception as e:
            # Best-effort API: report and return an empty list.
            print(f"Error searching ArXiv: {e}")
            return []

    def get_paper_by_id(self, arxiv_id: str) -> Optional[Dict[str, Any]]:
        """Get a specific paper by ArXiv ID.

        Args:
            arxiv_id: ArXiv paper ID (e.g. '2301.12345').

        Returns:
            Paper dictionary, or ``None`` if not found or on error.
        """
        try:
            print(f"Fetching paper: {arxiv_id}")

            search = arxiv.Search(id_list=[arxiv_id])
            results = list(self.client.results(search))

            if results:
                paper = self._extract_paper_info(results[0])
                print(f"Retrieved paper: {paper['title']}")
                return paper

            print(f"Paper not found: {arxiv_id}")
            return None

        except Exception as e:
            print(f"Error fetching paper {arxiv_id}: {e}")
            return None

    def search_by_author(self, author: str, max_results: int = 20) -> List[Dict[str, Any]]:
        """Search for papers by author.

        Args:
            author: Author name.
            max_results: Maximum number of results.

        Returns:
            List of paper dictionaries, most recently updated first.
        """
        # 'au:' is ArXiv's author field prefix.
        return self.search_papers(f"au:{author}", max_results=max_results,
                                  sort_by="lastUpdatedDate")

    def search_by_category(self, category: str, max_results: int = 20) -> List[Dict[str, Any]]:
        """Search for papers by category.

        Args:
            category: ArXiv category (e.g. 'cs.AI', 'cs.LG', 'stat.ML').
            max_results: Maximum number of results.

        Returns:
            List of paper dictionaries, most recently updated first.
        """
        return self.search_papers(f"cat:{category}", max_results=max_results,
                                  sort_by="lastUpdatedDate")

    def get_trending_papers(self, category: str = "cs.AI", days: int = 7, max_results: int = 10) -> List[Dict[str, Any]]:
        """Get recent papers in a category.

        Args:
            category: ArXiv category.
            days: Only include papers published within this many days.
            max_results: Maximum number of results.

        Returns:
            List of paper dictionaries.
        """
        # Fix: previously delegated to search_by_category(), which silently
        # ignored ``days``. Route through search_papers() so the date filter
        # is actually applied.
        return self.search_papers(
            query=f"cat:{category}",
            max_results=max_results,
            sort_by="submittedDate",
            date_range=days,
        )

    def _extract_paper_info(self, result) -> Dict[str, Any]:
        """Extract paper information from an ArXiv result.

        Args:
            result: ArXiv search result object.

        Returns:
            Paper dictionary; on failure, a placeholder dictionary with an
            'error' key so callers always get a dict.
        """
        try:
            # The entry_id is a URL; the last path segment is the ArXiv ID.
            arxiv_id = result.entry_id.split('/')[-1]

            paper = {
                'arxiv_id': arxiv_id,
                'title': result.title.strip(),
                'authors': [author.name for author in result.authors],
                'summary': result.summary.strip(),
                'published': result.published.isoformat(),
                'updated': result.updated.isoformat(),
                'categories': result.categories,
                'primary_category': result.primary_category,
                'pdf_url': result.pdf_url,
                'entry_id': result.entry_id,
                'journal_ref': result.journal_ref,
                'doi': result.doi,
                'comment': result.comment,
                'links': [{'title': link.title, 'href': link.href} for link in result.links],
                'fetched_at': datetime.now().isoformat()
            }

            # Human-readable summaries, truncated to the first three entries.
            paper['authors_str'] = ', '.join(paper['authors'][:3]) + ('...' if len(paper['authors']) > 3 else '')
            paper['categories_str'] = ', '.join(paper['categories'][:3]) + ('...' if len(paper['categories']) > 3 else '')
            paper['year'] = result.published.year
            paper['month'] = result.published.month

            return paper

        except Exception as e:
            print(f"Error extracting paper info: {e}")
            return {
                'arxiv_id': 'unknown',
                'title': 'Error extracting title',
                'authors': [],
                'summary': 'Error extracting summary',
                'error': str(e)
            }

    def download_pdf(self, paper: Dict[str, Any], download_dir: str = "downloads") -> Optional[str]:
        """Download the PDF for a paper.

        Args:
            paper: Paper dictionary (must contain 'pdf_url' and ideally
                'arxiv_id').
            download_dir: Directory to save the PDF into (created if missing).

        Returns:
            Path to the downloaded PDF, or ``None`` on failure.
        """
        try:
            import os
            os.makedirs(download_dir, exist_ok=True)

            pdf_url = paper.get('pdf_url')
            if not pdf_url:
                print(f"No PDF URL for paper: {paper.get('title', 'Unknown')}")
                return None

            arxiv_id = paper.get('arxiv_id', 'unknown')
            filepath = os.path.join(download_dir, f"{arxiv_id}.pdf")

            # Skip re-downloading files we already have.
            if os.path.exists(filepath):
                print(f"PDF already exists: {filepath}")
                return filepath

            print(f"Downloading PDF: {paper.get('title', 'Unknown')}")

            # Stream to disk in chunks instead of holding the whole PDF
            # in memory via response.content.
            response = requests.get(pdf_url, timeout=30, stream=True)
            response.raise_for_status()

            with open(filepath, 'wb') as f:
                for chunk in response.iter_content(chunk_size=8192):
                    f.write(chunk)

            print(f"PDF downloaded: {filepath}")
            return filepath

        except Exception as e:
            print(f"Error downloading PDF: {e}")
            return None

    def get_paper_recommendations(self, paper_id: str, max_results: int = 5) -> List[Dict[str, Any]]:
        """Get paper recommendations based on a paper's content.

        Args:
            paper_id: ArXiv ID of the base paper.
            max_results: Number of recommendations.

        Returns:
            List of recommended papers (empty on error or unknown ID).
        """
        try:
            base_paper = self.get_paper_by_id(paper_id)
            if not base_paper:
                return []

            title = base_paper.get('title', '')
            summary = base_paper.get('summary', '')

            # Simple frequency-based keyword extraction; the top five terms
            # form the related-papers query.
            keywords = self._extract_keywords(f"{title} {summary}")
            query = ' '.join(keywords[:5])

            # Over-fetch so we can drop the original paper and still return
            # up to max_results recommendations.
            related_papers = self.search_papers(
                query=query,
                max_results=max_results + 5,
                sort_by="relevance"
            )

            recommendations = [p for p in related_papers if p.get('arxiv_id') != paper_id]
            return recommendations[:max_results]

        except Exception as e:
            print(f"Error getting recommendations: {e}")
            return []

    def _extract_keywords(self, text: str) -> List[str]:
        """Simple frequency-based keyword extraction from text.

        Args:
            text: Input text.

        Returns:
            Up to 20 most common non-stop-word terms (lowercase, >= 3
            letters), most frequent first.
        """
        # Counter imported lazily to match the module's local-import style
        # for optional helpers; ``re`` comes from the module-level import.
        from collections import Counter

        words = re.findall(r'\b[a-zA-Z]{3,}\b', text.lower())
        filtered_words = [word for word in words if word not in self._STOP_WORDS]
        word_counts = Counter(filtered_words)

        return [word for word, count in word_counts.most_common(20)]

    def get_categories(self) -> Dict[str, str]:
        """Get available ArXiv categories.

        Returns:
            Dictionary mapping category codes to human-readable descriptions.
        """
        return {
            'cs.AI': 'Artificial Intelligence',
            'cs.LG': 'Machine Learning',
            'cs.CV': 'Computer Vision',
            'cs.CL': 'Computation and Language',
            'cs.NE': 'Neural and Evolutionary Computing',
            'cs.RO': 'Robotics',
            'cs.CR': 'Cryptography and Security',
            'cs.DC': 'Distributed, Parallel, and Cluster Computing',
            'cs.DB': 'Databases',
            'cs.DS': 'Data Structures and Algorithms',
            'cs.HC': 'Human-Computer Interaction',
            'cs.IR': 'Information Retrieval',
            'cs.IT': 'Information Theory',
            'cs.MM': 'Multimedia',
            'cs.NI': 'Networking and Internet Architecture',
            'cs.OS': 'Operating Systems',
            'cs.PL': 'Programming Languages',
            'cs.SE': 'Software Engineering',
            'cs.SY': 'Systems and Control',
            'stat.ML': 'Machine Learning (Statistics)',
            'stat.AP': 'Applications (Statistics)',
            'stat.CO': 'Computation (Statistics)',
            'stat.ME': 'Methodology (Statistics)',
            'stat.TH': 'Statistics Theory',
            'math.ST': 'Statistics Theory (Mathematics)',
            'math.PR': 'Probability (Mathematics)',
            'math.OC': 'Optimization and Control',
            'math.NA': 'Numerical Analysis',
            'eess.AS': 'Audio and Speech Processing',
            'eess.IV': 'Image and Video Processing',
            'eess.SP': 'Signal Processing',
            'eess.SY': 'Systems and Control',
            'q-bio.QM': 'Quantitative Methods',
            'q-bio.NC': 'Neurons and Cognition',
            'physics.data-an': 'Data Analysis, Statistics and Probability'
        }