ginipick commited on
Commit
0c71fbf
Β·
verified Β·
1 Parent(s): 1862e24

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +33 -1475
app.py CHANGED
@@ -1,1491 +1,49 @@
1
- import os, json, re, logging, requests, markdown, time, io
2
- from datetime import datetime
3
- import random
4
- import base64
5
- from io import BytesIO
6
- from PIL import Image
7
  import sys
8
  import streamlit as st
9
- from openai import OpenAI
10
-
11
- from gradio_client import Client
12
- import pandas as pd
13
- import PyPDF2 # For handling PDF files
14
- import kagglehub
15
-
16
- # ──────────────────────────────── Environment Variables / Constants ─────────────────────────
17
- OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "")
18
- BRAVE_KEY = os.getenv("SERPHOUSE_API_KEY", "") # Keep this name
19
- BRAVE_ENDPOINT = "https://api.search.brave.com/res/v1/web/search"
20
- BRAVE_VIDEO_ENDPOINT = "https://api.search.brave.com/res/v1/videos/search"
21
- BRAVE_NEWS_ENDPOINT = "https://api.search.brave.com/res/v1/news/search"
22
- IMAGE_API_URL = "http://211.233.58.201:7896"
23
- MAX_TOKENS = 7999
24
- KAGGLE_API_KEY = os.getenv("KDATA_API", "")
25
-
26
- # Set Kaggle API key
27
- os.environ["KAGGLE_KEY"] = KAGGLE_API_KEY
28
-
29
- # Analysis modes and style definitions
30
- ANALYSIS_MODES = {
31
- "price_forecast": "농산물 가격 예츑과 μ‹œμž₯ 뢄석",
32
- "market_trend": "μ‹œμž₯ 동ν–₯ 및 μˆ˜μš” νŒ¨ν„΄ 뢄석",
33
- "production_analysis": "μƒμ‚°λŸ‰ 뢄석 및 μ‹λŸ‰ μ•ˆλ³΄ 전망",
34
- "agricultural_policy": "농업 μ •μ±… 및 규제 영ν–₯ 뢄석",
35
- "climate_impact": "κΈ°ν›„ λ³€ν™”κ°€ 농업에 λ―ΈμΉ˜λŠ” 영ν–₯ 뢄석"
36
- }
37
-
38
- RESPONSE_STYLES = {
39
- "professional": "전문적이고 ν•™μˆ μ μΈ 뢄석",
40
- "simple": "μ‰½κ²Œ 이해할 수 μžˆλŠ” κ°„κ²°ν•œ μ„€λͺ…",
41
- "detailed": "μƒμ„Έν•œ 톡계 기반 깊이 μžˆλŠ” 뢄석",
42
- "action_oriented": "μ‹€ν–‰ κ°€λŠ₯ν•œ μ‘°μ–Έκ³Ό μΆ”μ²œ 쀑심"
43
- }
44
-
45
- # Example search queries
46
- EXAMPLE_QUERIES = {
47
- "example1": "μŒ€ 가격 μΆ”μ„Έ 및 ν–₯ν›„ 6κ°œμ›” 전망을 λΆ„μ„ν•΄μ£Όμ„Έμš”",
48
- "example2": "κΈ°ν›„ λ³€ν™”λ‘œ ν•œκ΅­ 과일 생산 μ „λž΅κ³Ό μˆ˜μš” 예츑 λ³΄κ³ μ„œλ₯Ό μž‘μ„±ν•˜λΌ.",
49
- "example3": "2025λ…„λΆ€ν„° 2030λ…„κΉŒμ§€ 좩뢁 μ¦ν‰κ΅°μ—μ„œ μž¬λ°°ν•˜λ©΄ μœ λ§ν•œ μž‘λ¬Όμ€? μˆ˜μ΅μ„±κ³Ό 관리성이 μ’‹μ•„μ•Όν•œλ‹€"
50
- }
51
-
52
- # ──────────────────────────────── Logging ────────────────────────────────
53
- logging.basicConfig(level=logging.INFO,
54
- format="%(asctime)s - %(levelname)s - %(message)s")
55
-
56
- # ──────────────────────────────── OpenAI Client ──────────────────────────
57
-
58
- @st.cache_resource
59
- def get_openai_client():
60
- if not OPENAI_API_KEY:
61
- raise RuntimeError("OPENAI_API_KEY ν™˜κ²½ λ³€μˆ˜κ°€ μ„€μ •λ˜μ§€ μ•Šμ•˜μŠ΅λ‹ˆλ‹€.")
62
- return OpenAI(
63
- api_key=OPENAI_API_KEY,
64
- timeout=60.0,
65
- max_retries=3
66
- )
67
-
68
- # ────────────────────────────── Kaggle Dataset Access ──────────────────────
69
- @st.cache_resource
70
- def load_agriculture_dataset():
71
-
72
- try:
73
- path = kagglehub.dataset_download("unitednations/global-food-agriculture-statistics")
74
- logging.info(f"Kaggle dataset downloaded to: {path}")
75
-
76
- # Load metadata about available files
77
- available_files = []
78
- for root, dirs, files in os.walk(path):
79
- for file in files:
80
- if file.endswith('.csv'):
81
- file_path = os.path.join(root, file)
82
- file_size = os.path.getsize(file_path) / (1024 * 1024) # Size in MB
83
- available_files.append({
84
- 'name': file,
85
- 'path': file_path,
86
- 'size_mb': round(file_size, 2)
87
- })
88
-
89
- return {
90
- 'base_path': path,
91
- 'files': available_files
92
- }
93
- except Exception as e:
94
- logging.error(f"Error loading Kaggle dataset: {e}")
95
- return None
96
-
97
# New function to load Advanced Soybean Agricultural Dataset
@st.cache_resource
def load_soybean_dataset():
    """Download the Advanced Soybean Agricultural Dataset (2025) from Kaggle.

    Returns:
        dict | None: {'base_path', 'files'} listing every CSV/XLSX file with
        name, path, and size in MB, or None on failure. Session-cached.
    """
    try:
        path = kagglehub.dataset_download("wisam1985/advanced-soybean-agricultural-dataset-2025")
        logging.info(f"Soybean dataset downloaded to: {path}")

        # Index every spreadsheet-like file under the download root.
        available_files = []
        for root, _subdirs, filenames in os.walk(path):
            for filename in filenames:
                if not filename.endswith(('.csv', '.xlsx')):
                    continue
                full_path = os.path.join(root, filename)
                size_mb = os.path.getsize(full_path) / (1024 * 1024)  # Size in MB
                available_files.append({
                    'name': filename,
                    'path': full_path,
                    'size_mb': round(size_mb, 2)
                })

        return {'base_path': path, 'files': available_files}
    except Exception as e:
        logging.error(f"Error loading Soybean dataset: {e}")
        return None
124
-
125
# Function to load Crop Recommendation Dataset
@st.cache_resource
def load_crop_recommendation_dataset():
    """Download the agricultural crop (recommendation) dataset from Kaggle.

    Returns:
        dict | None: {'base_path', 'files'} listing every CSV/XLSX file with
        name, path, and size in MB, or None on failure. Session-cached.
    """
    try:
        path = kagglehub.dataset_download("agriinnovate/agricultural-crop-dataset")
        logging.info(f"Crop recommendation dataset downloaded to: {path}")

        # Index every spreadsheet-like file under the download root.
        available_files = []
        for root, _subdirs, filenames in os.walk(path):
            for filename in filenames:
                if not filename.endswith(('.csv', '.xlsx')):
                    continue
                full_path = os.path.join(root, filename)
                size_mb = os.path.getsize(full_path) / (1024 * 1024)  # Size in MB
                available_files.append({
                    'name': filename,
                    'path': full_path,
                    'size_mb': round(size_mb, 2)
                })

        return {'base_path': path, 'files': available_files}
    except Exception as e:
        logging.error(f"Error loading Crop recommendation dataset: {e}")
        return None
152
-
153
# Function to load Climate Change Impact Dataset
@st.cache_resource
def load_climate_impact_dataset():
    """Download the climate-change impact on agriculture dataset from Kaggle.

    Returns:
        dict | None: {'base_path', 'files'} listing every CSV/XLSX file with
        name, path, and size in MB, or None on failure. Session-cached.
    """
    try:
        path = kagglehub.dataset_download("waqi786/climate-change-impact-on-agriculture")
        logging.info(f"Climate impact dataset downloaded to: {path}")

        # Index every spreadsheet-like file under the download root.
        available_files = []
        for root, _subdirs, filenames in os.walk(path):
            for filename in filenames:
                if not filename.endswith(('.csv', '.xlsx')):
                    continue
                full_path = os.path.join(root, filename)
                size_mb = os.path.getsize(full_path) / (1024 * 1024)  # Size in MB
                available_files.append({
                    'name': filename,
                    'path': full_path,
                    'size_mb': round(size_mb, 2)
                })

        return {'base_path': path, 'files': available_files}
    except Exception as e:
        logging.error(f"Error loading Climate impact dataset: {e}")
        return None
-
181
def get_dataset_summary():
    """Build a markdown overview of the UN food & agriculture dataset.

    Lists up to ten of the downloaded CSV files with their sizes, then appends
    a 5-row sample and a column checklist taken from the first file.

    Returns:
        str: markdown summary, or an English error string when the dataset
        could not be loaded.
    """
    dataset_info = load_agriculture_dataset()
    if not dataset_info:
        return "Failed to load the UN global food and agriculture statistics dataset."

    files = dataset_info['files']
    parts = ["# UN κΈ€λ‘œλ²Œ μ‹λŸ‰ 및 농업 톡계 데이터셋\n\n"]
    parts.append(f"총 {len(files)}개의 CSV 파일이 ν¬ν•¨λ˜μ–΄ μžˆμŠ΅λ‹ˆλ‹€.\n\n")

    # Enumerate at most the first ten files so the summary stays short.
    parts.append("## μ‚¬μš© κ°€λŠ₯ν•œ 데이터 파일:\n\n")
    for position, entry in enumerate(files[:10], 1):
        parts.append(f"{position}. **{entry['name']}** ({entry['size_mb']} MB)\n")

    if len(files) > 10:
        parts.append(f"\n...μ™Έ {len(files) - 10}개 파일\n")

    # Show a tiny schema sample from the first file; failures are non-fatal.
    try:
        if files:
            sample_df = pd.read_csv(files[0]['path'], nrows=5)
            parts.append("\n## 데이터 μƒ˜ν”Œ ꡬ쑰:\n\n")
            parts.append(sample_df.head(5).to_markdown() + "\n\n")
            parts.append("## 데이터셋 λ³€μˆ˜ μ„€λͺ…:\n\n")
            for col in sample_df.columns:
                parts.append(f"- **{col}**: [λ³€μˆ˜ μ„€λͺ… ν•„μš”]\n")
    except Exception as e:
        logging.error(f"Error generating dataset sample: {e}")
        parts.append("\n데이터 μƒ˜ν”Œμ„ μƒμ„±ν•˜λŠ” 쀑 였λ₯˜κ°€ λ°œμƒν–ˆμŠ΅λ‹ˆλ‹€.\n")

    return "".join(parts)
-
215
def analyze_dataset_for_query(query):
    """Produce a markdown analysis of UN agriculture files relevant to *query*.

    Maps Korean keywords in the query to English terms, selects up to three
    matching CSV files by filename (falling back to the first five files when
    nothing matches), and summarizes each: dimensions, sample rows, numeric
    statistics, and a note when time-series columns are present.

    Args:
        query: free-text user query (Korean keywords are matched directly).

    Returns:
        str: markdown report, or a Korean error message when the dataset
        cannot be loaded.
    """
    dataset_info = load_agriculture_dataset()
    if not dataset_info:
        return "데이터셋을 뢈러올 수 μ—†μŠ΅λ‹ˆλ‹€. Kaggle API 연결을 ν™•μΈν•΄μ£Όμ„Έμš”."

    # Extract key terms from the query
    query_lower = query.lower()

    # Define keywords to look for in the dataset files
    # (Korean query term -> English filename terms to search for).
    keywords = {
        "μŒ€": ["rice", "grain"],
        "λ°€": ["wheat", "grain"],
        "μ˜₯수수": ["corn", "maize", "grain"],
        "μ±„μ†Œ": ["vegetable", "produce"],
        "과일": ["fruit", "produce"],
        "가격": ["price", "cost", "value"],
        "생산": ["production", "yield", "harvest"],
        "수좜": ["export", "trade"],
        "μˆ˜μž…": ["import", "trade"],
        "μ†ŒλΉ„": ["consumption", "demand"]
    }

    # Find relevant files based on the query
    relevant_files = []

    # First check for Korean keywords in the query
    found_keywords = []
    for k_term, e_terms in keywords.items():
        if k_term in query_lower:
            # Keep both the Korean term and its English expansions.
            found_keywords.extend([k_term] + e_terms)

    # If no Korean keywords found, check for English terms in the filenames
    if not found_keywords:
        # Generic search through all files
        relevant_files = dataset_info['files'][:5]  # Take first 5 files as default
    else:
        # Search for files related to the found keywords
        for file_info in dataset_info['files']:
            file_name_lower = file_info['name'].lower()
            for keyword in found_keywords:
                if keyword.lower() in file_name_lower:
                    relevant_files.append(file_info)
                    break  # one keyword hit is enough for this file

    # If still no relevant files, take the first 5 files
    if not relevant_files:
        relevant_files = dataset_info['files'][:5]

    # Read and analyze the relevant files
    analysis_result = "# 농업 데이터 뢄석 κ²°κ³Ό\n\n"
    analysis_result += f"쿼리: '{query}'에 λŒ€ν•œ 뢄석을 μˆ˜ν–‰ν–ˆμŠ΅λ‹ˆλ‹€.\n\n"

    if found_keywords:
        analysis_result += f"## 뢄석 ν‚€μ›Œλ“œ: {', '.join(set(found_keywords))}\n\n"

    # Process each relevant file
    for file_info in relevant_files[:3]:  # Limit to 3 files for performance
        try:
            analysis_result += f"## 파일: {file_info['name']}\n\n"

            # Read the CSV file
            # NOTE(review): reads the whole file — large CSVs may be slow here.
            df = pd.read_csv(file_info['path'])

            # Basic file stats
            analysis_result += f"- ν–‰ 수: {len(df)}\n"
            analysis_result += f"- μ—΄ 수: {len(df.columns)}\n"
            analysis_result += f"- μ—΄ λͺ©λ‘: {', '.join(df.columns.tolist())}\n\n"

            # Sample data
            analysis_result += "### 데이터 μƒ˜ν”Œ:\n\n"
            analysis_result += df.head(5).to_markdown() + "\n\n"

            # Statistical summary of numeric columns
            numeric_cols = df.select_dtypes(include=['number']).columns
            if len(numeric_cols) > 0:
                analysis_result += "### κΈ°λ³Έ 톡계:\n\n"
                stats_df = df[numeric_cols].describe()
                analysis_result += stats_df.to_markdown() + "\n\n"

            # Time series analysis if possible
            time_cols = [col for col in df.columns if 'year' in col.lower() or 'date' in col.lower()]
            if time_cols:
                analysis_result += "### μ‹œκ³„μ—΄ νŒ¨ν„΄:\n\n"
                analysis_result += "데이터셋에 μ‹œκ°„ κ΄€λ ¨ 열이 μžˆμ–΄ μ‹œκ³„μ—΄ 뢄석이 κ°€λŠ₯ν•©λ‹ˆλ‹€.\n\n"

        except Exception as e:
            # Per-file failures are reported inline and do not abort the report.
            logging.error(f"Error analyzing file {file_info['name']}: {e}")
            analysis_result += f"이 파일 뢄석 쀑 였λ₯˜κ°€ λ°œμƒν–ˆμŠ΅λ‹ˆλ‹€: {str(e)}\n\n"

    analysis_result += "## 농산물 가격 예츑 및 μˆ˜μš” 뢄석에 λŒ€ν•œ μΈμ‚¬μ΄νŠΈ\n\n"
    analysis_result += "λ°μ΄ν„°μ…‹μ—μ„œ μΆ”μΆœν•œ 정보λ₯Ό λ°”νƒ•μœΌλ‘œ λ‹€μŒ μΈμ‚¬μ΄νŠΈλ₯Ό μ œκ³΅ν•©λ‹ˆλ‹€:\n\n"
    analysis_result += "1. 데이터 기반 뢄석 (기본적인 μš”μ•½)\n"
    analysis_result += "2. μ£Όμš” 가격 및 μˆ˜μš” 동ν–₯\n"
    analysis_result += "3. μƒμ‚°λŸ‰ 및 무역 νŒ¨ν„΄\n\n"

    analysis_result += "이 뢄석은 UN κΈ€λ‘œλ²Œ μ‹λŸ‰ 및 농업 톡계 데이터셋을 기반으둜 ν•©λ‹ˆλ‹€.\n\n"

    return analysis_result
-
315
# Function to analyze crop recommendation dataset
def analyze_crop_recommendation_dataset(query):
    """Summarize the crop-recommendation dataset with respect to *query*.

    For up to two files: reports dimensions, detected crop column, environment
    factor columns, a 5-row sample, numeric statistics, and per-crop summaries
    for crops whose name matches a query term.

    Args:
        query: free-text user query; its lowercased tokens are matched against
            crop names.

    Returns:
        str: markdown report, or a Korean error message on failure.
    """
    try:
        dataset_info = load_crop_recommendation_dataset()
        if not dataset_info or not dataset_info['files']:
            return "μž‘λ¬Ό μΆ”μ²œ 데이터셋을 뢈러올 수 μ—†μŠ΅λ‹ˆλ‹€."

        analysis_result = "# ν† μ–‘ 및 ν™˜κ²½ λ³€μˆ˜ 기반 μž‘λ¬Ό μΆ”μ²œ 데이터 뢄석\n\n"

        # Process main files
        for file_info in dataset_info['files'][:2]:  # Limit to the first 2 files
            try:
                analysis_result += f"## 파일: {file_info['name']}\n\n"

                if file_info['name'].endswith('.csv'):
                    df = pd.read_csv(file_info['path'])
                elif file_info['name'].endswith('.xlsx'):
                    df = pd.read_excel(file_info['path'])
                else:
                    continue

                # Basic dataset info
                analysis_result += f"- 데이터 크기: {len(df)} ν–‰ Γ— {len(df.columns)} μ—΄\n"
                analysis_result += f"- ν¬ν•¨λœ μž‘λ¬Ό μ’…λ₯˜: "

                # Check if crop column exists
                crop_cols = [col for col in df.columns if 'crop' in col.lower() or 'μž‘λ¬Ό' in col.lower()]
                if crop_cols:
                    main_crop_col = crop_cols[0]
                    unique_crops = df[main_crop_col].unique()
                    analysis_result += f"{len(unique_crops)}μ’… ({', '.join(str(c) for c in unique_crops[:10])})\n\n"
                else:
                    analysis_result += "μž‘λ¬Ό 정보 열을 찾을 수 μ—†μŒ\n\n"

                # Extract environmental factors
                env_factors = [col for col in df.columns if col.lower() not in ['crop', 'label', 'id', 'index']]
                if env_factors:
                    analysis_result += f"- 고렀된 ν™˜κ²½ μš”μ†Œ: {', '.join(env_factors)}\n\n"

                # Sample data
                analysis_result += "### 데이터 μƒ˜ν”Œ:\n\n"
                analysis_result += df.head(5).to_markdown() + "\n\n"

                # Summary statistics for environmental factors
                # FIX: numeric_factors was only bound inside the `if env_factors:`
                # branch but read later in the per-crop section, raising NameError
                # for files with no environment columns. Initialize it up front.
                numeric_factors = []
                if env_factors:
                    numeric_factors = df[env_factors].select_dtypes(include=['number']).columns
                    if len(numeric_factors) > 0:
                        analysis_result += "### ν™˜κ²½ μš”μ†Œ 톡계:\n\n"
                        stats_df = df[numeric_factors].describe().round(2)
                        analysis_result += stats_df.to_markdown() + "\n\n"

                # Check for query-specific crops
                query_terms = query.lower().split()
                relevant_crops = []

                if crop_cols:
                    for crop in df[main_crop_col].unique():
                        crop_str = str(crop).lower()
                        if any(term in crop_str for term in query_terms):
                            relevant_crops.append(crop)

                if relevant_crops:
                    analysis_result += f"### 쿼리 κ΄€λ ¨ μž‘λ¬Ό 뢄석: {', '.join(str(c) for c in relevant_crops)}\n\n"
                    for crop in relevant_crops[:3]:  # Limit to 3 crops
                        crop_data = df[df[main_crop_col] == crop]
                        analysis_result += f"#### {crop} μž‘λ¬Ό μš”μ•½:\n\n"
                        analysis_result += f"- μƒ˜ν”Œ 수: {len(crop_data)}개\n"

                        if len(numeric_factors) > 0:
                            crop_stats = crop_data[numeric_factors].describe().round(2)
                            analysis_result += f"- 평균 ν™˜κ²½ 쑰건:\n"
                            for factor in numeric_factors[:5]:  # Limit to 5 factors
                                analysis_result += f" * {factor}: {crop_stats.loc['mean', factor]}\n"
                            analysis_result += "\n"

            except Exception as e:
                # Per-file failures are reported inline; the report continues.
                logging.error(f"Error analyzing crop recommendation file {file_info['name']}: {e}")
                analysis_result += f"뢄석 였λ₯˜: {str(e)}\n\n"

        analysis_result += "## μž‘λ¬Ό μΆ”μ²œ μΈμ‚¬μ΄νŠΈ\n\n"
        analysis_result += "ν† μ–‘ 및 ν™˜κ²½ λ³€μˆ˜ 데이터셋 뢄석 κ²°κ³Ό, λ‹€μŒκ³Ό 같은 μ£Όμš” μΈμ‚¬μ΄νŠΈλ₯Ό μ œκ³΅ν•©λ‹ˆλ‹€:\n\n"
        analysis_result += "1. μ§€μ—­ ν™˜κ²½μ— μ ν•©ν•œ μž‘λ¬Ό μΆ”μ²œ\n"
        analysis_result += "2. μž‘λ¬Ό 생산성에 영ν–₯을 λ―ΈμΉ˜λŠ” μ£Όμš” ν™˜κ²½ μš”μΈ\n"
        analysis_result += "3. 지속 κ°€λŠ₯ν•œ 농업을 μœ„ν•œ 졜적의 μž‘λ¬Ό 선택 κΈ°μ€€\n\n"

        return analysis_result

    except Exception as e:
        logging.error(f"Crop recommendation dataset analysis error: {e}")
        return "μž‘λ¬Ό μΆ”μ²œ 데이터셋 뢄석 쀑 였λ₯˜κ°€ λ°œμƒν–ˆμŠ΅λ‹ˆλ‹€."
406
-
407
# Function to analyze climate impact dataset
def analyze_climate_impact_dataset(query):
    """Summarize the climate-change impact dataset in markdown.

    For up to two files: reports dimensions, detected region/climate/crop
    columns, a 5-row sample, numeric statistics, and a climate-vs-crop
    correlation table when both variable groups contain numeric columns.

    Args:
        query: free-text user query (currently unused in the analysis body;
            kept for interface parity with the other analyzers).

    Returns:
        str: markdown report, or a Korean error message on failure.
    """
    try:
        dataset_info = load_climate_impact_dataset()
        if not dataset_info or not dataset_info['files']:
            return "κΈ°ν›„ λ³€ν™” 영ν–₯ 데이터셋을 뢈러올 수 μ—†μŠ΅λ‹ˆλ‹€."

        analysis_result = "# κΈ°ν›„ λ³€ν™”κ°€ 농업에 λ―ΈμΉ˜λŠ” 영ν–₯ 데이터 뢄석\n\n"

        # Process main files
        for file_info in dataset_info['files'][:2]:  # Limit to first 2 files
            try:
                analysis_result += f"## 파일: {file_info['name']}\n\n"

                if file_info['name'].endswith('.csv'):
                    df = pd.read_csv(file_info['path'])
                elif file_info['name'].endswith('.xlsx'):
                    df = pd.read_excel(file_info['path'])
                else:
                    continue

                # Basic dataset info
                analysis_result += f"- 데이터 크기: {len(df)} ν–‰ Γ— {len(df.columns)} μ—΄\n"

                # Check for region column
                region_cols = [col for col in df.columns if 'region' in col.lower() or 'country' in col.lower() or 'μ§€μ—­' in col.lower()]
                if region_cols:
                    main_region_col = region_cols[0]
                    regions = df[main_region_col].unique()
                    analysis_result += f"- ν¬ν•¨λœ μ§€μ—­: {len(regions)}개 ({', '.join(str(r) for r in regions[:5])})\n"

                # Identify climate and crop related columns
                climate_cols = [col for col in df.columns if any(term in col.lower() for term in
                                ['temp', 'rainfall', 'precipitation', 'climate', 'weather', '기온', 'κ°•μˆ˜λŸ‰'])]
                crop_cols = [col for col in df.columns if any(term in col.lower() for term in
                             ['yield', 'production', 'crop', 'harvest', 'μˆ˜ν™•λŸ‰', 'μƒμ‚°λŸ‰'])]

                if climate_cols:
                    analysis_result += f"- κΈ°ν›„ κ΄€λ ¨ λ³€μˆ˜: {', '.join(climate_cols)}\n"
                if crop_cols:
                    analysis_result += f"- μž‘λ¬Ό κ΄€λ ¨ λ³€μˆ˜: {', '.join(crop_cols)}\n\n"

                # Sample data
                analysis_result += "### 데이터 μƒ˜ν”Œ:\n\n"
                analysis_result += df.head(5).to_markdown() + "\n\n"

                # Time series pattern if available
                year_cols = [col for col in df.columns if 'year' in col.lower() or 'date' in col.lower() or '연도' in col.lower()]
                if year_cols:
                    analysis_result += "### μ‹œκ³„μ—΄ κΈ°ν›„ 영ν–₯ νŒ¨ν„΄:\n\n"
                    analysis_result += "이 데이터셋은 μ‹œκ°„μ— λ”°λ₯Έ κΈ°ν›„ 변화와 농업 생산성 κ°„μ˜ 관계λ₯Ό 뢄석할 수 μžˆμŠ΅λ‹ˆλ‹€.\n\n"

                # Statistical summary of key variables
                key_vars = climate_cols + crop_cols
                numeric_vars = df[key_vars].select_dtypes(include=['number']).columns
                if len(numeric_vars) > 0:
                    analysis_result += "### μ£Όμš” λ³€μˆ˜ 톡계:\n\n"
                    stats_df = df[numeric_vars].describe().round(2)
                    analysis_result += stats_df.to_markdown() + "\n\n"

                # Check for correlations between climate and crop variables
                if len(climate_cols) > 0 and len(crop_cols) > 0:
                    numeric_climate = df[climate_cols].select_dtypes(include=['number']).columns
                    numeric_crop = df[crop_cols].select_dtypes(include=['number']).columns

                    if len(numeric_climate) > 0 and len(numeric_crop) > 0:
                        analysis_result += "### 기후와 μž‘λ¬Ό 생산 κ°„μ˜ 상관관계:\n\n"
                        try:
                            corr_vars = list(numeric_climate)[:2] + list(numeric_crop)[:2]  # Limit to 2 of each type
                            corr_df = df[corr_vars].corr().round(3)
                            analysis_result += corr_df.to_markdown() + "\n\n"
                            analysis_result += "μœ„ 상관관계 ν‘œλŠ” κΈ°ν›„ λ³€μˆ˜μ™€ μž‘λ¬Ό 생산성 κ°„μ˜ 관계 강도λ₯Ό λ³΄μ—¬μ€λ‹ˆλ‹€.\n\n"
                        # FIX: was a bare `except:` that also swallowed
                        # KeyboardInterrupt/SystemExit and hid the cause.
                        except Exception as e:
                            logging.error(f"Correlation computation failed for {file_info['name']}: {e}")
                            analysis_result += "상관관계 계산 쀑 였λ₯˜κ°€ λ°œμƒν–ˆμŠ΅λ‹ˆλ‹€.\n\n"

            except Exception as e:
                # Per-file failures are reported inline; the report continues.
                logging.error(f"Error analyzing climate impact file {file_info['name']}: {e}")
                analysis_result += f"뢄석 였λ₯˜: {str(e)}\n\n"

        analysis_result += "## κΈ°ν›„ λ³€ν™” 영ν–₯ μΈμ‚¬μ΄νŠΈ\n\n"
        analysis_result += "κΈ°ν›„ λ³€ν™”κ°€ 농업에 λ―ΈμΉ˜λŠ” 영ν–₯ 데이터 뢄석 κ²°κ³Ό, λ‹€μŒκ³Ό 같은 μΈμ‚¬μ΄νŠΈλ₯Ό μ œκ³΅ν•©λ‹ˆλ‹€:\n\n"
        analysis_result += "1. 기온 변화에 λ”°λ₯Έ μž‘λ¬Ό 생산성 변동 νŒ¨ν„΄\n"
        analysis_result += "2. κ°•μˆ˜λŸ‰ λ³€ν™”κ°€ 농업 μˆ˜ν™•λŸ‰μ— λ―ΈμΉ˜λŠ” 영ν–₯\n"
        analysis_result += "3. κΈ°ν›„ 변화에 λŒ€μ‘ν•˜κΈ° μœ„ν•œ 농업 μ „λž΅ μ œμ•ˆ\n"
        analysis_result += "4. 지역별 κΈ°ν›„ μ·¨μ•½μ„± 및 적응 λ°©μ•ˆ\n\n"

        return analysis_result

    except Exception as e:
        logging.error(f"Climate impact dataset analysis error: {e}")
        return "κΈ°ν›„ λ³€ν™” 영ν–₯ 데이터셋 뢄석 쀑 였λ₯˜κ°€ λ°œμƒν–ˆμŠ΅λ‹ˆλ‹€."
499
-
500
# Function to analyze soybean dataset if selected
def analyze_soybean_dataset(query):
    """Summarize the advanced soybean agricultural dataset in markdown.

    For up to two files: reports dimensions, detected location/yield/
    environment columns, a 5-row sample, numeric statistics, and an
    environment-vs-yield correlation table when both groups have numeric
    columns.

    Args:
        query: free-text user query (currently unused in the analysis body;
            kept for interface parity with the other analyzers).

    Returns:
        str: markdown report, or a Korean error message on failure.
    """
    try:
        dataset_info = load_soybean_dataset()
        if not dataset_info or not dataset_info['files']:
            return "λŒ€λ‘ 농업 데이터셋을 뢈러올 수 μ—†μŠ΅λ‹ˆλ‹€."

        analysis_result = "# κ³ κΈ‰ λŒ€λ‘ 농업 데이터 뢄석\n\n"

        # Process main files
        for file_info in dataset_info['files'][:2]:  # Limit to the first 2 files
            try:
                analysis_result += f"## 파일: {file_info['name']}\n\n"

                if file_info['name'].endswith('.csv'):
                    df = pd.read_csv(file_info['path'])
                elif file_info['name'].endswith('.xlsx'):
                    df = pd.read_excel(file_info['path'])
                else:
                    continue

                # Basic file stats
                analysis_result += f"- 데이터 크기: {len(df)} ν–‰ Γ— {len(df.columns)} μ—΄\n"

                # Check for region/location columns
                location_cols = [col for col in df.columns if any(term in col.lower() for term in
                                 ['region', 'location', 'area', 'country', 'μ§€μ—­'])]
                if location_cols:
                    main_loc_col = location_cols[0]
                    locations = df[main_loc_col].unique()
                    analysis_result += f"- ν¬ν•¨λœ μ§€μ—­: {len(locations)}개 ({', '.join(str(loc) for loc in locations[:5])})\n"

                # Identify yield and production columns
                yield_cols = [col for col in df.columns if any(term in col.lower() for term in
                              ['yield', 'production', 'harvest', 'μˆ˜ν™•λŸ‰', 'μƒμ‚°λŸ‰'])]
                if yield_cols:
                    analysis_result += f"- 생산성 κ΄€λ ¨ λ³€μˆ˜: {', '.join(yield_cols)}\n"

                # Identify environmental factors
                env_cols = [col for col in df.columns if any(term in col.lower() for term in
                            ['temp', 'rainfall', 'soil', 'fertilizer', 'nutrient', 'irrigation',
                             '기온', 'κ°•μˆ˜λŸ‰', 'ν† μ–‘', 'λΉ„λ£Œ', 'κ΄€κ°œ'])]
                if env_cols:
                    analysis_result += f"- ν™˜κ²½ κ΄€λ ¨ λ³€μˆ˜: {', '.join(env_cols)}\n\n"

                # Sample data
                analysis_result += "### 데이터 μƒ˜ν”Œ:\n\n"
                analysis_result += df.head(5).to_markdown() + "\n\n"

                # Statistical summary of key variables
                key_vars = yield_cols + env_cols
                numeric_vars = df[key_vars].select_dtypes(include=['number']).columns
                if len(numeric_vars) > 0:
                    analysis_result += "### μ£Όμš” λ³€μˆ˜ 톡계:\n\n"
                    stats_df = df[numeric_vars].describe().round(2)
                    analysis_result += stats_df.to_markdown() + "\n\n"

                # Time series analysis if possible
                year_cols = [col for col in df.columns if 'year' in col.lower() or 'date' in col.lower() or '연도' in col.lower()]
                if year_cols:
                    analysis_result += "### μ‹œκ³„μ—΄ 생산성 νŒ¨ν„΄:\n\n"
                    analysis_result += "이 데이터셋은 μ‹œκ°„μ— λ”°λ₯Έ λŒ€λ‘ μƒμ‚°μ„±μ˜ λ³€ν™”λ₯Ό 좔적할 수 μžˆμŠ΅λ‹ˆλ‹€.\n\n"

                # Check for correlations between environmental factors and yield
                if len(env_cols) > 0 and len(yield_cols) > 0:
                    numeric_env = df[env_cols].select_dtypes(include=['number']).columns
                    numeric_yield = df[yield_cols].select_dtypes(include=['number']).columns

                    if len(numeric_env) > 0 and len(numeric_yield) > 0:
                        analysis_result += "### ν™˜κ²½ μš”μ†Œμ™€ λŒ€λ‘ 생산성 κ°„μ˜ 상관관계:\n\n"
                        try:
                            corr_vars = list(numeric_env)[:3] + list(numeric_yield)[:2]  # Limit variables
                            corr_df = df[corr_vars].corr().round(3)
                            analysis_result += corr_df.to_markdown() + "\n\n"
                        # FIX: was a bare `except:` that also swallowed
                        # KeyboardInterrupt/SystemExit and hid the cause.
                        except Exception as e:
                            logging.error(f"Correlation computation failed for {file_info['name']}: {e}")
                            analysis_result += "상관관계 계산 쀑 였λ₯˜κ°€ λ°œμƒν–ˆμŠ΅λ‹ˆλ‹€.\n\n"

            except Exception as e:
                # Per-file failures are reported inline; the report continues.
                logging.error(f"Error analyzing soybean file {file_info['name']}: {e}")
                analysis_result += f"뢄석 였λ₯˜: {str(e)}\n\n"

        analysis_result += "## λŒ€λ‘ 농업 μΈμ‚¬μ΄νŠΈ\n\n"
        analysis_result += "κ³ κΈ‰ λŒ€λ‘ 농업 데이터셋 뢄석 κ²°κ³Ό, λ‹€μŒκ³Ό 같은 μΈμ‚¬μ΄νŠΈλ₯Ό μ œκ³΅ν•©λ‹ˆλ‹€:\n\n"
        analysis_result += "1. 졜적의 λŒ€λ‘ 생산을 μœ„ν•œ ν™˜κ²½ 쑰건\n"
        analysis_result += "2. 지역별 λŒ€λ‘ 생산성 λ³€ν™” νŒ¨ν„΄\n"
        analysis_result += "3. 생산성 ν–₯상을 μœ„ν•œ 농업 기술 및 접근법\n"
        analysis_result += "4. μ‹œμž₯ μˆ˜μš”μ— λ§žλŠ” λŒ€λ‘ ν’ˆμ’… 선택 κ°€μ΄λ“œ\n\n"

        return analysis_result

    except Exception as e:
        logging.error(f"Soybean dataset analysis error: {e}")
        return "λŒ€λ‘ 농업 데이터셋 뢄석 쀑 였λ₯˜κ°€ λ°œμƒν–ˆμŠ΅λ‹ˆλ‹€."
594
-
595
- # ──────────��───────────────────── System Prompt ─────────────────────────
596
- def get_system_prompt(mode="price_forecast", style="professional", include_search_results=True, include_uploaded_files=False) -> str:
597
-
598
- base_prompt = """
599
- 당신은 농업 데이터 μ „λ¬Έκ°€λ‘œμ„œ 농산물 가격 예츑과 μˆ˜μš” 뢄석을 μˆ˜ν–‰ν•˜λŠ” AI μ–΄μ‹œμŠ€ν„΄νŠΈμž…λ‹ˆλ‹€.
600
-
601
- μ£Όμš” μž„λ¬΄:
602
- 1. UN κΈ€λ‘œλ²Œ μ‹λŸ‰ 및 농업 톡계 데이터셋을 기반으둜 농산물 μ‹œμž₯ 뢄석
603
- 2. 농산물 가격 μΆ”μ„Έ 예츑 및 μˆ˜μš” νŒ¨ν„΄ 뢄석
604
- 3. 데이터λ₯Ό λ°”νƒ•μœΌλ‘œ λͺ…ν™•ν•˜κ³  κ·Όκ±° μžˆλŠ” 뢄석 제곡
605
- 4. κ΄€λ ¨ 정보와 μΈμ‚¬μ΄νŠΈλ₯Ό μ²΄κ³„μ μœΌλ‘œ κ΅¬μ„±ν•˜μ—¬ μ œμ‹œ
606
- 5. μ‹œκ°μ  이해λ₯Ό 돕기 μœ„ν•΄ 차트, κ·Έλž˜ν”„ 등을 적절히 ν™œμš©
607
- 6. ν† μ–‘ 및 ν™˜κ²½ λ³€μˆ˜ 기반 μž‘λ¬Ό μΆ”μ²œ λ°μ΄ν„°μ…‹μ—μ„œ μΆ”μΆœν•œ μΈμ‚¬μ΄νŠΈ 적용
608
- 7. κΈ°ν›„ λ³€ν™”κ°€ 농업에 λ―ΈμΉ˜λŠ” 영ν–₯ 데이터셋을 ν†΅ν•œ ν™˜κ²½ λ³€ν™” μ‹œλ‚˜λ¦¬μ˜€ 뢄석
609
-
610
- μ€‘μš” κ°€μ΄λ“œλΌμΈ:
611
- - 데이터에 κΈ°λ°˜ν•œ 객관적 뢄석을 μ œκ³΅ν•˜μ„Έμš”
612
- - 뢄석 κ³Όμ •κ³Ό 방법둠을 λͺ…ν™•νžˆ μ„€λͺ…ν•˜μ„Έμš”
613
- - 톡계적 μ‹ λ’°μ„±κ³Ό ν•œκ³„μ μ„ 투λͺ…ν•˜κ²Œ μ œμ‹œν•˜μ„Έμš”
614
- - μ΄ν•΄ν•˜κΈ° μ‰¬μš΄ μ‹œκ°μ  μš”μ†Œλ‘œ 뢄석 κ²°κ³Όλ₯Ό λ³΄μ™„ν•˜μ„Έμš”
615
- - λ§ˆν¬λ‹€μš΄μ„ ν™œμš©ν•΄ 응닡을 μ²΄κ³„μ μœΌλ‘œ κ΅¬μ„±ν•˜μ„Έμš”"""
616
-
617
- mode_prompts = {
618
- "price_forecast": """
619
- 농산물 가격 예츑 및 μ‹œμž₯ 뢄석에 μ§‘μ€‘ν•©λ‹ˆλ‹€:
620
- - κ³Όκ±° 가격 데이터 νŒ¨ν„΄μ— κΈ°λ°˜ν•œ 예츑 제곡
621
- - 가격 변동성 μš”μΈ 뢄석(κ³„μ ˆμ„±, 날씨, μ •μ±… λ“±)
622
- - 단기 및 쀑μž₯κΈ° 가격 전망 μ œμ‹œ
623
- - 가격에 영ν–₯을 λ―ΈμΉ˜λŠ” κ΅­λ‚΄μ™Έ μš”μΈ 식별
624
- - μ‹œμž₯ λΆˆν™•μ‹€μ„±κ³Ό 리슀크 μš”μ†Œ κ°•μ‘°""",
625
-
626
- "market_trend": """
627
- μ‹œμž₯ 동ν–₯ 및 μˆ˜μš” νŒ¨ν„΄ 뢄석에 μ§‘μ€‘ν•©λ‹ˆλ‹€:
628
- - μ£Όμš” 농산물 μˆ˜μš” λ³€ν™” νŒ¨ν„΄ 식별
629
- - μ†ŒλΉ„μž μ„ ν˜Έλ„ 및 ꡬ맀 행동 뢄석
630
- - μ‹œμž₯ μ„Έκ·Έλ¨ΌνŠΈ 및 ν‹ˆμƒˆμ‹œμž₯ 기회 탐색
631
- - μ‹œμž₯ ν™•λŒ€/μΆ•μ†Œ νŠΈλ Œλ“œ 평가
632
- - μˆ˜μš” 탄λ ₯μ„± 및 가격 민감도 뢄석""",
633
-
634
- "production_analysis": """
635
- μƒμ‚°λŸ‰ 뢄석 및 μ‹λŸ‰ μ•ˆλ³΄ 전망에 μ§‘μ€‘ν•©λ‹ˆλ‹€:
636
- - μž‘λ¬Ό μƒμ‚°λŸ‰ μΆ”μ„Έ 및 변동 μš”μΈ 뢄석
637
- - μ‹λŸ‰ 생산과 인ꡬ μ„±μž₯ κ°„μ˜ 관계 평가
638
- - κ΅­κ°€/지역별 생산 μ—­λŸ‰ 비ꡐ
639
- - μ‹λŸ‰ μ•ˆλ³΄ μœ„ν˜‘ μš”μ†Œ 및 취약점 식별
640
- - 생산성 ν–₯상 μ „λž΅ 및 기회 μ œμ•ˆ""",
641
-
642
- "agricultural_policy": """
643
- 농업 μ •μ±… 및 규제 영ν–₯ 뢄석에 μ§‘μ€‘ν•©λ‹ˆλ‹€:
644
- - μ •λΆ€ μ •μ±…κ³Ό, 보쑰금, 규제의 μ‹œμž₯ 영ν–₯ 뢄석
645
- - ꡭ제 무역 μ •μ±…κ³Ό κ΄€μ„Έμ˜ 농산물 가격 영ν–₯ 평가
646
- - 농업 지원 ν”„λ‘œκ·Έλž¨μ˜ νš¨κ³Όμ„± κ²€ν† 
647
- - 규제 ν™˜κ²½ 변화에 λ”°λ₯Έ μ‹œμž₯ μ‘°μ • 예츑
648
- - 정책적 κ°œμž…μ˜ μ˜λ„λœ/μ˜λ„μΉ˜ μ•Šμ€ κ²°κ³Ό 뢄석""",
649
-
650
- "climate_impact": """
651
- κΈ°ν›„ λ³€ν™”κ°€ 농업에 λ―ΈμΉ˜λŠ” 영ν–₯ 뢄석에 μ§‘μ€‘ν•©λ‹ˆλ‹€:
652
- - κΈ°ν›„ 변화와 농산물 μƒμ‚°λŸ‰/ν’ˆμ§ˆ κ°„μ˜ 상관관계 뢄석
653
- - 기상 이변이 가격 변동성에 λ―ΈμΉ˜λŠ” 영ν–₯ 평가
654
- - μž₯기적 κΈ°ν›„ 좔세에 λ”°λ₯Έ 농업 νŒ¨ν„΄ λ³€ν™” 예츑
655
- - κΈ°ν›„ 회볡λ ₯ μžˆλŠ” 농업 μ‹œμŠ€ν…œ μ „λž΅ μ œμ•ˆ
656
- - 지역별 κΈ°ν›„ μœ„ν—˜ λ…ΈμΆœλ„ 및 μ·¨μ•½μ„± λ§€ν•‘"""
657
-
658
- }
659
-
660
- style_guides = {
661
- "professional": "전문적이고 ν•™μˆ μ μΈ μ–΄μ‘°λ₯Ό μ‚¬μš©ν•˜μ„Έμš”. 기술적 μš©μ–΄λ₯Ό 적절히 μ‚¬μš©ν•˜κ³  체계적인 데이터 뢄석을 μ œκ³΅ν•˜μ„Έμš”.",
662
- "simple": "쉽고 κ°„κ²°ν•œ μ–Έμ–΄λ‘œ μ„€λͺ…ν•˜μ„Έμš”. μ „λ¬Έ μš©μ–΄λŠ” μ΅œμ†Œν™”ν•˜κ³  핡심 κ°œλ…μ„ 일상적인 ν‘œν˜„μœΌλ‘œ μ „λ‹¬ν•˜μ„Έμš”.",
663
- "detailed": "μƒμ„Έν•˜κ³  포괄적인 뢄석을 μ œκ³΅ν•˜μ„Έμš”. λ‹€μ–‘ν•œ 데이터 포인트, 톡계적 λ‰˜μ•™μŠ€, 그리고 μ—¬λŸ¬ μ‹œλ‚˜λ¦¬μ˜€λ₯Ό κ³ λ €ν•œ 심측 뢄석을 μ œμ‹œν•˜μ„Έμš”.",
664
- "action_oriented": "μ‹€ν–‰ κ°€λŠ₯ν•œ μΈμ‚¬μ΄νŠΈμ™€ ꡬ체적인 ꢌμž₯사항에 μ΄ˆμ μ„ λ§žμΆ”μ„Έμš”. 'λ‹€μŒ 단계' 및 'μ‹€μ§ˆμ  μ‘°μ–Έ' μ„Ήμ…˜μ„ ν¬ν•¨ν•˜μ„Έμš”."
665
- }
666
-
667
- dataset_guide = """
668
- 농업 데이터셋 ν™œμš© μ§€μΉ¨:
669
- - UN κΈ€λ‘œλ²Œ μ‹λŸ‰ 및 농업 톡계 데이터셋을 κΈ°λ³Έ λΆ„μ„μ˜ 근거둜 μ‚¬μš©ν•˜μ„Έμš”
670
- - ν† μ–‘ 및 ν™˜κ²½ λ³€μˆ˜ 기반 μž‘λ¬Ό μΆ”μ²œ λ°μ΄ν„°μ…‹μ˜ μΈμ‚¬μ΄νŠΈλ₯Ό μž‘λ¬Ό 선택 및 재배 쑰건 뢄석에 ν†΅ν•©ν•˜μ„Έμš”
671
- - κΈ°ν›„ λ³€ν™”κ°€ 농업에 λ―ΈμΉ˜λŠ” 영ν–₯ λ°μ΄ν„°μ…‹μ˜ 정보λ₯Ό 지속 κ°€λŠ₯μ„± 및 미래 전망 뢄석에 ν™œμš©ν•˜μ„Έμš”
672
- - λ°μ΄ν„°μ˜ μΆœμ²˜μ™€ 연도λ₯Ό λͺ…ν™•νžˆ μΈμš©ν•˜μ„Έμš”
673
- - 데이터셋 λ‚΄ μ£Όμš” λ³€μˆ˜ κ°„μ˜ 관계λ₯Ό λΆ„μ„ν•˜μ—¬ μΈμ‚¬μ΄νŠΈλ₯Ό λ„μΆœν•˜μ„Έμš”
674
- - λ°μ΄ν„°μ˜ ν•œκ³„μ™€ λΆˆν™•μ‹€μ„±μ„ 투λͺ…ν•˜κ²Œ μ–ΈκΈ‰ν•˜μ„Έμš”
675
- - ν•„μš”μ‹œ 데이터 격차λ₯Ό μ‹λ³„ν•˜κ³  μΆ”κ°€ 연ꡬ가 ν•„μš”ν•œ μ˜μ—­μ„ μ œμ•ˆν•˜μ„Έμš”"""
676
-
677
- soybean_guide = """
678
- κ³ κΈ‰ λŒ€λ‘ 농업 데이터셋 ν™œμš© μ§€μΉ¨:
679
- - λŒ€λ‘ 생산 쑰건 및 μˆ˜ν™•λŸ‰ νŒ¨ν„΄μ„ λ‹€λ₯Έ μž‘λ¬Όκ³Ό λΉ„κ΅ν•˜μ—¬ λΆ„μ„ν•˜μ„Έμš”
680
- - λŒ€λ‘ λ†μ—…μ˜ 경제적 κ°€μΉ˜μ™€ μ‹œμž₯ κΈ°νšŒμ— λŒ€ν•œ μΈμ‚¬μ΄νŠΈλ₯Ό μ œκ³΅ν•˜μ„Έμš”
681
- - λŒ€λ‘ 생산성에 영ν–₯을 λ―ΈμΉ˜λŠ” μ£Όμš” ν™˜κ²½ μš”μΈμ„ κ°•μ‘°ν•˜μ„Έμš”
682
- - λŒ€λ‘ 재배 기술 ν˜μ‹ κ³Ό μˆ˜μ΅μ„± ν–₯상 λ°©μ•ˆμ„ μ œμ•ˆν•˜μ„Έμš”
683
- - 지속 κ°€λŠ₯ν•œ λŒ€λ‘ 농업을 μœ„ν•œ μ‹€μ§ˆμ μΈ 접근법을 κ³΅μœ ν•˜μ„Έμš”"""
684
-
685
- crop_recommendation_guide = """
686
- ν† μ–‘ 및 ν™˜κ²½ λ³€μˆ˜ 기반 μž‘λ¬Ό μΆ”μ²œ ν™œμš© μ§€μΉ¨:
687
- - μ§€μ—­ νŠΉμ„±μ— λ§žλŠ” 졜적의 μž‘λ¬Ό 선택 기쀀을 μ œμ‹œν•˜μ„Έμš”
688
- - ν† μ–‘ 쑰건과 μž‘λ¬Ό 적합성 κ°„μ˜ 상관관계λ₯Ό λΆ„μ„ν•˜μ„Έμš”
689
- - ν™˜κ²½ λ³€μˆ˜μ— λ”°λ₯Έ μž‘λ¬Ό 생산성 예츑 λͺ¨λΈμ„ ν™œμš©ν•˜μ„Έμš”
690
- - 농업 생산성과 μˆ˜μ΅μ„± ν–₯상을 μœ„ν•œ μž‘λ¬Ό 선택 μ „λž΅μ„ μ œμ•ˆν•˜μ„Έμš”
691
- - 지속 κ°€λŠ₯ν•œ 농업을 μœ„ν•œ μž‘λ¬Ό λ‹€μ–‘ν™” 접근법을 ꢌμž₯ν•˜μ„Έμš”"""
692
-
693
- climate_impact_guide = """
694
- κΈ°ν›„ λ³€ν™”κ°€ 농업에 λ―ΈμΉ˜λŠ” 영ν–₯ 데이터셋 ν™œμš© μ§€μΉ¨:
695
- - κΈ°ν›„ λ³€ν™” μ‹œλ‚˜λ¦¬μ˜€μ— λ”°λ₯Έ μž‘λ¬Ό 생산성 λ³€ν™”λ₯Ό μ˜ˆμΈ‘ν•˜μ„Έμš”
696
- - κΈ°ν›„ μ μ‘ν˜• 농업 기술 및 μ „λž΅μ„ μ œμ•ˆν•˜μ„Έμš”
697
- - 지역별 κΈ°ν›„ μœ„ν—˜ μš”μ†Œμ™€ λŒ€μ‘ λ°©μ•ˆμ„ λΆ„μ„ν•˜μ„Έμš”
698
- - κΈ°ν›„ 변화에 λŒ€μ‘ν•˜κΈ° μœ„ν•œ μž‘λ¬Ό 선택 및 재배 μ‹œκΈ° μ‘°μ • λ°©μ•ˆμ„ μ œμ‹œν•˜μ„Έμš”
699
- - κΈ°ν›„ λ³€ν™”κ°€ 농산물 가격 및 μ‹œμž₯ 동ν–₯에 λ―ΈμΉ˜λŠ” 영ν–₯을 ν‰κ°€ν•˜μ„Έμš”"""
700
-
701
- search_guide = """
702
- μ›Ή 검색 κ²°κ³Ό ν™œμš© μ§€μΉ¨:
703
- - 데이터셋 뢄석을 λ³΄μ™„ν•˜λŠ” μ΅œμ‹  μ‹œμž₯ μ •λ³΄λ‘œ 검색 κ²°κ³Όλ₯Ό ν™œμš©ν•˜μ„Έμš”
704
- - 각 μ •λ³΄μ˜ 좜처λ₯Ό λ§ˆν¬λ‹€μš΄ 링크둜 ν¬ν•¨ν•˜μ„Έμš”: [좜처λͺ…](URL)
705
- - μ£Όμš” μ£Όμž₯μ΄λ‚˜ 데이터 ν¬μΈνŠΈλ§ˆλ‹€ 좜처λ₯Ό ν‘œμ‹œν•˜μ„Έμš”
706
- - μΆœμ²˜κ°€ 상좩할 경우, λ‹€μ–‘ν•œ 관점과 신뒰도λ₯Ό μ„€λͺ…ν•˜μ„Έμš”
707
- - κ΄€λ ¨ λ™μ˜μƒ λ§ν¬λŠ” [λΉ„λ””μ˜€: 제λͺ©](video_url) ν˜•μ‹μœΌλ‘œ ν¬ν•¨ν•˜μ„Έμš”
708
- - 검색 정보λ₯Ό μΌκ΄€λ˜κ³  체계적인 μ‘λ‹΅μœΌλ‘œ ν†΅ν•©ν•˜μ„Έμš”
709
- - λͺ¨λ“  μ£Όμš” 좜처λ₯Ό λ‚˜μ—΄ν•œ "μ°Έκ³  자료" μ„Ήμ…˜μ„ λ§ˆμ§€λ§‰μ— ν¬ν•¨ν•˜μ„Έμš”"""
710
-
711
- upload_guide = """
712
- μ—…λ‘œλ“œλœ 파일 ν™œμš© μ§€μΉ¨:
713
- - μ—…λ‘œλ“œλœ νŒŒμΌμ„ μ‘λ‹΅μ˜ μ£Όμš” μ •λ³΄μ›μœΌλ‘œ ν™œμš©ν•˜μ„Έμš”
714
- - 쿼리와 직접 κ΄€λ ¨λœ 파일 정보λ₯Ό μΆ”μΆœν•˜κ³  κ°•μ‘°ν•˜μ„Έμš”
715
- - κ΄€λ ¨ κ΅¬μ ˆμ„ μΈμš©ν•˜κ³  νŠΉμ • νŒŒμΌμ„ 좜처둜 μΈμš©ν•˜μ„Έμš”
716
- - CSV 파일의 수치 λ°μ΄ν„°λŠ” μš”μ•½ λ¬Έμž₯으둜 λ³€ν™˜ν•˜μ„Έμš”
717
- - PDF μ½˜ν…μΈ λŠ” νŠΉμ • μ„Ήμ…˜μ΄λ‚˜ νŽ˜μ΄μ§€λ₯Ό μ°Έμ‘°ν•˜μ„Έμš”
718
- - 파일 정보λ₯Ό μ›Ή 검색 결과와 μ›ν™œν•˜κ²Œ ν†΅ν•©ν•˜μ„Έμš”
719
- - 정보가 상좩할 경우, 일반적인 μ›Ή 결과보닀 파일 μ½˜ν…μΈ λ₯Ό μš°μ„ μ‹œν•˜μ„Έμš”"""
720
-
721
- # Base prompt
722
- final_prompt = base_prompt
723
-
724
- # Add mode-specific guidance
725
- if mode in mode_prompts:
726
- final_prompt += "\n" + mode_prompts[mode]
727
-
728
- # Style
729
- if style in style_guides:
730
- final_prompt += f"\n\n뢄석 μŠ€νƒ€μΌ: {style_guides[style]}"
731
-
732
- # Always include dataset guides
733
- final_prompt += f"\n\n{dataset_guide}"
734
- final_prompt += f"\n\n{crop_recommendation_guide}"
735
- final_prompt += f"\n\n{climate_impact_guide}"
736
-
737
- # Conditionally add soybean dataset guide if selected in UI
738
- if st.session_state.get('use_soybean_dataset', False):
739
- final_prompt += f"\n\n{soybean_guide}"
740
-
741
- if include_search_results:
742
- final_prompt += f"\n\n{search_guide}"
743
-
744
- if include_uploaded_files:
745
- final_prompt += f"\n\n{upload_guide}"
746
-
747
- final_prompt += """
748
- \n\n응닡 ν˜•μ‹ μš”κ΅¬μ‚¬ν•­:
749
- - λ§ˆν¬λ‹€μš΄ 제λͺ©(## 및 ###)을 μ‚¬μš©ν•˜μ—¬ 응닡을 μ²΄κ³„μ μœΌλ‘œ κ΅¬μ„±ν•˜μ„Έμš”
750
- - μ€‘μš”ν•œ 점은 ꡡ은 ν…μŠ€νŠΈ(**ν…μŠ€νŠΈ**)둜 κ°•μ‘°ν•˜μ„Έμš”
751
- - 3-5개의 후속 μ§ˆλ¬Έμ„ ν¬ν•¨ν•œ "κ΄€λ ¨ 질문" μ„Ήμ…˜μ„ λ§ˆμ§€λ§‰μ— μΆ”κ°€ν•˜μ„Έμš”
752
- - μ μ ˆν•œ 간격과 단락 κ΅¬λΆ„μœΌλ‘œ 응닡을 μ„œμ‹ν™”ν•˜μ„Έμš”
753
- - λͺ¨λ“  λ§ν¬λŠ” λ§ˆν¬λ‹€μš΄ ν˜•μ‹μœΌλ‘œ 클릭 κ°€λŠ₯ν•˜κ²Œ λ§Œλ“œμ„Έμš”: [ν…μŠ€νŠΈ](url)
754
- - κ°€λŠ₯ν•œ 경우 데이터λ₯Ό μ‹œκ°μ μœΌλ‘œ ν‘œν˜„(ν‘œ, κ·Έλž˜ν”„ λ“±μ˜ μ„€λͺ…)ν•˜μ„Έμš”"""
755
-
756
- return final_prompt
757
-
758
- # ──────────────────────────────── Brave Search API ────────────────────────
759
@st.cache_data(ttl=3600)
def brave_search(query: str, count: int = 10):
    """Query the Brave web-search endpoint and normalize the hits.

    The query is augmented with Korean agriculture keywords. Retries up to
    three times (5 s apart); returns a list of result dicts, or an empty
    list when every attempt fails.
    """
    if not BRAVE_KEY:
        raise RuntimeError("⚠️ SERPHOUSE_API_KEY (Brave API Key) environment variable is empty.")

    request_headers = {
        "Accept": "application/json",
        "Accept-Encoding": "gzip",
        "X-Subscription-Token": BRAVE_KEY,
    }
    request_params = {"q": query + " 농산물 가격 동ν–₯ 농업 데이터", "count": str(count)}

    for attempt in range(3):
        try:
            resp = requests.get(BRAVE_ENDPOINT, headers=request_headers,
                                params=request_params, timeout=15)
            resp.raise_for_status()
            payload = resp.json()

            # Brave may nest results under "web" or return them flat.
            hits = payload.get("web", {}).get("results") or payload.get("results", [])
            if not hits:
                logging.warning(f"No Brave search results found. Response: {payload}")
                raise ValueError("No search results found.")

            articles = []
            for rank, hit in enumerate(hits[:count], 1):
                link = hit.get("url", hit.get("link", ""))
                domain = re.sub(r"https?://(www\.)?", "", link).split("/")[0]
                articles.append({
                    "index": rank,
                    "title": hit.get("title", "No title"),
                    "link": link,
                    "snippet": hit.get("description", hit.get("text", "No snippet")),
                    "displayed_link": domain,
                })
            return articles

        except Exception as exc:
            logging.error(f"Brave search failure (attempt {attempt+1}/3): {exc}")
            if attempt < 2:
                time.sleep(5)

    # All retries exhausted.
    return []
798
-
799
@st.cache_data(ttl=3600)
def brave_video_search(query: str, count: int = 3):
    """Query the Brave video-search endpoint; list of video dicts or []."""
    if not BRAVE_KEY:
        raise RuntimeError("⚠️ SERPHOUSE_API_KEY (Brave API Key) environment variable is empty.")

    request_headers = {"Accept": "application/json","Accept-Encoding": "gzip","X-Subscription-Token": BRAVE_KEY}
    request_params = {"q": query + " 농산물 가격 농업 μ‹œμž₯", "count": str(count)}

    for attempt in range(3):
        try:
            resp = requests.get(BRAVE_VIDEO_ENDPOINT, headers=request_headers,
                                params=request_params, timeout=15)
            resp.raise_for_status()
            payload = resp.json()

            clips = []
            for rank, clip in enumerate(payload.get("results", [])[:count], 1):
                clips.append({
                    "index": rank,
                    "title": clip.get("title", "Video"),
                    "video_url": clip.get("url", ""),
                    "thumbnail_url": clip.get("thumbnail", {}).get("src", ""),
                    "source": clip.get("provider", {}).get("name", "Unknown source"),
                })
            return clips

        except Exception as exc:
            logging.error(f"Brave video search failure (attempt {attempt+1}/3): {exc}")
            if attempt < 2:
                time.sleep(5)

    return []
831
-
832
@st.cache_data(ttl=3600)
def brave_news_search(query: str, count: int = 3):
    """Query the Brave news-search endpoint; list of news dicts or []."""
    if not BRAVE_KEY:
        raise RuntimeError("⚠️ SERPHOUSE_API_KEY (Brave API Key) environment variable is empty.")

    request_headers = {"Accept": "application/json","Accept-Encoding": "gzip","X-Subscription-Token": BRAVE_KEY}
    request_params = {"q": query + " 농산물 가격 동ν–₯ 농업", "count": str(count)}

    for attempt in range(3):
        try:
            resp = requests.get(BRAVE_NEWS_ENDPOINT, headers=request_headers,
                                params=request_params, timeout=15)
            resp.raise_for_status()
            payload = resp.json()

            stories = []
            for rank, story in enumerate(payload.get("results", [])[:count], 1):
                stories.append({
                    "index": rank,
                    "title": story.get("title", "News article"),
                    "url": story.get("url", ""),
                    "description": story.get("description", ""),
                    "source": story.get("source", "Unknown source"),
                    "date": story.get("age", "Unknown date"),
                })
            return stories

        except Exception as exc:
            logging.error(f"Brave news search failure (attempt {attempt+1}/3): {exc}")
            if attempt < 2:
                time.sleep(5)

    return []
865
-
866
- def mock_results(query: str) -> str:
867
- ts = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
868
- return (f"# λŒ€μ²΄ 검색 μ½˜ν…μΈ  (생성 μ‹œκ°„: {ts})\n\n"
869
- f"'{query}'에 λŒ€ν•œ 검색 API μš”μ²­μ΄ μ‹€νŒ¨ν–ˆκ±°λ‚˜ κ²°κ³Όκ°€ μ—†μŠ΅λ‹ˆλ‹€. "
870
- f"κΈ°μ‘΄ 지식을 기반으둜 응닡을 μƒμ„±ν•΄μ£Όμ„Έμš”.\n\n"
871
- f"λ‹€μŒ 사항을 κ³ λ €ν•˜μ„Έμš”:\n\n"
872
- f"- {query}에 κ΄€ν•œ κΈ°λ³Έ κ°œλ…κ³Ό μ€‘μš”μ„±\n"
873
- f"- 일반적으둜 μ•Œλ €μ§„ κ΄€λ ¨ ν†΅κ³„λ‚˜ μΆ”μ„Έ\n"
874
- f"- 이 μ£Όμ œμ— λŒ€ν•œ μ „λ¬Έκ°€ 의견\n"
875
- f"- λ…μžκ°€ κ°€μ§ˆ 수 μžˆλŠ” 질문\n\n"
876
- f"μ°Έκ³ : μ΄λŠ” μ‹€μ‹œκ°„ 데이터가 μ•„λ‹Œ λŒ€μ²΄ μ§€μΉ¨μž…λ‹ˆλ‹€.\n\n")
877
-
878
def do_web_search(query: str) -> str:
    """Run web + video + news searches and fold them into one markdown report.

    Falls back to mock_results() when the primary search yields nothing or
    anything raises along the way.
    """
    try:
        articles = brave_search(query, 10)
        if not articles:
            logging.warning("No search results, using fallback content")
            return mock_results(query)

        clips = brave_video_search(query, 2)
        stories = brave_news_search(query, 3)

        sections = ["# μ›Ή 검색 κ²°κ³Ό\nλ‹€μŒ κ²°κ³Όλ₯Ό ν™œμš©ν•˜μ—¬ 데이터셋 뢄석을 λ³΄μ™„ν•˜λŠ” 포괄적인 닡변을 μ œκ³΅ν•˜μ„Έμš”.\n\n"]

        sections.append("## μ›Ή κ²°κ³Ό\n\n")
        for art in articles[:5]:
            sections.append(f"### κ²°κ³Ό {art['index']}: {art['title']}\n\n{art['snippet']}\n\n")
            sections.append(f"**좜처**: [{art['displayed_link']}]({art['link']})\n\n---\n")

        if stories:
            sections.append("## λ‰΄μŠ€ κ²°κ³Ό\n\n")
            for story in stories:
                sections.append(f"### {story['title']}\n\n{story['description']}\n\n")
                sections.append(f"**좜처**: [{story['source']}]({story['url']}) - {story['date']}\n\n---\n")

        if clips:
            sections.append("## λΉ„λ””μ˜€ κ²°κ³Ό\n\n")
            for clip in clips:
                sections.append(f"### {clip['title']}\n\n")
                if clip.get('thumbnail_url'):
                    sections.append(f"![썸넀일]({clip['thumbnail_url']})\n\n")
                sections.append(f"**μ‹œμ²­**: [{clip['source']}]({clip['video_url']})\n\n")

        return "".join(sections)

    except Exception as e:
        logging.error(f"Web search process failed: {str(e)}")
        return mock_results(query)
914
-
915
- # ──────────────────────────────── File Upload Handling ─────────────────────
916
def process_text_file(file):
    """Render an uploaded text file as a markdown section (capped near 10 kB).

    The stream position is rewound after reading so callers can reuse it.
    """
    try:
        raw = file.read()
        file.seek(0)

        decoded = raw.decode('utf-8', errors='ignore')
        if len(decoded) > 10000:
            decoded = decoded[:9700] + "...(truncated)..."

        return f"## ν…μŠ€νŠΈ 파일: {file.name}\n\n" + decoded
    except Exception as e:
        logging.error(f"Error processing text file: {str(e)}")
        return f"ν…μŠ€νŠΈ 파일 처리 였λ₯˜: {str(e)}"
930
-
931
def process_csv_file(file):
    """Summarize an uploaded CSV as markdown: dimensions, preview, numeric stats.

    Rewinds the stream after reading; every rendering step degrades gracefully
    (to_markdown may be unavailable without tabulate) instead of failing.
    """
    try:
        raw = file.read()
        file.seek(0)

        df = pd.read_csv(io.BytesIO(raw))
        parts = [f"## CSV 파일: {file.name}\n\n"]
        parts.append(f"- ν–‰: {len(df)}\n")
        parts.append(f"- μ—΄: {len(df.columns)}\n")
        parts.append(f"- μ—΄ 이름: {', '.join(df.columns.tolist())}\n\n")

        parts.append("### 데이터 미리보기\n\n")
        head = df.head(10)
        try:
            table = head.to_markdown(index=False)
            if table:
                parts.append(table + "\n\n")
            else:
                parts.append("CSV 데이터λ₯Ό ν‘œμ‹œν•  수 μ—†μŠ΅λ‹ˆλ‹€.\n\n")
        except Exception as e:
            logging.error(f"Markdown table conversion error: {e}")
            parts.append("ν…μŠ€νŠΈλ‘œ 데이터 ν‘œμ‹œ:\n\n" + str(head) + "\n\n")

        numeric_cols = df.select_dtypes(include=['number']).columns
        if len(numeric_cols) > 0:
            parts.append("### κΈ°λ³Έ 톡계 정보\n\n")
            try:
                stats = df[numeric_cols].describe().round(2).to_markdown()
                if stats:
                    parts.append(stats + "\n\n")
                else:
                    parts.append("톡계 정보λ₯Ό ν‘œμ‹œν•  수 μ—†μŠ΅λ‹ˆλ‹€.\n\n")
            except Exception as e:
                logging.error(f"Statistical info conversion error: {e}")
                parts.append("톡계 정보λ₯Ό 생성할 수 μ—†μŠ΅λ‹ˆλ‹€.\n\n")

        return "".join(parts)
    except Exception as e:
        logging.error(f"CSV file processing error: {str(e)}")
        return f"CSV 파일 처리 였λ₯˜: {str(e)}"
972
-
973
def process_pdf_file(file):
    """Extract text from up to the first 5 pages of an uploaded PDF as markdown.

    Per-page extraction errors are embedded in the output instead of aborting;
    the accumulated text is truncated once it exceeds ~8000 characters.
    """
    try:
        data = file.read()
        file.seek(0)

        reader = PyPDF2.PdfReader(io.BytesIO(data), strict=False)
        header = f"## PDF 파일: {file.name}\n\n- 총 νŽ˜μ΄μ§€: {len(reader.pages)}\n\n"

        page_limit = min(5, len(reader.pages))
        body = ""

        for idx in range(page_limit):
            try:
                text = reader.pages[idx].extract_text()
                section = f"### νŽ˜μ΄μ§€ {idx+1}\n\n"
                if text and len(text.strip()) > 0:
                    if len(text) > 1500:
                        section += text[:1500] + "...(좕약됨)...\n\n"
                    else:
                        section += text + "\n\n"
                else:
                    section += "(ν…μŠ€νŠΈλ₯Ό μΆ”μΆœν•  수 μ—†μŒ)\n\n"

                body += section

                if len(body) > 8000:
                    body += "...(λ‚˜λ¨Έμ§€ νŽ˜μ΄μ§€ 좕약됨)...\n\n"
                    break

            except Exception as page_err:
                logging.error(f"Error processing PDF page {idx+1}: {str(page_err)}")
                body += f"### νŽ˜μ΄μ§€ {idx+1}\n\n(λ‚΄μš© μΆ”μΆœ 였λ₯˜: {str(page_err)})\n\n"

        if len(reader.pages) > page_limit:
            body += f"\nμ°Έκ³ : 처음 {page_limit} νŽ˜μ΄μ§€λ§Œ ν‘œμ‹œλ©λ‹ˆλ‹€.\n\n"

        return header + "### PDF λ‚΄μš©\n\n" + body

    except Exception as e:
        logging.error(f"PDF file processing error: {str(e)}")
        return f"## PDF 파일: {file.name}\n\n였λ₯˜: {str(e)}\n\nμ²˜λ¦¬ν•  수 μ—†μŠ΅λ‹ˆλ‹€."
1018
-
1019
def process_uploaded_files(files):
    """Concatenate markdown renderings of all uploaded files; None when empty.

    Dispatches on file extension (txt/csv/pdf); anything else is reported as
    unsupported, and per-file failures are recorded without stopping the loop.
    """
    if not files:
        return None

    sections = ["# μ—…λ‘œλ“œλœ 파일 λ‚΄μš©\n\nμ‚¬μš©μžκ°€ μ œκ³΅ν•œ 파일의 λ‚΄μš©μž…λ‹ˆλ‹€.\n\n"]
    for file in files:
        try:
            ext = file.name.split('.')[-1].lower()
            if ext == 'txt':
                rendered = process_text_file(file)
            elif ext == 'csv':
                rendered = process_csv_file(file)
            elif ext == 'pdf':
                rendered = process_pdf_file(file)
            else:
                rendered = None

            if rendered is None:
                sections.append(f"### μ§€μ›λ˜μ§€ μ•ŠλŠ” 파일: {file.name}\n\n---\n\n")
            else:
                sections.append(rendered + "\n\n---\n\n")
        except Exception as e:
            logging.error(f"File processing error {file.name}: {e}")
            sections.append(f"### 파일 처리 였λ₯˜: {file.name}\n\n였λ₯˜: {e}\n\n---\n\n")

    return "".join(sections)
1040
-
1041
- # ──────────────────────────────── Image & Utility ─────────────────────────
1042
-
1043
def generate_image(prompt, w=768, h=768, g=3.5, steps=30, seed=3):
    """Call the remote Gradio image endpoint; returns (image, caption-or-error).

    An empty prompt short-circuits without a network call; any remote failure
    is logged and returned as (None, error-text) instead of raising.
    """
    if not prompt:
        return None, "Insufficient prompt"
    try:
        outcome = Client(IMAGE_API_URL).predict(
            prompt=prompt, width=w, height=h, guidance=g,
            inference_steps=steps, seed=seed,
            do_img2img=False, init_image=None,
            image2image_strength=0.8, resize_img=True,
            api_name="/generate_image"
        )
        image, used_seed = outcome[0], outcome[1]
        return image, f"Seed: {used_seed}"
    except Exception as exc:
        logging.error(exc)
        return None, str(exc)
1058
-
1059
def extract_image_prompt(response_text: str, topic: str):
    """Ask the LLM for a one-line English image prompt summarizing the answer.

    Falls back to a generic agriculture-photo prompt when the API call fails.
    """
    client = get_openai_client()
    try:
        completion = client.chat.completions.create(
            model="gpt-4.1-mini",
            messages=[
                {"role": "system", "content": "농업 및 농산물에 κ΄€ν•œ 이미지 ν”„λ‘¬ν”„νŠΈλ₯Ό μƒμ„±ν•©λ‹ˆλ‹€. ν•œ μ€„μ˜ μ˜μ–΄λ‘œ 된 ν”„λ‘¬ν”„νŠΈλ§Œ λ°˜ν™˜ν•˜μ„Έμš”, λ‹€λ₯Έ ν…μŠ€νŠΈλŠ” ν¬ν•¨ν•˜μ§€ λ§ˆμ„Έμš”."},
                {"role": "user", "content": f"주제: {topic}\n\n---\n{response_text}\n\n---"}
            ],
            temperature=1,
            max_tokens=80,
            top_p=1
        )
        return completion.choices[0].message.content.strip()
    except Exception as exc:
        logging.error(f"OpenAI image prompt generation error: {exc}")
        return f"A professional photograph of agricultural produce and farm fields, data visualization of crop prices and trends, high quality"
1076
-
1077
def md_to_html(md: str, title="농산물 μˆ˜μš” 예츑 뢄석 κ²°κ³Ό"):
    """Wrap rendered markdown in a minimal standalone HTML document.

    Fix: the title (often derived from the user's query) is now HTML-escaped,
    so characters like '<' or '&' can no longer break the <title> markup.
    """
    import html  # local import: file-level import block is outside this chunk
    safe_title = html.escape(title)
    body = markdown.markdown(md)
    return f"<!DOCTYPE html><html><head><title>{safe_title}</title><meta charset='utf-8'></head><body>{body}</body></html>"
1079
-
1080
- def keywords(text: str, top=5):
1081
- cleaned = re.sub(r"[^κ°€-힣a-zA-Z0-9\s]", "", text)
1082
- return " ".join(cleaned.split()[:top])
1083
-
1084
- # ──────────────────────────────── Streamlit UI ────────────────────────────
1085
def agricultural_price_forecast_app():
    """Top-level Streamlit page: sidebar configuration, file upload,
    chat history rendering, and the chat input loop.

    Fix: the soybean-dataset checkbox label contained mojibake
    ("κ³ κΈ‰ λŒ€οΏ½οΏ½ ...") and is restored to "κ³ κΈ‰ λŒ€λ‘ ..." to match the
    `use_soybean_dataset` key and its help text.
    """
    st.title("Agriculture GPT")
    st.markdown("UN κΈ€λ‘œλ²Œ μ‹λŸ‰ 및 농업 톡계 데이터셋 뢄석 기반의 농산물 μ‹œμž₯ 예츑")

    # Seed session-state defaults once per session.
    if "ai_model" not in st.session_state:
        st.session_state.ai_model = "gpt-4.1-mini"
    if "messages" not in st.session_state:
        st.session_state.messages = []
    if "auto_save" not in st.session_state:
        st.session_state.auto_save = True
    if "generate_image" not in st.session_state:
        st.session_state.generate_image = False
    if "web_search_enabled" not in st.session_state:
        st.session_state.web_search_enabled = True
    if "analysis_mode" not in st.session_state:
        st.session_state.analysis_mode = "price_forecast"
    if "response_style" not in st.session_state:
        st.session_state.response_style = "professional"
    if "use_soybean_dataset" not in st.session_state:
        st.session_state.use_soybean_dataset = False

    sb = st.sidebar
    sb.title("뢄석 μ„€μ •")

    # Kaggle dataset info display (loads on demand only).
    if sb.checkbox("데이터셋 정보 ν‘œμ‹œ", value=False):
        st.info("UN κΈ€λ‘œλ²Œ μ‹λŸ‰ 및 농업 톡계 데이터셋을 λΆˆλŸ¬μ˜€λŠ” 쀑...")
        dataset_info = load_agriculture_dataset()
        if dataset_info:
            st.success(f"데이터셋 λ‘œλ“œ μ™„λ£Œ: {len(dataset_info['files'])}개 파일")

            with st.expander("데이터셋 미리보기", expanded=False):
                for file_info in dataset_info['files'][:5]:
                    st.write(f"**{file_info['name']}** ({file_info['size_mb']} MB)")
        else:
            st.error("데이터셋을 λΆˆλŸ¬μ˜€λŠ”λ° μ‹€νŒ¨ν–ˆμŠ΅λ‹ˆλ‹€. Kaggle API 섀정을 ν™•μΈν•˜μ„Έμš”.")

    sb.subheader("뢄석 ꡬ성")
    sb.selectbox(
        "뢄석 λͺ¨λ“œ",
        options=list(ANALYSIS_MODES.keys()),
        format_func=lambda x: ANALYSIS_MODES[x],
        key="analysis_mode"
    )

    sb.selectbox(
        "응닡 μŠ€νƒ€μΌ",
        options=list(RESPONSE_STYLES.keys()),
        format_func=lambda x: RESPONSE_STYLES[x],
        key="response_style"
    )

    # Dataset selection
    sb.subheader("데이터셋 선택")
    sb.checkbox(
        "κ³ κΈ‰ λŒ€λ‘ 농업 데이터셋 μ‚¬μš©",  # was mojibake "λŒ€οΏ½οΏ½"; soybean = λŒ€λ‘
        key="use_soybean_dataset",
        help="λŒ€λ‘(콩) κ΄€λ ¨ μ§ˆλ¬Έμ— 더 μ •ν™•ν•œ 정보λ₯Ό μ œκ³΅ν•©λ‹ˆλ‹€."
    )

    # Always enabled datasets info
    sb.info("κΈ°λ³Έ ν™œμ„±ν™”λœ 데이터셋:\n- UN κΈ€λ‘œλ²Œ μ‹λŸ‰ 및 농업 톡계\n- ν† μ–‘ 및 ν™˜κ²½ λ³€μˆ˜ 기반 μž‘λ¬Ό μΆ”μ²œ\n- κΈ°ν›„ λ³€ν™”κ°€ 농업에 λ―ΈμΉ˜λŠ” 영ν–₯")

    # Example queries
    sb.subheader("μ˜ˆμ‹œ 질문")
    c1, c2, c3 = sb.columns(3)
    if c1.button("μŒ€ 가격 전망", key="ex1"):
        process_example(EXAMPLE_QUERIES["example1"])
    if c2.button("κΈ°ν›„ 영ν–₯", key="ex2"):
        process_example(EXAMPLE_QUERIES["example2"])
    if c3.button("증평ꡰ μž‘λ¬Ό", key="ex3"):
        process_example(EXAMPLE_QUERIES["example3"])

    sb.subheader("기타 μ„€μ •")
    sb.toggle("μžλ™ μ €μž₯", key="auto_save")
    sb.toggle("이미지 μžλ™ 생성", key="generate_image")

    web_search_enabled = sb.toggle("μ›Ή 검색 μ‚¬μš©", value=st.session_state.web_search_enabled)
    st.session_state.web_search_enabled = web_search_enabled

    if web_search_enabled:
        st.sidebar.info("βœ… μ›Ή 검색 κ²°κ³Όκ°€ 응닡에 ν†΅ν•©λ©λ‹ˆλ‹€.")

    # Offer a download of the most recent non-empty assistant answer.
    latest_response = next(
        (m["content"] for m in reversed(st.session_state.messages)
         if m["role"] == "assistant" and m["content"].strip()),
        None
    )
    if latest_response:
        # Prefer the first markdown H1 as the file title, else the first line.
        title_match = re.search(r"# (.*?)(\n|$)", latest_response)
        if title_match:
            title = title_match.group(1).strip()
        else:
            first_line = latest_response.split('\n', 1)[0].strip()
            title = first_line[:40] + "..." if len(first_line) > 40 else first_line

        sb.subheader("μ΅œμ‹  응닡 λ‹€μš΄λ‘œλ“œ")
        d1, d2 = sb.columns(2)
        d1.download_button("λ§ˆν¬λ‹€μš΄μœΌλ‘œ λ‹€μš΄λ‘œλ“œ", latest_response,
                           file_name=f"{title}.md", mime="text/markdown")
        d2.download_button("HTML둜 λ‹€μš΄λ‘œλ“œ", md_to_html(latest_response, title),
                           file_name=f"{title}.html", mime="text/html")

    # JSON conversation record upload
    up = sb.file_uploader("λŒ€ν™” 기둝 뢈러였기 (.json)", type=["json"], key="json_uploader")
    if up:
        try:
            st.session_state.messages = json.load(up)
            sb.success("λŒ€ν™” 기둝을 μ„±κ³΅μ μœΌλ‘œ λΆˆλŸ¬μ™”μŠ΅λ‹ˆλ‹€")
        except Exception as e:
            sb.error(f"뢈러였기 μ‹€νŒ¨: {e}")

    # JSON conversation record download
    if sb.button("λŒ€ν™” 기둝을 JSON으둜 λ‹€μš΄λ‘œλ“œ"):
        sb.download_button(
            "μ €μž₯",
            data=json.dumps(st.session_state.messages, ensure_ascii=False, indent=2),
            file_name="conversation_history.json",
            mime="application/json"
        )

    # File Upload
    st.subheader("파일 μ—…λ‘œλ“œ")
    uploaded_files = st.file_uploader(
        "μ°Έκ³  자료둜 μ‚¬μš©ν•  파일 μ—…λ‘œλ“œ (txt, csv, pdf)",
        type=["txt", "csv", "pdf"],
        accept_multiple_files=True,
        key="file_uploader"
    )

    if uploaded_files:
        file_count = len(uploaded_files)
        st.success(f"{file_count}개 파일이 μ—…λ‘œλ“œλ˜μ—ˆμŠ΅λ‹ˆλ‹€. μ§ˆμ˜μ— λŒ€ν•œ μ†ŒμŠ€λ‘œ μ‚¬μš©λ©λ‹ˆλ‹€.")

        with st.expander("μ—…λ‘œλ“œλœ 파일 미리보기", expanded=False):
            for idx, file in enumerate(uploaded_files):
                st.write(f"**파일λͺ…:** {file.name}")
                ext = file.name.split('.')[-1].lower()

                if ext == 'txt':
                    preview = file.read(1000).decode('utf-8', errors='ignore')
                    file.seek(0)
                    st.text_area(
                        f"{file.name} 미리보기",
                        preview + ("..." if len(preview) >= 1000 else ""),
                        height=150
                    )
                elif ext == 'csv':
                    try:
                        df = pd.read_csv(file)
                        file.seek(0)
                        st.write("CSV 미리보기 (μ΅œλŒ€ 5ν–‰)")
                        st.dataframe(df.head(5))
                    except Exception as e:
                        st.error(f"CSV 미리보기 μ‹€νŒ¨: {e}")
                elif ext == 'pdf':
                    try:
                        file_bytes = file.read()
                        file.seek(0)

                        pdf_file = io.BytesIO(file_bytes)
                        reader = PyPDF2.PdfReader(pdf_file, strict=False)

                        pc = len(reader.pages)
                        st.write(f"PDF 파일: {pc}νŽ˜μ΄μ§€")

                        if pc > 0:
                            try:
                                page_text = reader.pages[0].extract_text()
                                preview = page_text[:500] if page_text else "(ν…μŠ€νŠΈ μΆ”μΆœ λΆˆκ°€)"
                                st.text_area("첫 νŽ˜μ΄μ§€ 미리보기", preview + "...", height=150)
                            except Exception:
                                st.warning("첫 νŽ˜μ΄μ§€ ν…μŠ€νŠΈ μΆ”μΆœ μ‹€νŒ¨")
                    except Exception as e:
                        st.error(f"PDF 미리보기 μ‹€νŒ¨: {e}")

                if idx < file_count - 1:
                    st.divider()

    # Display existing messages (assistant messages may carry video metadata).
    for m in st.session_state.messages:
        with st.chat_message(m["role"]):
            st.markdown(m["content"], unsafe_allow_html=True)

            if "videos" in m and m["videos"]:
                st.subheader("κ΄€λ ¨ λΉ„λ””μ˜€")
                for video in m["videos"]:
                    video_title = video.get('title', 'κ΄€λ ¨ λΉ„λ””μ˜€')
                    video_url = video.get('url', '')
                    thumbnail = video.get('thumbnail', '')

                    if thumbnail:
                        col1, col2 = st.columns([1, 3])
                        with col1:
                            st.write("🎬")
                        with col2:
                            st.markdown(f"**[{video_title}]({video_url})**")
                            st.write(f"좜처: {video.get('source', 'μ•Œ 수 μ—†μŒ')}")
                    else:
                        st.markdown(f"🎬 **[{video_title}]({video_url})**")
                        st.write(f"좜처: {video.get('source', 'μ•Œ 수 μ—†μŒ')}")

    # User input
    query = st.chat_input("농산물 가격, μˆ˜μš” λ˜λŠ” μ‹œμž₯ 동ν–₯ κ΄€λ ¨ μ§ˆλ¬Έμ„ μž…λ ₯ν•˜μ„Έμš”.")
    if query:
        process_input(query, uploaded_files)

    sb.markdown("---")
    sb.markdown("Created by Vidraft | [Community](https://discord.gg/openfreeai)")
1296
-
1297
def process_example(topic):
    """Run a predefined example query through the normal input pipeline (no uploaded files)."""
    process_input(topic, [])
1299
-
1300
def process_input(query: str, uploaded_files):
    """Handle one chat turn: gather dataset/web/file context, stream the LLM
    answer into the chat, then render videos, an optional generated image,
    download buttons, and auto-save.

    Fixes: the user-facing error prefix contained mojibake ("였��� λ°œμƒ")
    and is restored to "였λ₯˜ λ°œμƒ"; the re-raised API error now chains the
    original exception (`from api_error`) so the traceback keeps its cause.
    """
    # Avoid duplicating the same user message on Streamlit reruns.
    if not any(m["role"] == "user" and m["content"] == query for m in st.session_state.messages):
        st.session_state.messages.append({"role": "user", "content": query})

    with st.chat_message("user"):
        st.markdown(query)

    with st.chat_message("assistant"):
        placeholder = st.empty()
        message_placeholder = st.empty()
        full_response = ""

        use_web_search = st.session_state.web_search_enabled
        has_uploaded_files = bool(uploaded_files) and len(uploaded_files) > 0

        try:
            status = st.status("μ§ˆλ¬Έμ— λ‹΅λ³€ μ€€λΉ„ 쀑...")
            status.update(label="ν΄λΌμ΄μ–ΈνŠΈ μ΄ˆκΈ°ν™” 쀑...")

            client = get_openai_client()

            search_content = None
            video_results = []
            news_results = []

            # Dataset analyses that are always included in the prompt.
            status.update(label="농업 데이터셋 뢄석 쀑...")
            with st.spinner("데이터셋 뢄석 쀑..."):
                dataset_analysis = analyze_dataset_for_query(query)

                crop_recommendation_analysis = analyze_crop_recommendation_dataset(query)
                climate_impact_analysis = analyze_climate_impact_dataset(query)

            # Conditional dataset analysis (sidebar opt-in).
            soybean_analysis = None
            if st.session_state.use_soybean_dataset:
                status.update(label="λŒ€λ‘ 농업 데이터셋 뢄석 쀑...")
                with st.spinner("λŒ€λ‘ 데이터셋 뢄석 쀑..."):
                    soybean_analysis = analyze_soybean_dataset(query)

            if use_web_search:
                # Web search runs quietly; results are folded into the prompt.
                # NOTE(review): news_results is collected but only the news
                # already embedded in do_web_search() reaches the prompt.
                with st.spinner("정보 μˆ˜μ§‘ 쀑..."):
                    search_content = do_web_search(keywords(query, top=5))
                    video_results = brave_video_search(query, 2)
                    news_results = brave_news_search(query, 3)

            file_content = None
            if has_uploaded_files:
                status.update(label="μ—…λ‘œλ“œλœ 파일 처리 쀑...")
                with st.spinner("파일 뢄석 쀑..."):
                    file_content = process_uploaded_files(uploaded_files)

            # Keep only videos with a plausible absolute URL.
            valid_videos = []
            for vid in video_results:
                url = vid.get('video_url')
                if url and url.startswith('http'):
                    valid_videos.append({
                        'url': url,
                        'title': vid.get('title', 'λΉ„λ””μ˜€'),
                        'thumbnail': vid.get('thumbnail_url', ''),
                        'source': vid.get('source', 'λΉ„λ””μ˜€ 좜처')
                    })

            status.update(label="μ’…ν•© 뢄석 μ€€λΉ„ 쀑...")
            sys_prompt = get_system_prompt(
                mode=st.session_state.analysis_mode,
                style=st.session_state.response_style,
                include_search_results=use_web_search,
                include_uploaded_files=has_uploaded_files
            )

            api_messages = [
                {"role": "system", "content": sys_prompt}
            ]

            # Assemble the user message: query + all gathered context blocks.
            user_content = query
            user_content += "\n\n" + dataset_analysis
            user_content += "\n\n" + crop_recommendation_analysis
            user_content += "\n\n" + climate_impact_analysis

            if soybean_analysis:
                user_content += "\n\n" + soybean_analysis

            if search_content:
                user_content += "\n\n" + search_content
            if file_content:
                user_content += "\n\n" + file_content

            if valid_videos:
                user_content += "\n\n# κ΄€λ ¨ λ™μ˜μƒ\n"
                for i, vid in enumerate(valid_videos):
                    user_content += f"\n{i+1}. **{vid['title']}** - [{vid['source']}]({vid['url']})\n"

            api_messages.append({"role": "user", "content": user_content})

            try:
                # Stream the completion token-by-token into the placeholder.
                stream = client.chat.completions.create(
                    model="gpt-4.1-mini",
                    messages=api_messages,
                    temperature=1,
                    max_tokens=MAX_TOKENS,
                    top_p=1,
                    stream=True
                )

                for chunk in stream:
                    if chunk.choices and len(chunk.choices) > 0 and chunk.choices[0].delta.content is not None:
                        content_delta = chunk.choices[0].delta.content
                        full_response += content_delta
                        message_placeholder.markdown(full_response + "β–Œ", unsafe_allow_html=True)

                message_placeholder.markdown(full_response, unsafe_allow_html=True)

                if valid_videos:
                    st.subheader("κ΄€λ ¨ λΉ„λ””μ˜€")
                    for video in valid_videos:
                        video_title = video.get('title', 'κ΄€λ ¨ λΉ„λ””μ˜€')
                        video_url = video.get('url', '')

                        st.markdown(f"🎬 **[{video_title}]({video_url})**")
                        st.write(f"좜처: {video.get('source', 'μ•Œ 수 μ—†μŒ')}")

                status.update(label="응닡 μ™„λ£Œ!", state="complete")

                # Persist the answer (with its videos) into chat history.
                st.session_state.messages.append({
                    "role": "assistant",
                    "content": full_response,
                    "videos": valid_videos
                })

            except Exception as api_error:
                error_message = str(api_error)
                logging.error(f"API 였λ₯˜: {error_message}")
                status.update(label=f"였λ₯˜: {error_message}", state="error")
                # Chain the cause so the outer handler/traceback keeps it.
                raise Exception(f"응닡 생성 였λ₯˜: {error_message}") from api_error

            if st.session_state.generate_image and full_response:
                with st.spinner("λ§žμΆ€ν˜• 이미지 생성 쀑..."):
                    try:
                        ip = extract_image_prompt(full_response, query)
                        img, cap = generate_image(ip)
                        if img:
                            st.subheader("AI 생성 이미지")
                            st.image(img, caption=cap, use_container_width=True)
                    except Exception as img_error:
                        logging.error(f"이미지 생성 였λ₯˜: {str(img_error)}")
                        st.warning("λ§žμΆ€ν˜• 이미지 생성에 μ‹€νŒ¨ν–ˆμŠ΅λ‹ˆλ‹€.")

            if full_response:
                st.subheader("이 응닡 λ‹€μš΄λ‘œλ“œ")
                c1, c2 = st.columns(2)
                c1.download_button(
                    "λ§ˆν¬λ‹€μš΄",
                    data=full_response,
                    file_name=f"{query[:30]}.md",
                    mime="text/markdown"
                )
                c2.download_button(
                    "HTML",
                    data=md_to_html(full_response, query[:30]),
                    file_name=f"{query[:30]}.html",
                    mime="text/html"
                )

            if st.session_state.auto_save and st.session_state.messages:
                try:
                    fn = f"conversation_history_auto_{datetime.now():%Y%m%d_%H%M%S}.json"
                    with open(fn, "w", encoding="utf-8") as fp:
                        json.dump(st.session_state.messages, fp, ensure_ascii=False, indent=2)
                except Exception as e:
                    logging.error(f"μžλ™ μ €μž₯ μ‹€νŒ¨: {e}")

        except Exception as e:
            error_message = str(e)
            placeholder.error(f"였λ₯˜ λ°œμƒ: {error_message}")  # was mojibake "였��� λ°œμƒ"
            logging.error(f"μž…λ ₯ 처리 였λ₯˜: {error_message}")
            ans = f"μš”μ²­ 처리 쀑 였λ₯˜κ°€ λ°œμƒν–ˆμŠ΅λ‹ˆλ‹€: {error_message}"
            st.session_state.messages.append({"role": "assistant", "content": ans})
1483
-
1484
- # ──────────────────────────────── main ────────────────────────────────────
1485
def main():
    """Entry point: log the launch time to the page, then run the app."""
    started_at = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    st.write("==== μ• ν”Œλ¦¬μΌ€μ΄μ…˜ μ‹œμž‘ μ‹œκ°„:", started_at, "=====")
    agricultural_price_forecast_app()
1488
 
1489
  if __name__ == "__main__":
1490
- main()
1491
-
 
1
+ import os
 
 
 
 
 
2
  import sys
3
  import streamlit as st
4
+ from tempfile import NamedTemporaryFile
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5
 
6
def main():
    """Bootstrap loader: fetch the real app source from the MAIN_CODE secret,
    validate its syntax, and execute it in this module's global namespace.

    Fixes over the original:
    - the temp file is removed in a ``finally`` so it no longer leaks when
      the executed app raises;
    - the bare ``except:`` on cleanup is narrowed to ``OSError``;
    - the temp file is written with an explicit UTF-8 encoding so non-ASCII
      (e.g. Korean) source survives regardless of the system locale.
    """
    try:
        # Get the code from secrets
        code = os.environ.get("MAIN_CODE")

        if not code:
            st.error("⚠️ The application code wasn't found in secrets. Please add the MAIN_CODE secret.")
            return

        # Verify the code is syntactically valid before writing it to disk.
        try:
            compile(code, '<string>', 'exec')
        except SyntaxError as e:
            st.error(f"⚠️ Syntax error in the application code: {str(e)}")
            st.info("Please check your code for unterminated strings or other syntax errors.")

            # Show the problematic line if possible
            if hasattr(e, 'lineno') and hasattr(e, 'text'):
                st.code(f"Line {e.lineno}: {e.text}")
                st.write(f"Error occurs near character position: {e.offset}")
            return

        # Persist to a temp file so tracebacks reference a real filename.
        with NamedTemporaryFile(suffix='.py', delete=False, mode='w', encoding='utf-8') as tmp:
            tmp.write(code)
            tmp_path = tmp.name

        try:
            # SECURITY NOTE: exec of secret-supplied code is by design here —
            # whoever controls the MAIN_CODE secret controls this app.
            exec(compile(code, tmp_path, 'exec'), globals())
        finally:
            # Always clean up the temporary file, even if the app code raises.
            try:
                os.unlink(tmp_path)
            except OSError:
                pass

    except Exception as e:
        st.error(f"⚠️ Error loading or executing the application: {str(e)}")
        import traceback
        st.code(traceback.format_exc())
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
47
 
48
  if __name__ == "__main__":
49
+ main()