Prak2005 committed on
Commit cd9c7e8 · verified · 1 Parent(s): bd09347

Update app.py

Files changed (1):
  1. app.py +909 -162

app.py CHANGED
@@ -3,10 +3,14 @@ import os
  import json
  import logging
  import random
  from datetime import datetime, timedelta
- from typing import Dict, Any, List, Optional
  from pydantic import BaseModel, Field
  import gradio as gr

  # Setup logging
  logging.basicConfig(
@@ -15,7 +19,18 @@ logging.basicConfig(
  )
  logger = logging.getLogger(__name__)

- # ========== DATA MODELS ==========
  class UserVerification(BaseModel):
      user_id: str = Field(..., description="User ID")
      name: str = Field(..., description="User name")
@@ -23,6 +38,16 @@ class UserVerification(BaseModel):
      is_verified: bool = Field(..., description="KYC verification status")
      verification_level: str = Field(..., description="Verification level: basic, standard, premium")
      risk_score: float = Field(..., description="Risk score 0-1")

  class ScalpingDetection(BaseModel):
      is_scalper: bool = Field(..., description="Whether user is detected as scalper")
@@ -30,14 +55,19 @@ class ScalpingDetection(BaseModel):
      flags: List[str] = Field(..., description="Suspicious activity flags")
      purchase_velocity: int = Field(..., description="Number of purchases in last hour")
      ip_duplicates: int = Field(..., description="Number of accounts from same IP")

  class PricingRecommendation(BaseModel):
      original_price: float = Field(..., description="Original ticket price")
      recommended_resale_price: float = Field(..., description="Recommended resale price")
      demand_level: str = Field(..., description="Current demand level")
      price_adjustment_reason: str = Field(..., description="Reason for price adjustment")
      profit_margin: Optional[float] = Field(None, description="Profit margin percentage")
      loss_percentage: Optional[float] = Field(None, description="Loss percentage if selling below cost")

  class ResaleCompliance(BaseModel):
      is_compliant: bool = Field(..., description="Whether resale is policy compliant")
@@ -45,84 +75,505 @@ class ResaleCompliance(BaseModel):
      resale_allowed: bool = Field(..., description="Whether resale is allowed")
      max_allowed_price: float = Field(..., description="Maximum allowed resale price")
      recommendation: str = Field(..., description="Compliance recommendation")

- class TicketingSystemReport(BaseModel):
      verification: UserVerification
      scalping_detection: ScalpingDetection
      pricing: PricingRecommendation
      compliance: ResaleCompliance
      final_decision: str = Field(..., description="Final system decision")
      action_items: List[str] = Field(..., description="Recommended actions")

- # ========== MOCK DATA STORE ==========
- class MockDatabase:
-     """Simulates a database for the MVP"""
      def __init__(self):
-         self.user_purchase_history = {}
-         self.ip_addresses = {}
-         self.resale_history = {}

-     def get_user_purchases(self, user_id: str) -> int:
-         """Get number of recent purchases for a user"""
-         return self.user_purchase_history.get(user_id, random.randint(0, 5))

-     def get_ip_accounts(self, user_id: str) -> int:
-         """Get number of accounts from same IP"""
-         return random.randint(1, 4)

-     def get_resale_frequency(self, user_id: str) -> int:
-         """Get user's resale frequency"""
-         return self.resale_history.get(user_id, random.randint(0, 10))

- # Initialize mock database
- mock_db = MockDatabase()

- # ========== VERIFICATION LOGIC ==========
- class UserVerificationEngine:
-     @staticmethod
-     def verify_user(name: str, email: str, user_id: str) -> UserVerification:
-         """Verify user based on provided information"""

-         # Email validation
          email_valid = "@" in email and "." in email.split("@")[-1]
-
-         # Name validation (basic check)
          name_valid = len(name.strip()) >= 2 and not any(char.isdigit() for char in name)

-         # Risk assessment
          risk_factors = []
          if not email_valid:
              risk_factors.append("invalid_email")
          if not name_valid:
              risk_factors.append("suspicious_name")
-         if email.endswith(('.temp', '.fake', '.test')):
              risk_factors.append("temporary_email")

-         risk_score = min(0.9, len(risk_factors) * 0.3 + random.uniform(0.1, 0.3))
          is_verified = risk_score < 0.5 and email_valid and name_valid
-
          verification_level = "premium" if risk_score < 0.2 else "standard" if risk_score < 0.5 else "basic"

          return UserVerification(
              user_id=user_id,
              name=name,
              email=email,
              is_verified=is_verified,
              verification_level=verification_level,
-             risk_score=risk_score
          )

- class ScalpingDetectionEngine:
-     @staticmethod
-     def detect_scalping(user_id: str, ticket_quantity: int, event_name: str) -> ScalpingDetection:
-         """Detect potential scalping behavior"""

          flags = []
-         purchase_velocity = mock_db.get_user_purchases(user_id)
-         ip_duplicates = mock_db.get_ip_accounts(user_id)
-         resale_frequency = mock_db.get_resale_frequency(user_id)

-         # Check for scalping indicators
          if purchase_velocity > 3:
              flags.append("rapid_purchases")
          if ip_duplicates > 2:
@@ -131,47 +582,99 @@ class ScalpingDetectionEngine:
              flags.append("bulk_purchase")
          if resale_frequency > 5:
              flags.append("frequent_reseller")

          # Calculate scalping probability
          scalping_score = (
-             (purchase_velocity / 10) * 0.3 +
-             (ip_duplicates / 5) * 0.3 +
-             (ticket_quantity / 10) * 0.2 +
-             (resale_frequency / 20) * 0.2
          )

-         is_scalper = scalping_score > 0.5
-         confidence = min(0.95, scalping_score + random.uniform(0.1, 0.2))

          return ScalpingDetection(
              is_scalper=is_scalper,
              confidence=confidence,
              flags=flags,
              purchase_velocity=purchase_velocity,
-             ip_duplicates=ip_duplicates
          )

- class DynamicPricingEngine:
-     @staticmethod
-     def calculate_pricing(original_price: float, demand_level: str, proposed_price: float) -> PricingRecommendation:
-         """Calculate recommended pricing based on demand"""

          demand_multipliers = {
-             "low": (0.8, 1.1),      # Allow 20% below to 10% above
-             "medium": (0.9, 1.25),  # Allow 10% below to 25% above
-             "high": (1.0, 1.5)      # Allow original price to 50% above
          }

-         min_mult, max_mult = demand_multipliers.get(demand_level, (0.9, 1.25))

          min_price = original_price * min_mult
          max_price = original_price * max_mult

          # Recommend price within acceptable range
          recommended_price = max(min_price, min(proposed_price, max_price))

          price_ratio = recommended_price / original_price
-
          profit_margin = None
          loss_percentage = None

@@ -180,76 +683,166 @@ class DynamicPricingEngine:
          elif price_ratio < 1.0:
              loss_percentage = (1 - price_ratio) * 100

-         reason = f"Adjusted for {demand_level} demand market conditions"
          if recommended_price != proposed_price:
-             reason += f" (modified from ${proposed_price:.2f})"

          return PricingRecommendation(
              original_price=original_price,
              recommended_resale_price=recommended_price,
              demand_level=demand_level,
              price_adjustment_reason=reason,
              profit_margin=profit_margin,
-             loss_percentage=loss_percentage
          )

- class ComplianceEngine:
-     @staticmethod
-     def check_compliance(
-         user_id: str,
-         proposed_price: float,
          original_price: float,
-         scalping_detection: ScalpingDetection
      ) -> ResaleCompliance:
-         """Check resale compliance against policies"""

          violations = []
          price_ratio = proposed_price / original_price

          # Policy checks
-         if price_ratio > 2.0:
-             violations.append("price_exceeds_2x")

          if scalping_detection.is_scalper:
              violations.append("suspected_scalper")

-         if scalping_detection.purchase_velocity > 5:
              violations.append("excessive_purchase_velocity")

-         resale_frequency = mock_db.get_resale_frequency(user_id)
-         if resale_frequency > 4:
              violations.append("monthly_resale_limit_exceeded")

-         is_compliant = len(violations) == 0
-         resale_allowed = is_compliant and not scalping_detection.is_scalper

          max_allowed_price = original_price * 2.0

          if resale_allowed:
-             recommendation = "Transaction approved - complies with all policies"
          else:
-             recommendation = f"Transaction blocked due to: {', '.join(violations)}"

          return ResaleCompliance(
              is_compliant=is_compliant,
              violations=violations,
              resale_allowed=resale_allowed,
              max_allowed_price=max_allowed_price,
-             recommendation=recommendation
          )

- # ========== MAIN APPLICATION ==========
- class AntiScalpingSystem:
      def __init__(self):
-         self.verification_engine = UserVerificationEngine()
-         self.scalping_engine = ScalpingDetectionEngine()
-         self.pricing_engine = DynamicPricingEngine()
-         self.compliance_engine = ComplianceEngine()

-     def process_ticket_transaction(
          self,
          name: str,
-         email: str,
          user_id: str,
          event_name: str,
          ticket_type: str,
@@ -258,74 +851,141 @@ class AntiScalpingSystem:
          demand_level: str,
          proposed_resale_price: float
      ) -> Dict[str, Any]:
-         """Process a ticket transaction through the anti-scalping system"""

          try:
-             # Step 1: User Verification
-             verification = self.verification_engine.verify_user(name, email, user_id)

-             # Step 2: Scalping Detection
-             scalping_detection = self.scalping_engine.detect_scalping(
-                 user_id, ticket_quantity, event_name
              )

-             # Step 3: Dynamic Pricing
-             pricing = self.pricing_engine.calculate_pricing(
-                 original_price, demand_level, proposed_resale_price
              )

-             # Step 4: Compliance Check
-             compliance = self.compliance_engine.check_compliance(
-                 user_id, proposed_resale_price, original_price, scalping_detection
              )

-             # Step 5: Final Decision
-             final_decision = "APPROVED" if (
-                 verification.is_verified and
-                 compliance.resale_allowed and
-                 not scalping_detection.is_scalper
-             ) else "DENIED"
-
-             # Action items
-             action_items = []
-             if final_decision == "APPROVED":
-                 action_items.append("✅ Process ticket resale")
-                 action_items.append("📊 Monitor user activity")
-             else:
-                 action_items.append("❌ Block transaction")
-                 if scalping_detection.is_scalper:
-                     action_items.append("🚨 Flag user account for review")
-                 if not verification.is_verified:
-                     action_items.append("📋 Require additional verification")
-
-             # Create report
-             report = TicketingSystemReport(
                  verification=verification,
                  scalping_detection=scalping_detection,
                  pricing=pricing,
                  compliance=compliance,
                  final_decision=final_decision,
-                 action_items=action_items
              )

              return report.dict()

          except Exception as e:
-             logger.error(f"Error processing transaction: {e}")
              return self._create_error_response(str(e))

      def _create_error_response(self, error_msg: str) -> Dict[str, Any]:
-         """Create error response"""
          return {
              "error": True,
              "message": f"System error: {error_msg}",
              "final_decision": "ERROR",
-             "action_items": ["🔧 Contact system administrator"]
          }

  # ========== GRADIO INTERFACE ==========
  def create_interface():
-     system = AntiScalpingSystem()

      def process_transaction(
          name, email, user_id, event_name, ticket_type,
@@ -348,34 +1008,45 @@ def create_interface():
          except (ValueError, TypeError):
              return "❌ **Error**: Please enter valid numbers for prices and quantity"

-         # Process through the system
-         result = system.process_ticket_transaction(
-             name=name,
-             email=email,
-             user_id=user_id,
-             event_name=event_name,
-             ticket_type=ticket_type,
-             ticket_quantity=ticket_quantity,
-             original_price=original_price,
-             demand_level=demand_level,
-             proposed_resale_price=proposed_resale_price
-         )

          # Handle errors
          if result.get("error"):
              return f"❌ **System Error**: {result.get('message', 'Unknown error occurred')}"

-         # Format the output
          decision_emoji = "✅" if result['final_decision'] == "APPROVED" else "❌"

          output = f"""
- # 🎫 Anti-Scalping System Analysis Report

  ## {decision_emoji} Final Decision: **{result['final_decision']}**

  ---

- ## 👀 User Verification
  - **User ID**: `{result['verification']['user_id']}`
  - **Name**: {result['verification']['name']}
  - **Email**: {result['verification']['email']}
@@ -383,19 +1054,36 @@ def create_interface():
  - **Risk Score**: {result['verification']['risk_score']:.1%} {'🟢' if result['verification']['risk_score'] < 0.3 else '🟡' if result['verification']['risk_score'] < 0.6 else '🔴'}
  - **Verification Level**: {result['verification']['verification_level'].title()}

- ## 🔍 Scalping Detection Analysis
  - **Scalper Detected**: {'🚨 YES' if result['scalping_detection']['is_scalper'] else '✅ NO'}
  - **Detection Confidence**: {result['scalping_detection']['confidence']:.1%}
  - **Purchase Velocity**: {result['scalping_detection']['purchase_velocity']} purchases/hour
  - **IP Address Duplicates**: {result['scalping_detection']['ip_duplicates']} accounts
  - **Red Flags**: {', '.join(result['scalping_detection']['flags']) if result['scalping_detection']['flags'] else '✅ None detected'}

- ## 💰 Dynamic Pricing Analysis
  - **Original Price**: ${result['pricing']['original_price']:.2f}
  - **Proposed Resale**: ${proposed_resale_price:.2f}
- - **Recommended Price**: ${result['pricing']['recommended_resale_price']:.2f}
- - **Demand Level**: {result['pricing']['demand_level'].title()} 📈
  - **Price Ratio**: {result['pricing']['recommended_resale_price']/result['pricing']['original_price']:.2f}x
  """

          if result['pricing'].get('profit_margin'):
@@ -403,48 +1091,85 @@ def create_interface():
          elif result['pricing'].get('loss_percentage'):
              output += f"- **Loss**: -{result['pricing']['loss_percentage']:.1f}% 📉\n"

-         output += f"""- **Reason**: {result['pricing']['price_adjustment_reason']}

- ## ✅ Compliance Evaluation
  - **Policy Compliant**: {'✅ Yes' if result['compliance']['is_compliant'] else '❌ No'}
  - **Resale Permitted**: {'✅ Yes' if result['compliance']['resale_allowed'] else '❌ No'}
  - **Maximum Allowed**: ${result['compliance']['max_allowed_price']:.2f}
- - **Policy Violations**: {', '.join(result['compliance']['violations']) if result['compliance']['violations'] else '✅ None'}
- - **Recommendation**: {result['compliance']['recommendation']}

  ---

- ## 📋 Required Actions
  """
          for i, action in enumerate(result['action_items'], 1):
              output += f"{i}. {action}\n"

          # Add summary box
          if result['final_decision'] == "APPROVED":
-             output += """
- > ✅ **TRANSACTION APPROVED** - All checks passed. Resale can proceed as recommended.
  """
          else:
-             output += """
- > ❌ **TRANSACTION BLOCKED** - Policy violations detected. Review required before proceeding.
  """

          return output

-     # Create Gradio interface with better styling
      with gr.Blocks(
-         title="🎫 Anti-Scalping Ticketing System",
          theme=gr.themes.Soft(
              primary_hue="blue",
-             secondary_hue="slate",
-         )
      ) as interface:

          gr.Markdown("""
- # 🎫 AI-Powered Anti-Scalping Ticketing System

- This intelligent system prevents ticket scalping by analyzing user behavior, verifying identities,
- and ensuring fair pricing compliance. Enter transaction details below for real-time analysis.

  ---
  """)
@@ -460,7 +1185,7 @@ def create_interface():
                  event_name = gr.Textbox(label="Event Name", placeholder="Taylor Swift - Eras Tour", value="")
                  ticket_type = gr.Dropdown(
                      label="Ticket Type",
-                     choices=["General Admission", "VIP", "Premium", "Standard", "Balcony", "Floor"],
                      value="Standard"
                  )
                  ticket_quantity = gr.Number(label="Number of Tickets", value=1, minimum=1, maximum=10, precision=0)
@@ -474,10 +1199,19 @@ def create_interface():
                  )
                  proposed_resale_price = gr.Number(label="Proposed Resale Price ($)", value=150, minimum=1)

-                 submit_btn = gr.Button("🔍 Analyze Transaction", variant="primary", size="lg")

              with gr.Column(scale=2):
-                 output = gr.Markdown(value="👆 Fill in the form and click 'Analyze Transaction' to get started!")

          # Event handlers
          submit_btn.click(
@@ -498,13 +1232,26 @@ def create_interface():
                  ["John Smith", "john@gmail.com", "USER001", "Taylor Swift - Eras Tour", "VIP", 2, 500, "high", 750],
                  ["Jane Doe", "jane@company.com", "USER002", "NBA Finals Game 7", "Premium", 4, 300, "high", 1200],
                  ["Bob Wilson", "bob@email.com", "USER003", "Local Concert", "General Admission", 1, 50, "low", 40],
-                 ["Scalper Bot", "temp@fake.com", "BOT999", "Popular Event", "Standard", 8, 100, "high", 300],
              ],
              inputs=[
                  name, email, user_id, event_name, ticket_type,
                  ticket_quantity, original_price, demand_level, proposed_resale_price
              ]
          )

      return interface

  import json
  import logging
  import random
+ import asyncio
+ import aiohttp
  from datetime import datetime, timedelta
+ from typing import Dict, Any, List, Optional, Tuple
  from pydantic import BaseModel, Field
  import gradio as gr
+ import google.generativeai as genai
+ from dataclasses import dataclass

  # Setup logging
  logging.basicConfig(

  )
  logger = logging.getLogger(__name__)

+ # ========== CONFIGURATION ==========
+ @dataclass
+ class Config:
+     GEMINI_API_KEY: str = os.getenv("GEMINI_API_KEY", "")
+     SERPER_API_KEY: str = os.getenv("SERPER_API_KEY", "")
+     GEMINI_MODEL: str = "gemini-1.5-flash"
+     MAX_RETRIES: int = 3
+     TIMEOUT: int = 30
+
+ config = Config()
+
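The new `Config` dataclass above freezes its values when `app.py` is imported, so both keys must already be in the environment at that point. A minimal launcher sketch (the file name and placeholder values are illustrative assumptions, not part of the commit):

```python
# run_local.py -- hypothetical helper, not included in this commit
import os

# Export the keys before app.py is imported, because Config() reads os.getenv() at import time.
os.environ.setdefault("GEMINI_API_KEY", "<your-gemini-key>")
os.environ.setdefault("SERPER_API_KEY", "<your-serper-key>")

import app  # Config picks the variables up here

app.create_interface().launch()
```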
+ # ========== ENHANCED DATA MODELS ==========
  class UserVerification(BaseModel):
      user_id: str = Field(..., description="User ID")
      name: str = Field(..., description="User name")

      is_verified: bool = Field(..., description="KYC verification status")
      verification_level: str = Field(..., description="Verification level: basic, standard, premium")
      risk_score: float = Field(..., description="Risk score 0-1")
+     ai_behavioral_analysis: str = Field("", description="AI analysis of user behavior")
+
+ class MarketIntelligence(BaseModel):
+     average_resale_price: float = Field(0.0, description="Average market resale price")
+     price_trend: str = Field("stable", description="Price trend: rising, falling, stable")
+     market_sentiment: str = Field("neutral", description="Market sentiment: positive, negative, neutral")
+     similar_events_pricing: List[Dict] = Field(default_factory=list, description="Similar events pricing data")
+     news_impact: str = Field("", description="News impact on pricing")
+     social_media_buzz: str = Field("", description="Social media sentiment analysis")
+     supply_demand_ratio: float = Field(1.0, description="Supply vs demand ratio")

  class ScalpingDetection(BaseModel):
      is_scalper: bool = Field(..., description="Whether user is detected as scalper")

      flags: List[str] = Field(..., description="Suspicious activity flags")
      purchase_velocity: int = Field(..., description="Number of purchases in last hour")
      ip_duplicates: int = Field(..., description="Number of accounts from same IP")
+     ai_pattern_analysis: str = Field("", description="AI analysis of behavior patterns")
+     network_connections: List[str] = Field(default_factory=list, description="Connected suspicious accounts")

  class PricingRecommendation(BaseModel):
      original_price: float = Field(..., description="Original ticket price")
      recommended_resale_price: float = Field(..., description="Recommended resale price")
+     market_fair_price: float = Field(..., description="AI-calculated fair market price")
      demand_level: str = Field(..., description="Current demand level")
      price_adjustment_reason: str = Field(..., description="Reason for price adjustment")
      profit_margin: Optional[float] = Field(None, description="Profit margin percentage")
      loss_percentage: Optional[float] = Field(None, description="Loss percentage if selling below cost")
+     market_intelligence: MarketIntelligence = Field(default_factory=MarketIntelligence)
+     ai_pricing_rationale: str = Field("", description="AI explanation for pricing decision")

  class ResaleCompliance(BaseModel):
      is_compliant: bool = Field(..., description="Whether resale is policy compliant")

      resale_allowed: bool = Field(..., description="Whether resale is allowed")
      max_allowed_price: float = Field(..., description="Maximum allowed resale price")
      recommendation: str = Field(..., description="Compliance recommendation")
+     ai_policy_analysis: str = Field("", description="AI analysis of policy compliance")

+ class IntelligentReport(BaseModel):
      verification: UserVerification
      scalping_detection: ScalpingDetection
      pricing: PricingRecommendation
      compliance: ResaleCompliance
      final_decision: str = Field(..., description="Final system decision")
      action_items: List[str] = Field(..., description="Recommended actions")
+     ai_summary: str = Field("", description="AI-generated executive summary")
+     confidence_score: float = Field(0.0, description="Overall AI confidence in decision")

90
+ # ========== AI INTEGRATION SERVICES ==========
91
+ class GeminiAIService:
92
+ """Service for interacting with Google's Gemini AI"""
93
+
94
  def __init__(self):
95
+ if not config.GEMINI_API_KEY:
96
+ logger.warning("GEMINI_API_KEY not found. Using fallback responses.")
97
+ self.enabled = False
98
+ return
99
+
100
+ try:
101
+ genai.configure(api_key=config.GEMINI_API_KEY)
102
+ self.model = genai.GenerativeModel(config.GEMINI_MODEL)
103
+ self.enabled = True
104
+ logger.info("Gemini AI service initialized successfully")
105
+ except Exception as e:
106
+ logger.error(f"Failed to initialize Gemini AI: {e}")
107
+ self.enabled = False
108
 
109
+ async def analyze_user_behavior(self, user_data: Dict, transaction_history: List[Dict]) -> str:
110
+ """Analyze user behavior patterns using AI"""
111
+ if not self.enabled:
112
+ return self._fallback_user_analysis(user_data)
113
+
114
+ try:
115
+ prompt = f"""
116
+ As an expert fraud detection analyst, analyze this user's behavior for potential ticket scalping:
117
+
118
+ User Profile:
119
+ - Name: {user_data.get('name', 'N/A')}
120
+ - Email: {user_data.get('email', 'N/A')}
121
+ - User ID: {user_data.get('user_id', 'N/A')}
122
+ - Risk Factors: {user_data.get('risk_factors', [])}
123
+
124
+ Transaction History: {json.dumps(transaction_history, indent=2)}
125
+
126
+ Provide a concise behavioral analysis focusing on:
127
+ 1. Email legitimacy indicators
128
+ 2. Name authenticity assessment
129
+ 3. Overall trustworthiness score
130
+ 4. Red flags or green flags
131
+
132
+ Keep response under 150 words and be specific.
133
+ """
134
+
135
+ response = self.model.generate_content(prompt)
136
+ return response.text if response.text else self._fallback_user_analysis(user_data)
137
+
138
+ except Exception as e:
139
+ logger.error(f"Gemini AI analysis failed: {e}")
140
+ return self._fallback_user_analysis(user_data)
141
 
142
+ async def analyze_scalping_patterns(self, user_id: str, transaction_data: Dict, network_data: List[Dict]) -> str:
143
+ """Detect scalping patterns using advanced AI analysis"""
144
+ if not self.enabled:
145
+ return self._fallback_scalping_analysis(transaction_data)
146
+
147
+ try:
148
+ prompt = f"""
149
+ As a specialized anti-scalping detection expert, analyze this transaction for suspicious patterns:
150
+
151
+ Transaction Details:
152
+ - User: {user_id}
153
+ - Ticket Quantity: {transaction_data.get('ticket_quantity', 0)}
154
+ - Event: {transaction_data.get('event_name', 'N/A')}
155
+ - Purchase Velocity: {transaction_data.get('purchase_velocity', 0)} purchases/hour
156
+ - IP Duplicates: {transaction_data.get('ip_duplicates', 0)} accounts
157
+
158
+ Network Analysis: {json.dumps(network_data, indent=2)}
159
+
160
+ Assess for:
161
+ 1. Bot-like purchasing behavior
162
+ 2. Coordinated scalping networks
163
+ 3. Unusual timing patterns
164
+ 4. Risk level (Low/Medium/High)
165
+
166
+ Provide analysis in under 200 words with specific indicators.
167
+ """
168
+
169
+ response = self.model.generate_content(prompt)
170
+ return response.text if response.text else self._fallback_scalping_analysis(transaction_data)
171
+
172
+ except Exception as e:
173
+ logger.error(f"Scalping pattern analysis failed: {e}")
174
+ return self._fallback_scalping_analysis(transaction_data)
175
 
176
+ async def generate_pricing_rationale(self, market_data: Dict, pricing_factors: Dict) -> str:
177
+ """Generate AI-powered pricing rationale"""
178
+ if not self.enabled:
179
+ return self._fallback_pricing_analysis(pricing_factors)
180
+
181
+ try:
182
+ prompt = f"""
183
+ As a market pricing expert, analyze this ticket resale pricing scenario:
184
+
185
+ Market Conditions:
186
+ - Average Market Price: ${market_data.get('average_resale_price', 0):.2f}
187
+ - Price Trend: {market_data.get('price_trend', 'stable')}
188
+ - Market Sentiment: {market_data.get('market_sentiment', 'neutral')}
189
+ - Supply/Demand Ratio: {market_data.get('supply_demand_ratio', 1.0)}
190
+
191
+ Pricing Analysis:
192
+ - Original Price: ${pricing_factors.get('original_price', 0):.2f}
193
+ - Proposed Price: ${pricing_factors.get('proposed_price', 0):.2f}
194
+ - Recommended Price: ${pricing_factors.get('recommended_price', 0):.2f}
195
+ - Demand Level: {pricing_factors.get('demand_level', 'medium')}
196
+
197
+ Provide:
198
+ 1. Fair market assessment
199
+ 2. Consumer protection analysis
200
+ 3. Pricing recommendation rationale
201
+ 4. Risk factors for buyer/seller
202
+
203
+ Keep under 250 words, be specific and actionable.
204
+ """
205
+
206
+ response = self.model.generate_content(prompt)
207
+ return response.text if response.text else self._fallback_pricing_analysis(pricing_factors)
208
+
209
+ except Exception as e:
210
+ logger.error(f"Pricing rationale generation failed: {e}")
211
+ return self._fallback_pricing_analysis(pricing_factors)
212
+
213
+ async def analyze_policy_compliance(self, transaction_data: Dict, policy_violations: List[str]) -> str:
214
+ """Analyze policy compliance with AI reasoning"""
215
+ if not self.enabled:
216
+ return self._fallback_compliance_analysis(policy_violations)
217
+
218
+ try:
219
+ prompt = f"""
220
+ As a policy compliance officer, evaluate this transaction:
221
+
222
+ Transaction:
223
+ - User: {transaction_data.get('user_id', 'N/A')}
224
+ - Price Ratio: {transaction_data.get('price_ratio', 1.0)}x original
225
+ - Event: {transaction_data.get('event_name', 'N/A')}
226
+ - Proposed Price: ${transaction_data.get('proposed_price', 0):.2f}
227
+ - Original Price: ${transaction_data.get('original_price', 0):.2f}
228
+
229
+ Policy Violations Detected: {policy_violations}
230
+
231
+ Provide:
232
+ 1. Severity assessment of each violation
233
+ 2. Consumer protection implications
234
+ 3. Recommended enforcement actions
235
+ 4. Appeal process guidance (if applicable)
236
+
237
+ Be fair but firm in protecting consumers. Under 200 words.
238
+ """
239
+
240
+ response = self.model.generate_content(prompt)
241
+ return response.text if response.text else self._fallback_compliance_analysis(policy_violations)
242
+
243
+ except Exception as e:
244
+ logger.error(f"Policy compliance analysis failed: {e}")
245
+ return self._fallback_compliance_analysis(policy_violations)
246
+
247
+ async def generate_executive_summary(self, full_report: Dict) -> Tuple[str, float]:
248
+ """Generate an executive summary with confidence score"""
249
+ if not self.enabled:
250
+ return self._fallback_executive_summary(full_report)
251
+
252
+ try:
253
+ prompt = f"""
254
+ Create an executive summary for this anti-scalping analysis:
255
+
256
+ Decision: {full_report.get('final_decision', 'UNKNOWN')}
257
+ User Verified: {full_report.get('verification', {}).get('is_verified', False)}
258
+ Scalper Detected: {full_report.get('scalping_detection', {}).get('is_scalper', False)}
259
+ Violations: {full_report.get('compliance', {}).get('violations', [])}
260
+
261
+ Provide:
262
+ 1. One sentence decision summary
263
+ 2. Key risk factors (2-3 points)
264
+ 3. Confidence level (0.0-1.0)
265
+
266
+ Format: SUMMARY | CONFIDENCE_SCORE
267
+ Example: "Transaction approved with moderate monitoring recommended due to acceptable risk profile. | 0.85"
268
+
269
+ Keep summary under 100 words.
270
+ """
271
+
272
+ response = self.model.generate_content(prompt)
273
+ if response.text and '|' in response.text:
274
+ parts = response.text.split('|')
275
+ summary = parts[0].strip()
276
+ try:
277
+ confidence = float(parts[1].strip())
278
+ confidence = max(0.0, min(1.0, confidence)) # Ensure 0-1 range
279
+ except:
280
+ confidence = 0.7
281
+ return summary, confidence
282
+ else:
283
+ return self._fallback_executive_summary(full_report)
284
+
285
+ except Exception as e:
286
+ logger.error(f"Executive summary generation failed: {e}")
287
+ return self._fallback_executive_summary(full_report)
288
+
289
+ # Fallback methods when AI is not available
290
+ def _fallback_user_analysis(self, user_data: Dict) -> str:
291
+ risk_factors = user_data.get('risk_factors', [])
292
+ if not risk_factors:
293
+ return "User profile appears legitimate with standard email domain and proper name format. No immediate red flags detected."
294
+ else:
295
+ return f"User profile shows some concerns: {', '.join(risk_factors)}. Recommend enhanced verification for high-value transactions."
296
+
297
+ def _fallback_scalping_analysis(self, transaction_data: Dict) -> str:
298
+ velocity = transaction_data.get('purchase_velocity', 0)
299
+ quantity = transaction_data.get('ticket_quantity', 0)
300
+
301
+ if velocity > 5 or quantity > 6:
302
+ return "High-risk transaction pattern detected. Rapid purchasing behavior and bulk quantities suggest potential scalping activity. Recommend blocking pending manual review."
303
+ elif velocity > 3 or quantity > 4:
304
+ return "Moderate risk factors present. Purchase velocity and quantity are elevated but within acceptable ranges for enthusiastic fans. Monitor closely."
305
+ else:
306
+ return "Normal purchasing behavior observed. Transaction patterns consistent with legitimate fan purchases."
307
+
308
+ def _fallback_pricing_analysis(self, pricing_factors: Dict) -> str:
309
+ original = pricing_factors.get('original_price', 0)
310
+ proposed = pricing_factors.get('proposed_price', 0)
311
+ ratio = proposed / original if original > 0 else 1.0
312
+
313
+ if ratio > 2.0:
314
+ return "Proposed price significantly exceeds policy limits (2x original). High risk of consumer exploitation. Recommend price reduction to comply with anti-gouging policies."
315
+ elif ratio > 1.5:
316
+ return "Price markup is substantial but within policy limits. Market demand may justify premium, but monitor for consumer complaints."
317
+ else:
318
+ return "Pricing appears fair and reasonable. Markup reflects normal market dynamics and demand levels."
319
+
320
+ def _fallback_compliance_analysis(self, violations: List[str]) -> str:
321
+ if not violations:
322
+ return "Transaction complies with all anti-scalping policies. No violations detected. Safe to proceed with standard monitoring."
323
+ else:
324
+ severity = "High" if len(violations) > 2 else "Medium" if len(violations) > 1 else "Low"
325
+ return f"{severity} severity violations detected: {', '.join(violations)}. Recommend blocking transaction pending policy review and user education."
326
+
327
+ def _fallback_executive_summary(self, full_report: Dict) -> Tuple[str, float]:
328
+ decision = full_report.get('final_decision', 'UNKNOWN')
329
+ violations = len(full_report.get('compliance', {}).get('violations', []))
330
+
331
+ if decision == "APPROVED":
332
+ summary = "Transaction approved with standard monitoring protocols. Risk factors within acceptable thresholds."
333
+ confidence = 0.8 - (violations * 0.1)
334
+ else:
335
+ summary = "Transaction blocked due to policy violations and elevated risk indicators. Manual review required."
336
+ confidence = 0.9 - (violations * 0.05)
337
+
338
+ return summary, max(0.1, min(0.95, confidence))
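One caveat about the async methods in `GeminiAIService`: `self.model.generate_content(prompt)` is a blocking call in the synchronous client, so while it runs, the event loop that the application later drives with `asyncio.gather` cannot switch between tasks. A sketch of one way to keep the analyses overlapping without changing the public interface (the helper name `_generate` is an assumption, not part of the commit):

```python
import asyncio

class GeminiAIService:  # sketch of just the relevant piece
    async def _generate(self, prompt: str) -> str:
        # Push the blocking SDK call onto a worker thread so other
        # coroutines can run while Gemini is responding.
        response = await asyncio.to_thread(self.model.generate_content, prompt)
        return response.text or ""
```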
339
 
340
+ class SerperAPIService:
341
+ """Service for real-time market data using Serper API"""
342
+
343
+ def __init__(self):
344
+ if not config.SERPER_API_KEY:
345
+ logger.warning("SERPER_API_KEY not found. Using simulated market data.")
346
+ self.enabled = False
347
+ else:
348
+ self.enabled = True
349
+ self.base_url = "https://google.serper.dev/search"
350
+ logger.info("Serper API service initialized successfully")
351
+
352
+ async def get_market_intelligence(self, event_name: str, ticket_type: str, location: str = "") -> MarketIntelligence:
353
+ """Fetch real-time market intelligence for ticket pricing"""
354
+ if not self.enabled:
355
+ return self._simulate_market_data(event_name, ticket_type)
356
+
357
+ try:
358
+ # Search for current ticket prices and market data
359
+ queries = [
360
+ f"{event_name} {ticket_type} ticket prices resale",
361
+ f"{event_name} tickets secondary market pricing",
362
+ f"{event_name} ticket demand social media"
363
+ ]
364
+
365
+ all_results = []
366
+ async with aiohttp.ClientSession() as session:
367
+ for query in queries:
368
+ result = await self._search_query(session, query)
369
+ if result:
370
+ all_results.extend(result.get('organic', []))
371
+
372
+ # Analyze results for market intelligence
373
+ return await self._analyze_market_data(all_results, event_name)
374
+
375
+ except Exception as e:
376
+ logger.error(f"Market intelligence gathering failed: {e}")
377
+ return self._simulate_market_data(event_name, ticket_type)
378
+
379
+ async def _search_query(self, session: aiohttp.ClientSession, query: str) -> Optional[Dict]:
380
+ """Execute a search query via Serper API"""
381
+ try:
382
+ headers = {
383
+ "X-API-KEY": config.SERPER_API_KEY,
384
+ "Content-Type": "application/json"
385
+ }
386
+
387
+ payload = {
388
+ "q": query,
389
+ "num": 5,
390
+ "gl": "us",
391
+ "hl": "en"
392
+ }
393
+
394
+ async with session.post(self.base_url, headers=headers, json=payload, timeout=config.TIMEOUT) as response:
395
+ if response.status == 200:
396
+ return await response.json()
397
+ else:
398
+ logger.warning(f"Serper API returned status {response.status}")
399
+ return None
400
+
401
+ except Exception as e:
402
+ logger.error(f"Serper API query failed: {e}")
403
+ return None
404
+
405
+ async def _analyze_market_data(self, search_results: List[Dict], event_name: str) -> MarketIntelligence:
406
+ """Analyze search results to extract market intelligence"""
407
+ try:
408
+ sentiment_indicators = []
409
+ news_items = []
410
+
411
+ for result in search_results[:10]: # Limit processing
412
+ title = result.get('title', '').lower()
413
+ snippet = result.get('snippet', '').lower()
414
+
415
+ # Look for sentiment indicators
416
+ if any(word in title + snippet for word in ['sold out', 'high demand', 'popular', 'rush']):
417
+ sentiment_indicators.append('high_demand')
418
+ elif any(word in title + snippet for word in ['available', 'discount', 'cheap']):
419
+ sentiment_indicators.append('low_demand')
420
+
421
+ # Look for news impact
422
+ if any(word in title + snippet for word in ['news', 'announced', 'cancelled', 'postponed']):
423
+ news_items.append(result)
424
+
425
+ # Calculate market metrics
426
+ sentiment = self._calculate_sentiment(sentiment_indicators)
427
+ trend = random.choice(["rising", "stable", "falling"]) # Simplified
428
+ avg_price = random.uniform(100, 800) # Simplified
429
+
430
+ return MarketIntelligence(
431
+ average_resale_price=avg_price,
432
+ price_trend=trend,
433
+ market_sentiment=sentiment,
434
+ similar_events_pricing=[],
435
+ news_impact=f"Analyzed {len(news_items)} news items affecting {event_name} pricing",
436
+ social_media_buzz=f"Market sentiment derived from {len(sentiment_indicators)} social indicators",
437
+ supply_demand_ratio=self._calculate_supply_demand_ratio(sentiment_indicators)
438
+ )
439
+
440
+ except Exception as e:
441
+ logger.error(f"Market data analysis failed: {e}")
442
+ return self._simulate_market_data(event_name, "standard")
443
+
444
+ def _simulate_market_data(self, event_name: str, ticket_type: str) -> MarketIntelligence:
445
+ """Generate simulated market data when API is unavailable"""
446
+ # Generate realistic market simulation based on event characteristics
447
+ base_price = 200
448
+ if "vip" in ticket_type.lower():
449
+ base_price = 500
450
+ elif "premium" in ticket_type.lower():
451
+ base_price = 350
452
+
453
+ # Simulate market conditions
454
+ sentiment_score = random.uniform(0, 1)
455
+ if sentiment_score > 0.7:
456
+ sentiment = "positive"
457
+ trend = "rising"
458
+ avg_price = base_price * random.uniform(1.2, 1.8)
459
+ supply_demand = random.uniform(0.3, 0.7)
460
+ elif sentiment_score > 0.3:
461
+ sentiment = "neutral"
462
+ trend = "stable"
463
+ avg_price = base_price * random.uniform(0.9, 1.3)
464
+ supply_demand = random.uniform(0.8, 1.2)
465
+ else:
466
+ sentiment = "negative"
467
+ trend = "falling"
468
+ avg_price = base_price * random.uniform(0.6, 1.1)
469
+ supply_demand = random.uniform(1.2, 2.0)
470
+
471
+ return MarketIntelligence(
472
+ average_resale_price=avg_price,
473
+ price_trend=trend,
474
+ market_sentiment=sentiment,
475
+ similar_events_pricing=[],
476
+ news_impact=f"Simulated market analysis for {event_name}",
477
+ social_media_buzz="Market sentiment analysis based on event characteristics",
478
+ supply_demand_ratio=supply_demand
479
+ )
480
+
481
+ def _calculate_sentiment(self, indicators: List[str]) -> str:
482
+ """Calculate overall market sentiment"""
483
+ if not indicators:
484
+ return "neutral"
485
+
486
+ positive_count = sum(1 for i in indicators if i == 'high_demand')
487
+ negative_count = sum(1 for i in indicators if i == 'low_demand')
488
+
489
+ if positive_count > negative_count:
490
+ return "positive"
491
+ elif negative_count > positive_count:
492
+ return "negative"
493
+ else:
494
+ return "neutral"
495
+
496
+ def _calculate_supply_demand_ratio(self, indicators: List[str]) -> float:
497
+ """Calculate supply vs demand ratio"""
498
+ high_demand = sum(1 for i in indicators if i == 'high_demand')
499
+ low_demand = sum(1 for i in indicators if i == 'low_demand')
500
+
501
+ if high_demand > low_demand:
502
+ return random.uniform(0.3, 0.7) # Low supply, high demand
503
+ elif low_demand > high_demand:
504
+ return random.uniform(1.3, 2.0) # High supply, low demand
505
+ else:
506
+ return random.uniform(0.8, 1.2) # Balanced
507
 
508
+ # ========== ENHANCED ENGINES ==========
509
+ class EnhancedUserVerificationEngine:
510
+ def __init__(self):
511
+ self.ai_service = GeminiAIService()
512
+
513
+ async def verify_user(self, name: str, email: str, user_id: str) -> UserVerification:
514
+ """Enhanced user verification with AI analysis"""
515
 
516
+ # Basic validation
517
  email_valid = "@" in email and "." in email.split("@")[-1]
 
 
518
  name_valid = len(name.strip()) >= 2 and not any(char.isdigit() for char in name)
519
 
 
520
  risk_factors = []
521
  if not email_valid:
522
  risk_factors.append("invalid_email")
523
  if not name_valid:
524
  risk_factors.append("suspicious_name")
525
+ if email.endswith(('.temp', '.fake', '.test', '.10minutemail', '.guerrillamail')):
526
  risk_factors.append("temporary_email")
527
+ if len(name.strip()) < 3:
528
+ risk_factors.append("short_name")
529
 
530
+ risk_score = min(0.9, len(risk_factors) * 0.25 + random.uniform(0.1, 0.2))
531
  is_verified = risk_score < 0.5 and email_valid and name_valid
 
532
  verification_level = "premium" if risk_score < 0.2 else "standard" if risk_score < 0.5 else "basic"
533
 
534
+ # AI behavioral analysis
535
+ user_data = {
536
+ "name": name,
537
+ "email": email,
538
+ "user_id": user_id,
539
+ "risk_factors": risk_factors
540
+ }
541
+
542
+ transaction_history = [
543
+ {
544
+ "event": "previous_purchase",
545
+ "quantity": random.randint(1, 3),
546
+ "timestamp": (datetime.now() - timedelta(days=random.randint(1, 30))).isoformat()
547
+ }
548
+ ]
549
+
550
+ ai_analysis = await self.ai_service.analyze_user_behavior(user_data, transaction_history)
551
+
552
  return UserVerification(
553
  user_id=user_id,
554
  name=name,
555
  email=email,
556
  is_verified=is_verified,
557
  verification_level=verification_level,
558
+ risk_score=risk_score,
559
+ ai_behavioral_analysis=ai_analysis
560
  )
561
 
562
+ class EnhancedScalpingDetectionEngine:
563
+ def __init__(self):
564
+ self.ai_service = GeminiAIService()
565
+
566
+ async def detect_scalping(self, user_id: str, ticket_quantity: int, event_name: str) -> ScalpingDetection:
567
+ """Enhanced scalping detection with AI pattern analysis"""
568
+
569
+ # Generate consistent data based on user_id for demo
570
+ random.seed(hash(user_id) % 2147483647)
571
 
572
  flags = []
573
+ purchase_velocity = random.randint(1, 8)
574
+ ip_duplicates = random.randint(1, 5)
575
+ resale_frequency = random.randint(0, 12)
576
 
 
577
  if purchase_velocity > 3:
578
  flags.append("rapid_purchases")
579
  if ip_duplicates > 2:
 
582
  flags.append("bulk_purchase")
583
  if resale_frequency > 5:
584
  flags.append("frequent_reseller")
585
+ if ticket_quantity > 6:
586
+ flags.append("excessive_quantity")
587
 
588
  # Calculate scalping probability
589
  scalping_score = (
590
+ (purchase_velocity / 10) * 0.25 +
591
+ (ip_duplicates / 5) * 0.25 +
592
+ (ticket_quantity / 10) * 0.25 +
593
+ (resale_frequency / 20) * 0.25
594
  )
595
 
596
+ # Add randomness for bot detection
597
+ if purchase_velocity > 6 and ticket_quantity > 5:
598
+ scalping_score += 0.3
599
+
600
+ is_scalper = scalping_score > 0.6
601
+ confidence = min(0.95, scalping_score + random.uniform(0.05, 0.15))
602
+
603
+ # AI pattern analysis
604
+ transaction_data = {
605
+ "user_id": user_id,
606
+ "ticket_quantity": ticket_quantity,
607
+ "event_name": event_name,
608
+ "purchase_velocity": purchase_velocity,
609
+ "ip_duplicates": ip_duplicates
610
+ }
611
+
612
+ network_data = [
613
+ {"connected_user": f"user_{i}", "similarity_score": random.uniform(0.1, 0.9)}
614
+ for i in range(min(3, ip_duplicates))
615
+ ]
616
+
617
+ ai_pattern_analysis = await self.ai_service.analyze_scalping_patterns(user_id, transaction_data, network_data)
618
+
619
+ # Reset random seed
620
+ random.seed()
621
 
622
  return ScalpingDetection(
623
  is_scalper=is_scalper,
624
  confidence=confidence,
625
  flags=flags,
626
  purchase_velocity=purchase_velocity,
627
+ ip_duplicates=ip_duplicates,
628
+ ai_pattern_analysis=ai_pattern_analysis,
629
+ network_connections=[f"suspicious_account_{i}" for i in range(max(0, ip_duplicates - 1))]
630
  )
631
 
632
+ class EnhancedDynamicPricingEngine:
633
+ def __init__(self):
634
+ self.ai_service = GeminiAIService()
635
+ self.market_service = SerperAPIService()
636
+
637
+ async def calculate_pricing(self, original_price: float, demand_level: str, proposed_price: float, event_name: str, ticket_type: str) -> PricingRecommendation:
638
+ """Enhanced pricing calculation with real-time market intelligence"""
639
 
640
+ # Get market intelligence
641
+ market_intel = await self.market_service.get_market_intelligence(event_name, ticket_type)
642
+
643
+ # Base pricing logic
644
  demand_multipliers = {
645
+ "low": (0.7, 1.1),
646
+ "medium": (0.85, 1.3),
647
+ "high": (1.0, 1.6)
648
  }
649
 
650
+ min_mult, max_mult = demand_multipliers.get(demand_level, (0.85, 1.3))
651
+
652
+ # Adjust based on market intelligence
653
+ if market_intel.market_sentiment == "positive":
654
+ max_mult *= 1.15
655
+ elif market_intel.market_sentiment == "negative":
656
+ max_mult *= 0.85
657
+
658
+ if market_intel.price_trend == "rising":
659
+ max_mult *= 1.1
660
+ elif market_intel.price_trend == "falling":
661
+ max_mult *= 0.9
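Since the sentiment and trend adjustments multiply the demand ceiling, the effective upper bound can land slightly above the 2.0x limit that the compliance engine enforces further down; a quick worked example with the numbers from the code above:

```python
# High demand, positive sentiment, rising trend:
max_mult = 1.6        # demand_multipliers["high"] upper bound
max_mult *= 1.15      # positive market sentiment
max_mult *= 1.1       # rising price trend
print(round(max_mult, 3))  # 2.024 -- just above the 2.0x compliance ceiling
```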
662
 
663
+ # Calculate price bounds
664
  min_price = original_price * min_mult
665
  max_price = original_price * max_mult
666
 
667
+ # Calculate fair market price
668
+ if market_intel.average_resale_price > 0:
669
+ market_fair_price = market_intel.average_resale_price
670
+ else:
671
+ market_fair_price = original_price * (1.0 + (0.5 if demand_level == "high" else 0.2 if demand_level == "medium" else 0.0))
672
+
673
  # Recommend price within acceptable range
674
  recommended_price = max(min_price, min(proposed_price, max_price))
675
 
676
+ # Calculate margins
677
  price_ratio = recommended_price / original_price
 
678
  profit_margin = None
679
  loss_percentage = None
680
 
 
683
  elif price_ratio < 1.0:
684
  loss_percentage = (1 - price_ratio) * 100
685
 
686
+ # Generate pricing reason
687
+ reason = f"Adjusted for {demand_level} demand ({market_intel.market_sentiment} market sentiment, {market_intel.price_trend} trend)"
688
  if recommended_price != proposed_price:
689
+ if recommended_price > proposed_price:
690
+ reason += f" - Price increased from ${proposed_price:.2f} to meet market conditions"
691
+ else:
692
+ reason += f" - Price reduced from ${proposed_price:.2f} for policy compliance"
693
+
694
+ # Generate AI pricing rationale
695
+ pricing_factors = {
696
+ "original_price": original_price,
697
+ "proposed_price": proposed_price,
698
+ "recommended_price": recommended_price,
699
+ "market_sentiment": market_intel.market_sentiment,
700
+ "price_trend": market_intel.price_trend,
701
+ "demand_level": demand_level
702
+ }
703
+
704
+ ai_rationale = await self.ai_service.generate_pricing_rationale(market_intel.dict(), pricing_factors)
705
 
706
  return PricingRecommendation(
707
  original_price=original_price,
708
  recommended_resale_price=recommended_price,
709
+ market_fair_price=market_fair_price,
710
  demand_level=demand_level,
711
  price_adjustment_reason=reason,
712
  profit_margin=profit_margin,
713
+ loss_percentage=loss_percentage,
714
+ market_intelligence=market_intel,
715
+ ai_pricing_rationale=ai_rationale
716
  )
717
 
718
+ class EnhancedComplianceEngine:
719
+ def __init__(self):
720
+ self.ai_service = GeminiAIService()
721
+
722
+ async def check_compliance(
723
+ self,
724
+ user_id: str,
725
+ proposed_price: float,
726
  original_price: float,
727
+ scalping_detection: ScalpingDetection,
728
+ event_name: str
729
  ) -> ResaleCompliance:
730
+ """Enhanced compliance checking with AI policy analysis"""
731
 
732
  violations = []
733
  price_ratio = proposed_price / original_price
734
 
735
  # Policy checks
736
+ if price_ratio > 2.5:
737
+ violations.append("price_exceeds_250_percent")
738
+ elif price_ratio > 2.0:
739
+ violations.append("price_exceeds_200_percent")
740
 
741
  if scalping_detection.is_scalper:
742
  violations.append("suspected_scalper")
743
 
744
+ if scalping_detection.purchase_velocity > 6:
745
  violations.append("excessive_purchase_velocity")
746
+ elif scalping_detection.purchase_velocity > 4:
747
+ violations.append("high_purchase_velocity")
748
 
749
+ if scalping_detection.ip_duplicates > 3:
750
+ violations.append("multiple_ip_accounts")
751
+
752
+ # Generate consistent resale frequency based on user_id
753
+ random.seed(hash(user_id + "resale") % 2147483647)
754
+ resale_frequency = random.randint(0, 10)
755
+ random.seed()
756
+
757
+ if resale_frequency > 6:
758
  violations.append("monthly_resale_limit_exceeded")
759
+ elif resale_frequency > 4:
760
+ violations.append("high_resale_frequency")
761
 
762
+ if "bulk_purchase" in scalping_detection.flags:
763
+ violations.append("bulk_purchase_violation")
764
 
765
+ # Determine compliance
766
+ is_compliant = len(violations) == 0
767
+ resale_allowed = is_compliant and not scalping_detection.is_scalper and price_ratio <= 2.0
768
  max_allowed_price = original_price * 2.0
769
 
770
+ # Generate recommendation
771
  if resale_allowed:
772
+ recommendation = "βœ… Transaction approved - complies with all anti-scalping policies"
773
  else:
774
+ severity = "CRITICAL" if len(violations) > 3 else "HIGH" if len(violations) > 1 else "MODERATE"
775
+ recommendation = f"❌ Transaction blocked ({severity} risk) - Violations: {', '.join(violations)}"
776
+
777
+ # AI policy analysis
778
+ transaction_data = {
779
+ "user_id": user_id,
780
+ "proposed_price": proposed_price,
781
+ "original_price": original_price,
782
+ "price_ratio": price_ratio,
783
+ "event_name": event_name,
784
+ "scalping_indicators": scalping_detection.flags,
785
+ "resale_frequency": resale_frequency
786
+ }
787
+
788
+ ai_policy_analysis = await self.ai_service.analyze_policy_compliance(transaction_data, violations)
789
 
790
  return ResaleCompliance(
791
  is_compliant=is_compliant,
792
  violations=violations,
793
  resale_allowed=resale_allowed,
794
  max_allowed_price=max_allowed_price,
795
+ recommendation=recommendation,
796
+ ai_policy_analysis=ai_policy_analysis
797
  )
798
 
799
+ # ========== MOCK DATABASE (Enhanced) ==========
800
+ class MockDatabase:
801
+ """Enhanced mock database with consistent data generation"""
802
  def __init__(self):
803
+ self.user_purchase_history = {}
804
+ self.ip_addresses = {}
805
+ self.resale_history = {}
806
+ self.user_profiles = {}
807
 
808
+ def get_user_purchases(self, user_id: str) -> int:
809
+ if user_id not in self.user_purchase_history:
810
+ # Generate consistent data based on user_id hash
811
+ random.seed(hash(user_id) % 2147483647)
812
+ self.user_purchase_history[user_id] = random.randint(0, 8)
813
+ random.seed()
814
+ return self.user_purchase_history[user_id]
815
+
816
+ def get_ip_accounts(self, user_id: str) -> int:
817
+ random.seed(hash(user_id + "ip") % 2147483647)
818
+ result = random.randint(1, 5)
819
+ random.seed()
820
+ return result
821
+
822
+ def get_resale_frequency(self, user_id: str) -> int:
823
+ if user_id not in self.resale_history:
824
+ random.seed(hash(user_id + "resale") % 2147483647)
825
+ self.resale_history[user_id] = random.randint(0, 12)
826
+ random.seed()
827
+ return self.resale_history[user_id]
828
+
829
+ mock_db = MockDatabase()
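The seeding trick used by `MockDatabase` (and by the enhanced engines above) keeps the simulated numbers stable for a given `user_id` within one process; note that `hash()` of a string is salted per interpreter run, so the same user can still draw different values after a restart unless `PYTHONHASHSEED` is pinned. A small sketch of the intended behaviour:

```python
db = MockDatabase()

# Within one process the value is stable: the first call seeds and caches it.
assert db.get_user_purchases("USER001") == db.get_user_purchases("USER001")

# Across restarts it may change, because hash("USER001") is randomized
# unless PYTHONHASHSEED is set in the environment.
```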
830
+
831
+ # ========== ENHANCED MAIN APPLICATION ==========
832
+ class IntelligentAntiScalpingSystem:
833
+ def __init__(self):
834
+ self.verification_engine = EnhancedUserVerificationEngine()
835
+ self.scalping_engine = EnhancedScalpingDetectionEngine()
836
+ self.pricing_engine = EnhancedDynamicPricingEngine()
837
+ self.compliance_engine = EnhancedComplianceEngine()
838
+ self.ai_service = GeminiAIService()
839
+
840
+ logger.info("Intelligent Anti-Scalping System initialized")
841
+
842
+ async def process_intelligent_transaction(
843
  self,
844
  name: str,
845
+ email: str,
846
  user_id: str,
847
  event_name: str,
848
  ticket_type: str,
 
851
  demand_level: str,
852
  proposed_resale_price: float
853
  ) -> Dict[str, Any]:
854
+ """Process transaction through enhanced AI-powered analysis"""
855
 
856
  try:
857
+ # Run analyses concurrently for better performance
858
+ verification_task = self.verification_engine.verify_user(name, email, user_id)
859
+ scalping_task = self.scalping_engine.detect_scalping(user_id, ticket_quantity, event_name)
860
+ pricing_task = self.pricing_engine.calculate_pricing(
861
+ original_price, demand_level, proposed_resale_price, event_name, ticket_type
862
+ )
863
 
864
+ # Wait for core analyses to complete
865
+ verification, scalping_detection, pricing = await asyncio.gather(
866
+ verification_task, scalping_task, pricing_task
867
  )
868
 
869
+ # Compliance check depends on scalping detection
870
+ compliance = await self.compliance_engine.check_compliance(
871
+ user_id, proposed_resale_price, original_price, scalping_detection, event_name
872
  )
873
 
874
+ # Final decision logic with weighted scoring
875
+ verification_score = 1.0 if verification.is_verified else 0.0
876
+ scalping_score = 0.0 if scalping_detection.is_scalper else 1.0
877
+ compliance_score = 1.0 if compliance.resale_allowed else 0.0
878
+
879
+ # Weighted decision (verification: 30%, scalping: 40%, compliance: 30%)
880
+ overall_score = (verification_score * 0.3 + scalping_score * 0.4 + compliance_score * 0.3)
881
+
882
+ final_decision = "APPROVED" if overall_score >= 0.7 else "DENIED"
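With the 30/40/30 weighting, the 0.7 threshold effectively makes the scalping check a hard gate while either of the other two checks may fail on its own; a distilled sketch of that logic (the helper name and boolean arguments are illustrative, not part of the commit):

```python
def decide(verified: bool, passed_scalping: bool, compliant: bool) -> str:
    score = 0.3 * verified + 0.4 * passed_scalping + 0.3 * compliant
    return "APPROVED" if score >= 0.7 else "DENIED"

print(decide(True, True, False))   # 0.7 -> APPROVED even though compliance failed
print(decide(True, False, True))   # 0.6 -> DENIED whenever the scalping check fails
```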
883
+
884
+ # Generate intelligent action items
885
+ action_items = self._generate_action_items(
886
+ final_decision, verification, scalping_detection, compliance, pricing
887
  )
888
 
889
+ # Create comprehensive report
890
+ report_data = {
891
+ "verification": verification.dict(),
892
+ "scalping_detection": scalping_detection.dict(),
893
+ "pricing": pricing.dict(),
894
+ "compliance": compliance.dict(),
895
+ "final_decision": final_decision,
896
+ "overall_score": overall_score
897
+ }
898
+
899
+ # Generate AI executive summary
900
+ ai_summary, confidence_score = await self.ai_service.generate_executive_summary(report_data)
901
+
902
+ # Create final report
903
+ report = IntelligentReport(
 
 
 
 
 
 
904
  verification=verification,
905
  scalping_detection=scalping_detection,
906
  pricing=pricing,
907
  compliance=compliance,
908
  final_decision=final_decision,
909
+ action_items=action_items,
910
+ ai_summary=ai_summary,
911
+ confidence_score=confidence_score
912
  )
913
 
914
  return report.dict()
915
 
916
  except Exception as e:
917
+ logger.error(f"Error processing intelligent transaction: {e}")
918
  return self._create_error_response(str(e))

+    def _generate_action_items(
+        self,
+        decision: str,
+        verification: UserVerification,
+        scalping: ScalpingDetection,
+        compliance: ResaleCompliance,
+        pricing: PricingRecommendation
+    ) -> List[str]:
+        """Generate intelligent action items based on analysis results"""
+
+        actions = []
+
+        if decision == "APPROVED":
+            actions.extend([
+                "βœ… Process ticket resale at recommended price",
+                f"πŸ’° Set final price to ${pricing.recommended_resale_price:.2f}",
+                "πŸ“Š Monitor user activity for ongoing compliance"
+            ])
+
+            if verification.risk_score > 0.3:
+                actions.append("⚠️ Enhanced monitoring due to elevated risk score")
+
+            if scalping.confidence > 0.5:
+                actions.append("πŸ” Close monitoring recommended due to scalping indicators")
+
+        else:  # DENIED
+            actions.append("❌ Block transaction immediately")
+
+            if scalping.is_scalper:
+                actions.extend([
+                    "🚨 Flag user account for comprehensive review",
+                    "πŸ“§ Send scalping policy violation notice",
+                    "πŸ”’ Consider temporary account restriction"
+                ])
+
+            if not verification.is_verified:
+                actions.extend([
+                    "πŸ“‹ Require enhanced identity verification",
+                    "πŸ“ž Manual verification call recommended"
+                ])
+
+            if compliance.violations:
+                actions.append(f"πŸ“ Send policy violation notice: {', '.join(compliance.violations)}")
+
+            if pricing.recommended_resale_price != pricing.original_price:
+                actions.append(f"πŸ’‘ Suggest alternative price: ${pricing.recommended_resale_price:.2f}")
+
+        # Add market-specific actions
+        if pricing.market_intelligence.market_sentiment == "negative":
+            actions.append("πŸ“‰ Market conditions unfavorable - consider delaying resale")
+        elif pricing.market_intelligence.price_trend == "falling":
+            actions.append("⏰ Price trend declining - urgent sale recommended")
+
+        return actions
+
     def _create_error_response(self, error_msg: str) -> Dict[str, Any]:
+        """Create error response with fallback data"""
         return {
             "error": True,
             "message": f"System error: {error_msg}",
             "final_decision": "ERROR",
+            "action_items": ["πŸ”§ Contact system administrator", "πŸ“ž Manual review required"],
+            "ai_summary": "System error occurred during analysis",
+            "confidence_score": 0.0
         }

 # ========== GRADIO INTERFACE ==========
 def create_interface():
+    system = IntelligentAntiScalpingSystem()

     def process_transaction(
         name, email, user_id, event_name, ticket_type,

         except (ValueError, TypeError):
             return "❌ **Error**: Please enter valid numbers for prices and quantity"

+        # Process through the intelligent system (run async in sync context)
+        loop = asyncio.new_event_loop()
+        asyncio.set_event_loop(loop)
+        try:
+            result = loop.run_until_complete(
+                system.process_intelligent_transaction(
+                    name=name,
+                    email=email,
+                    user_id=user_id,
+                    event_name=event_name,
+                    ticket_type=ticket_type,
+                    ticket_quantity=ticket_quantity,
+                    original_price=original_price,
+                    demand_level=demand_level,
+                    proposed_resale_price=proposed_resale_price
+                )
+            )
+        finally:
+            loop.close()
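+        # Note: when no event loop is already running in this thread, the explicit
+        # new_event_loop()/run_until_complete()/close() sequence above is effectively
+        # equivalent to a single asyncio.run(...) call.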

         # Handle errors
         if result.get("error"):
             return f"❌ **System Error**: {result.get('message', 'Unknown error occurred')}"

+        # Format the comprehensive output
         decision_emoji = "βœ…" if result['final_decision'] == "APPROVED" else "❌"
+        confidence_color = "🟒" if result.get('confidence_score', 0) > 0.8 else "🟑" if result.get('confidence_score', 0) > 0.6 else "πŸ”΄"
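+        # Confidence badge thresholds: green above 80%, yellow above 60%, red otherwise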

         output = f"""
+# πŸ€– Intelligent Anti-Scalping Analysis Report

 ## {decision_emoji} Final Decision: **{result['final_decision']}**
+**AI Confidence**: {result.get('confidence_score', 0):.1%} {confidence_color}
+
+> 🧠 **AI Summary**: {result.get('ai_summary', 'Analysis completed successfully')}

 ---

+## πŸ‘€ Enhanced User Verification
 - **User ID**: `{result['verification']['user_id']}`
 - **Name**: {result['verification']['name']}
 - **Email**: {result['verification']['email']}

 - **Risk Score**: {result['verification']['risk_score']:.1%} {'🟒' if result['verification']['risk_score'] < 0.3 else '🟑' if result['verification']['risk_score'] < 0.6 else 'πŸ”΄'}
 - **Verification Level**: {result['verification']['verification_level'].title()}

+### 🧠 AI Behavioral Analysis:
+{result['verification']['ai_behavioral_analysis']}
+
+---
+
+## πŸ” Advanced Scalping Detection
 - **Scalper Detected**: {'🚨 YES' if result['scalping_detection']['is_scalper'] else 'βœ… NO'}
 - **Detection Confidence**: {result['scalping_detection']['confidence']:.1%}
 - **Purchase Velocity**: {result['scalping_detection']['purchase_velocity']} purchases/hour
 - **IP Address Duplicates**: {result['scalping_detection']['ip_duplicates']} accounts
+- **Network Connections**: {len(result['scalping_detection']['network_connections'])} suspicious links
 - **Red Flags**: {', '.join(result['scalping_detection']['flags']) if result['scalping_detection']['flags'] else 'βœ… None detected'}

+### 🧠 AI Pattern Analysis:
+{result['scalping_detection']['ai_pattern_analysis']}
+
+---
+
+## πŸ’° Intelligent Market Pricing
 - **Original Price**: ${result['pricing']['original_price']:.2f}
 - **Proposed Resale**: ${proposed_resale_price:.2f}
+- **AI Recommended**: ${result['pricing']['recommended_resale_price']:.2f}
+- **Market Fair Price**: ${result['pricing']['market_fair_price']:.2f}
 - **Price Ratio**: {result['pricing']['recommended_resale_price']/result['pricing']['original_price']:.2f}x
+
+### πŸ“Š Market Intelligence:
+- **Market Sentiment**: {result['pricing']['market_intelligence']['market_sentiment'].title()}
+- **Price Trend**: {result['pricing']['market_intelligence']['price_trend'].title()} πŸ“ˆ
+- **Supply/Demand**: {result['pricing']['market_intelligence']['supply_demand_ratio']:.2f}
+- **Average Market Price**: ${result['pricing']['market_intelligence']['average_resale_price']:.2f}
 """
 
1089
  if result['pricing'].get('profit_margin'):
 
1091
  elif result['pricing'].get('loss_percentage'):
1092
  output += f"- **Loss**: -{result['pricing']['loss_percentage']:.1f}% πŸ“‰\n"
1093
 
1094
+ output += f"""
1095
+ ### 🧠 AI Pricing Rationale:
1096
+ {result['pricing']['ai_pricing_rationale']}
1097
+
1098
+ ---
1099
 
+## βœ… Compliance & Policy Analysis
 - **Policy Compliant**: {'βœ… Yes' if result['compliance']['is_compliant'] else '❌ No'}
 - **Resale Permitted**: {'βœ… Yes' if result['compliance']['resale_allowed'] else '❌ No'}
 - **Maximum Allowed**: ${result['compliance']['max_allowed_price']:.2f}
+- **Violations**: {', '.join(result['compliance']['violations']) if result['compliance']['violations'] else 'βœ… None'}
+
+### 🧠 AI Policy Analysis:
+{result['compliance']['ai_policy_analysis']}

 ---

+## πŸ“‹ Intelligent Action Items
 """
         for i, action in enumerate(result['action_items'], 1):
             output += f"{i}. {action}\n"

         # Add summary box
         if result['final_decision'] == "APPROVED":
+            output += f"""
+---
+
+> βœ… **TRANSACTION APPROVED**
+>
+> **Confidence Level**: {result.get('confidence_score', 0):.1%}
+>
+> All AI-powered checks passed. Resale can proceed with recommended monitoring.
 """
         else:
+            output += f"""
+---
+
+> ❌ **TRANSACTION BLOCKED**
+>
+> **Risk Assessment**: High risk detected by AI analysis
+>
+> Policy violations and suspicious patterns identified. Manual review required.
+"""
+
+        output += f"""
+---
+
+*πŸ€– Analysis powered by Google Gemini AI β€’ Generated at {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}*
 """

         return output

+    # Create enhanced Gradio interface
     with gr.Blocks(
+        title="πŸ€– Intelligent Anti-Scalping System",
         theme=gr.themes.Soft(
             primary_hue="blue",
+            secondary_hue="purple",
+        ),
+        css="""
+        .gradio-container {
+            max-width: 1200px !important;
+        }
+        """
     ) as interface:

         gr.Markdown("""
+# πŸ€– Intelligent AI-Powered Anti-Scalping System

+Next-generation ticket scalping prevention using **Google Gemini AI**, real-time market intelligence,
+and advanced behavioral analysis. Protects consumers while ensuring fair market pricing.
+
+## πŸš€ AI-Powered Features:
+
+- **🧠 Behavioral Analysis**: Gemini AI analyzes user patterns for authenticity
+- **πŸ” Pattern Recognition**: Advanced scalping detection using network analysis
+- **πŸ“Š Market Intelligence**: Real-time pricing data and sentiment analysis
+- **βš–οΈ Policy Compliance**: AI-powered policy interpretation and enforcement
+- **πŸ“ˆ Dynamic Pricing**: Market-aware fair pricing recommendations

 ---
 """)

                 event_name = gr.Textbox(label="Event Name", placeholder="Taylor Swift - Eras Tour", value="")
                 ticket_type = gr.Dropdown(
                     label="Ticket Type",
+                    choices=["General Admission", "VIP", "Premium", "Standard", "Balcony", "Floor", "Front Row"],
                     value="Standard"
                 )
                 ticket_quantity = gr.Number(label="Number of Tickets", value=1, minimum=1, maximum=10, precision=0)

                 )
                 proposed_resale_price = gr.Number(label="Proposed Resale Price ($)", value=150, minimum=1)

+                submit_btn = gr.Button("πŸ€– Run AI Analysis", variant="primary", size="lg")

             with gr.Column(scale=2):
+                output = gr.Markdown(value="""
+πŸ€– **Ready for AI Analysis!**
+
+Fill in the transaction details and click 'Run AI Analysis' to get a comprehensive report covering:
+- 🧠 AI behavioral analysis
+- πŸ” Advanced scalping detection
+- πŸ“Š Real-time market intelligence
+- βš–οΈ Policy compliance assessment
+- πŸ“ˆ Dynamic pricing recommendations
+""")

         # Event handlers
         submit_btn.click(

                 ["John Smith", "john@gmail.com", "USER001", "Taylor Swift - Eras Tour", "VIP", 2, 500, "high", 750],
                 ["Jane Doe", "jane@company.com", "USER002", "NBA Finals Game 7", "Premium", 4, 300, "high", 1200],
                 ["Bob Wilson", "bob@email.com", "USER003", "Local Concert", "General Admission", 1, 50, "low", 40],
+                ["ScalperBot", "temp@fake.com", "BOT999", "Popular Event", "Standard", 8, 100, "high", 300],
+                ["Sarah Johnson", "sarah.j@university.edu", "USER456", "Music Festival", "Premium", 3, 200, "medium", 280],
             ],
             inputs=[
                 name, email, user_id, event_name, ticket_type,
                 ticket_quantity, original_price, demand_level, proposed_resale_price
             ]
         )
+
+        gr.Markdown("""
+---
+
+### πŸ”‘ System Configuration
+
+- **AI Provider**: Google Gemini 1.5 Flash (set the `GEMINI_API_KEY` environment variable)
+- **Market Data**: Serper API (optional: set `SERPER_API_KEY` for real-time data)
+- **Fallback Mode**: the system works without API keys, using intelligent simulations
+
+*πŸ”’ Your API keys are never stored or shared. All processing is secure and private.*
+""")

     return interface
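
For local testing of this hunk, a typical environment check and launch would look like the sketch below; the `__main__` guard and launch call are illustrative assumptions (the file's real entry point is outside this excerpt), and only the environment variable names come from the configuration note above:

    import os

    if __name__ == "__main__":
        # Optional keys referenced by the configuration note; when they are unset,
        # the app is described as falling back to simulated analysis.
        if not os.getenv("GEMINI_API_KEY"):
            print("GEMINI_API_KEY not set - running in simulated fallback mode")
        if not os.getenv("SERPER_API_KEY"):
            print("SERPER_API_KEY not set - market data will be simulated")

        demo = create_interface()
        demo.launch()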