import json

from flask import current_app
from groq import Groq

class GroqClient:
    def __init__(self, api_key=None):
        self.api_key = api_key or current_app.config.get("GROQ_API_KEY")
        if not self.api_key:
            raise ValueError("Groq API key not found. Please set GROQ_API_KEY in config.")
        self.client = Groq(api_key=self.api_key)
        self.model = "llama-3.3-70b-versatile" 

    def generate_interrogation_questions(self, case_details: str, suspect_profile: str, evidence_summary: str) -> list[str]:
        """
        Generates at least 10 complex, cross-investigation style interrogation questions.
        """
        prompt = f"""You are an expert interrogation question writer for law enforcement agencies.
        Based on the following case information, generate at least 10 complex and insightful interrogation questions designed to uncover the truth, check for inconsistencies, and probe the suspect's knowledge and involvement. The questions should be suitable for a formal interrogation setting and encourage detailed responses rather than simple yes/no answers. Employ cross-investigation techniques where appropriate.

        Case Details: {case_details}
        Suspect Profile: {suspect_profile}
        Evidence Summary: {evidence_summary}

        Generate between 10 and 15 questions. Each question should be on a new line. Do not number the questions.
        Example Question Format:
        Can you explain your whereabouts on the night of the incident in detail, starting from 6 PM until midnight?
        How do you account for the discrepancies between your statement and the evidence found at the scene?
        What is your relationship with the other individuals mentioned in this case?

        Generated Questions:
        """
        try:
            chat_completion = self.client.chat.completions.create(
                messages=[
                    {
                        "role": "system",
                        "content": "You are an expert interrogation question writer."
                    },
                    {
                        "role": "user",
                        "content": prompt,
                    }
                ],
                model=self.model,
                temperature=0.7,
                max_tokens=1024,
                top_p=1,
                stop=None,
            )
            response_content = chat_completion.choices[0].message.content
            questions = [q.strip() for q in response_content.strip().split("\n") if q.strip()]
            # The prompt asks for at least 10 questions; log a warning if the model
            # returns fewer and let the caller decide how to handle the shortfall.
            if len(questions) < 10:
                current_app.logger.warning(
                    f"LLM generated only {len(questions)} questions; expected at least 10."
                )
            return questions[:15]  # Return at most 15 questions
        except Exception as e:
            current_app.logger.error(f"Error generating interrogation questions from Groq: {e}")
            # Fallback: return generic questions if API fails
            return [
                "Can you describe your activities on the day of the incident?",
                "What is your relationship with the victim/complainant?",
                "Were you present at the location where the incident occurred?",
                "Do you have any information that could help this investigation?",
                "Is there anything you would like to add or clarify regarding your previous statements?",
                "Can you provide any alibi for the time of the incident?",
                "Who else might have information relevant to this case?",
                "What was your initial reaction upon learning about this incident?",
                "Have you discussed this matter with anyone else? If so, with whom?",
                "Is there any reason someone might falsely implicate you in this matter?"
            ]

    def generate_report_and_recommendations(self, interrogation_summary: str, profile_details: str, evidence_summary: str, selected_country_name: str) -> str:
        """
        Generates a structured investigation report and recommendations in JSON format.
        The JSON should follow a generic investigation agency standard.
        """
        prompt = f"""You are an AI assistant for a law enforcement agency, tasked with generating a structured investigation report.
        Based on the provided information, generate a comprehensive investigation report in JSON format. The report should follow a globally accepted standard structure for investigation reports.
        The JSON output must include these top-level keys: "caseSummary", "incidentDetails", "personsInvolved", "evidenceCollected", "interrogationSummary", "findingsAndInferences", "recommendations".
        Under "recommendations", provide actionable steps and considerations, taking into account that the legal context is for '{selected_country_name}'.

        Interrogation Summary (includes questions, answers, and observations): {interrogation_summary}
        Suspect Profile Details: {profile_details}
        Evidence Summary: {evidence_summary}
        Country for Legal Context of Recommendations: {selected_country_name}

        Ensure the entire output is a single, valid JSON object. Do not include any text outside the JSON structure.

        Example JSON structure (fill with relevant details based on input):
        {{ 
            "caseSummary": {{ "observations": "Behavioral observations during interrogation." }},
    
            "recommendations": {{ "chargesToBeFiled": ["Charge 1 (Specific to {selected_country_name} law)"], "furtherActions": ["Action A", "Action B (Consider {selected_country_name} procedures)"], "legalConsiderations_{selected_country_name}": "Specific legal points relevant to {selected_country_name}." }}
        }}
        """
        try:
            chat_completion = self.client.chat.completions.create(
                messages=[
                    {
                        "role": "system",
                        "content": "You are an AI assistant that generates structured investigation reports in JSON format."
                    },
                    {
                        "role": "user",
                        "content": prompt,
                    }
                ],
                model=self.model,
                response_format={"type": "json_object"}, # Request JSON output
                temperature=0.5,
                max_tokens=4096, # Allow for larger JSON output
                top_p=1,
                stop=None,
            )
            json_output = chat_completion.choices[0].message.content
            current_app.logger.debug(f"Raw report output from Groq: {json_output}")

            # Validate if the output is valid JSON
            try:
                json.loads(json_output)
                return json_output
            except json.JSONDecodeError as je:
                current_app.logger.error(f"Groq returned invalid JSON for report: {je}\nContent: {json_output}")
                # Fallback: return an error JSON or a template
                error_json = {
                    "error": "Failed to generate valid JSON report from LLM.",
                    "details": str(je),
                    "received_content": json_output
                }
                return json.dumps(error_json)

        except Exception as e:
            current_app.logger.error(f"Error generating report from Groq: {e}")
            # Fallback: return an error JSON
            error_json = {
                "error": "Failed to generate report due to API error.",
                "details": str(e)
            }
            return json.dumps(error_json)

    @staticmethod
    def extract_json(text: str) -> dict:
        """Extract the first top-level JSON object embedded in a block of text.

        Useful as a salvage path when the model wraps its JSON in extra prose.
        """
        start = text.find('{')
        end = text.rfind('}') + 1  # rfind returns -1 when '}' is absent, making end 0
        if start != -1 and end > start:
            return json.loads(text[start:end])
        raise ValueError("Valid JSON not found in text.")
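

# A minimal, hypothetical sketch of how GroqClient is typically wired into a
# Flask view. The blueprint name, route, and request field names below are
# illustrative assumptions, not part of this module's API.
def create_interrogation_blueprint():
    """Return a blueprint exposing question generation over HTTP (sketch only)."""
    from flask import Blueprint, jsonify, request  # local import keeps the sketch self-contained

    bp = Blueprint("interrogation_ai", __name__)

    @bp.route("/api/interrogation/questions", methods=["POST"])
    def interrogation_questions():
        data = request.get_json(force=True) or {}
        client = GroqClient()  # reads GROQ_API_KEY from current_app.config
        questions = client.generate_interrogation_questions(
            case_details=data.get("case_details", ""),
            suspect_profile=data.get("suspect_profile", ""),
            evidence_summary=data.get("evidence_summary", ""),
        )
        return jsonify({"questions": questions})

    return bp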

# Example usage (for testing outside Flask app context):
if __name__ == '__main__':
    # This part won't run directly without a Flask app context for current_app.config
    # For standalone testing, you'd pass the API key directly or mock current_app
    print("This script is intended to be used within a Flask application context.")
    # Mock current_app for standalone testing
    class MockApp:
        def __init__(self):
            self.config = {"GROQ_API_KEY": "YOUR_GROQ_API_KEY_HERE"} # Replace with your key for testing
            # Minimal stand-in for Flask's app logger: route error/warning/debug to print
            self.logger = lambda *args, **kwargs: None  # Mock logger object
            self.logger.error = self.logger.warning = self.logger.debug = print

    # current_app = MockApp() # Uncomment and set key for standalone test

    # if current_app.config["GROQ_API_KEY"] != "YOUR_GROQ_API_KEY_HERE":
    #     client = GroqClient(api_key=current_app.config["GROQ_API_KEY"])
        
    #     print("--- Testing Question Generation ---")
    #     questions = client.generate_interrogation_questions(
    #         case_details="A case involving financial fraud where large sums of money were embezzled from a company over 6 months.",
    #         suspect_profile="The company's CFO, has access to all financial records, known to have gambling debts.",
    #         evidence_summary="Suspicious transactions traced to an offshore account linked to the CFO. No direct confession yet."
    #     )
    #     print("Generated Questions:")
    #     for q_idx, q_text in enumerate(questions):
    #         print(f"{q_idx+1}. {q_text}")

    #     print("\n--- Testing Report Generation ---")
    #     report_json = client.generate_report_and_recommendations(
    #         interrogation_summary="Suspect was evasive. Denied knowledge of offshore accounts initially, but became visibly nervous when presented with transaction details. Key Q: 'Can you explain these transfers to the Cayman Islands account?' A: 'I... I don't recall those specific transactions right now.'",
    #         profile_details="John Doe, CFO, 15 years with company. Recently divorced, known to frequent casinos.",
    #         evidence_summary="Bank statements show regular transfers to account XZY123 in Cayman Islands. Digital forensics found deleted emails discussing large cash withdrawals.",
    #         selected_country_name="USA"
    #     )
    #     print("Generated Report (JSON):")
    #     print(report_json)
    #     try:
    #         parsed_report = json.loads(report_json)
    #         print("\nReport JSON is valid.")
    #     except json.JSONDecodeError:
    #         print("\nReport JSON is INVALID.")
    # else:
    #     print("Please set your GROQ_API_KEY in the MockApp for standalone testing.")