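"""PocketFlow nodes for the MBTI questionnaire pipeline.

Each node follows PocketFlow's prep/exec/post contract: prep() reads what it
needs from the shared store, exec() does the work without touching shared
state, and post() writes results back and returns an action string for flow
routing.

Expected shared-store shape (assumed here from the keys the nodes touch; it
is initialized by the caller, e.g. main.py):

    shared = {
        "config": {"import_file": ..., "ui_mode": "cli", "output_format": ...},
        "questionnaire": {"questions": [], "responses": {}, "metadata": {}},
        "analysis": {},
        "results": {},
        "exports": {},
    }
"""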
import os
import sys
from datetime import datetime

sys.path.append(os.path.join(os.path.dirname(__file__), '..'))

from pocketflow import Node, BatchNode
from utils.questionnaire import load_questionnaire, save_questionnaire
from utils.mbti_scoring import traditional_mbti_score, determine_mbti_type
from utils.report_generator import generate_report

# Conditional LLM import: fall back to a stub so the rest of the pipeline
# still works when the LLM dependencies are not installed.
try:
    from utils.call_llm import call_llm
    LLM_AVAILABLE = True
except ImportError:
    LLM_AVAILABLE = False
    def call_llm(prompt):
        return "LLM not available - install dependencies"

class LoadQuestionnaireNode(Node):
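    """Load the questions (optionally from config["import_file"]) and stamp
    the questionnaire metadata with a creation time.
    """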
    def prep(self, shared):
        return shared.get("config", {}).get("import_file")
    
    def exec(self, import_file):
        return load_questionnaire(import_file)
    
    def post(self, shared, prep_res, exec_res):
        shared["questionnaire"]["questions"] = exec_res
        shared["questionnaire"]["metadata"]["created_at"] = datetime.now().isoformat()
        return "default"

class PresentQuestionsNode(Node):
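    """Collect a 1-5 Likert rating for every question.

    In "cli" mode the ratings are gathered interactively; in Gradio mode the
    UI layer collects them instead (see post() below).
    """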
    def prep(self, shared):
        return shared["questionnaire"]["questions"], shared["config"]["ui_mode"]
    
    def exec(self, inputs):
        questions, ui_mode = inputs
        responses = {}
        
        if ui_mode == "cli":
            print("\n=== MBTI Personality Questionnaire ===")
            print("Rate each statement from 1-5:")
            print("1=Strongly Disagree, 2=Disagree, 3=Neutral, 4=Agree, 5=Strongly Agree\n")
            
            for q in questions:
                while True:
                    try:
                        print(f"Q{q['id']}: {q['text']}")
                        response = int(input("Your rating (1-5): "))
                        if 1 <= response <= 5:
                            responses[q['id']] = response
                            break
                        else:
                            print("Please enter a number between 1 and 5.")
                    except ValueError:
                        print("Please enter a valid number.")
                print()
        else:
            # In Gradio mode the main interface collects the responses, so
            # this node returns an empty dict and post() leaves them alone.
            pass
            
        return responses
    
    def post(self, shared, prep_res, exec_res):
        # Only store responses the CLI actually collected; in Gradio mode the
        # UI layer populates shared["questionnaire"]["responses"] itself.
        if exec_res:
            shared["questionnaire"]["responses"] = exec_res
        return "default"

class AnalyzeResponsesBatchNode(BatchNode):
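    """Normalize each response to an integer in 1..5, mapping textual answers
    (e.g. "strongly_agree") onto the Likert scale and clamping numeric values.
    """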
    def prep(self, shared):
        return list(shared["questionnaire"]["responses"].items())
    
    def exec(self, response_item):
        question_id, response = response_item
        # Validate and normalize response
        if isinstance(response, str):
            response_map = {
                'strongly_disagree': 1, 'disagree': 2, 'neutral': 3,
                'agree': 4, 'strongly_agree': 5
            }
            normalized = response_map.get(response, 3)
        else:
            normalized = max(1, min(5, int(response)))
        
        return (question_id, normalized)
    
    def post(self, shared, prep_res, exec_res_list):
        # Update responses with normalized values
        normalized_responses = dict(exec_res_list)
        shared["questionnaire"]["responses"] = normalized_responses
        return "default"

class TraditionalScoringNode(Node):
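    """Score the normalized responses per MBTI dimension using the
    traditional (non-LLM) scoring utility.
    """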
    def prep(self, shared):
        return shared["questionnaire"]["responses"]
    
    def exec(self, responses):
        return traditional_mbti_score(responses)
    
    def post(self, shared, prep_res, exec_res):
        shared["analysis"]["traditional_scores"] = exec_res
        return "default"

class LLMAnalysisNode(Node):
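    """Generate a narrative LLM analysis of the specific responses.

    Must run after DetermineMBTITypeNode: prep() reads the determined type
    from shared["results"]["mbti_type"]. When LLM dependencies are missing,
    the stubbed call_llm returns a placeholder message instead.
    """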
    def __init__(self, max_retries=3):
        super().__init__(max_retries=max_retries)
        if not LLM_AVAILABLE:
            print("Warning: LLM not available, using fallback analysis")
    
    def prep(self, shared):
        responses = shared["questionnaire"]["responses"]
        questions = shared["questionnaire"]["questions"]
        mbti_type = shared["results"]["mbti_type"]
        traditional_scores = shared["analysis"]["traditional_scores"]
        
        # Ensure questions are loaded
        if not questions:
            questions = load_questionnaire()
            shared["questionnaire"]["questions"] = questions
        
        # Format responses for LLM with dimension info
        formatted_responses = []
        for q in questions:
            response_val = responses.get(q['id'], 3)
            response_text = {1: "Strongly Disagree", 2: "Disagree", 3: "Neutral", 
                           4: "Agree", 5: "Strongly Agree"}[response_val]
            dimension = q.get('dimension', 'Unknown')
            formatted_responses.append(f"Q{q['id']} ({dimension}): {q['text']} - **{response_text}**")
        
        print(f"DEBUG: Formatted {len(formatted_responses)} questions for LLM")
        return "\n".join(formatted_responses), mbti_type, traditional_scores
    
    def exec(self, inputs):
        formatted_responses, mbti_type, traditional_scores = inputs
        
        # Format dimension scores for context
        dimension_info = []
        pairs = [('E', 'I'), ('S', 'N'), ('T', 'F'), ('J', 'P')]
        for dim1, dim2 in pairs:
            score1 = traditional_scores.get(f'{dim1}_score', 0.5)
            score2 = traditional_scores.get(f'{dim2}_score', 0.5)
            stronger = dim1 if score1 > score2 else dim2
            percentage = max(score1, score2) * 100
            dimension_info.append(f"{dim1}/{dim2}: {stronger} ({percentage:.1f}%)")
        
        prompt = f"""
You are analyzing MBTI questionnaire responses for someone determined to be {mbti_type} type.

Here are their EXACT responses to each question:

{formatted_responses}

Traditional scoring results:
{chr(10).join(dimension_info)}

IMPORTANT: You have been provided with the complete set of questions and responses above. Please analyze these SPECIFIC responses.

Provide a detailed analysis that:

1. **Response Pattern Analysis**: Identify which responses strongly support the {mbti_type} determination and which might seem unexpected. Reference specific questions (e.g., "Q5 shows...", "Your response to Q12 indicates...").

2. **Characteristic Alignment**: Explain how their responses align with typical {mbti_type} characteristics, citing specific questions as evidence.

3. **Out-of-Character Responses**: Point out any responses that seem inconsistent with typical {mbti_type} patterns and provide possible explanations.

4. **Behavioral Patterns**: Describe key behavioral patterns shown through their responses, referencing the relevant questions.

5. **Strengths & Growth Areas**: Based on their specific responses, identify strengths they demonstrate and areas for potential growth.

6. **Communication & Work Style**: Infer their communication and work preferences from their question responses.

You must reference the actual questions provided above throughout your analysis, using markdown anchor links like [Q1](#Q1), [Q2](#Q2), etc. This will create clickable links to the specific questions in the report. Do not make assumptions about questions not provided.
"""
        return call_llm(prompt)
    
    def post(self, shared, prep_res, exec_res):
        shared["analysis"]["llm_analysis"] = exec_res
        
        # Store responses data for report
        responses = shared["questionnaire"]["responses"]
        questions = shared["questionnaire"]["questions"]
        responses_data = []
        
        for q in questions:
            response_val = responses.get(q['id'], 3)
            response_text = {1: "Strongly Disagree", 2: "Disagree", 3: "Neutral", 
                           4: "Agree", 5: "Strongly Agree"}[response_val]
            responses_data.append({
                'id': q['id'],
                'text': q['text'],
                'dimension': q.get('dimension', 'Unknown'),
                'response': response_text,
                'value': response_val
            })
        
        shared["analysis"]["responses_data"] = responses_data
        return "default"

class DetermineMBTITypeNode(Node):
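    """Derive the four-letter MBTI type from the traditional scores and
    compute a confidence value per dimension pair (the spread between the
    two opposing scores).
    """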
    def prep(self, shared):
        return shared["analysis"]["traditional_scores"], shared["analysis"].get("llm_analysis", "")
    
    def exec(self, inputs):
        traditional_scores, llm_analysis = inputs
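        # llm_analysis is accepted but currently unused; the type is derived
        # purely from the traditional scores.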
        
        # Primary determination from traditional scoring
        mbti_type = determine_mbti_type(traditional_scores)
        
        # Calculate confidence scores
        confidence_scores = {}
        pairs = [('E', 'I'), ('S', 'N'), ('T', 'F'), ('J', 'P')]
        
        for dim1, dim2 in pairs:
            score1 = traditional_scores.get(f'{dim1}_score', 0.5)
            score2 = traditional_scores.get(f'{dim2}_score', 0.5)
            confidence = abs(score1 - score2)  # Higher difference = higher confidence
            confidence_scores[f'{dim1}{dim2}_confidence'] = confidence
        
        return {
            "mbti_type": mbti_type,
            "confidence_scores": confidence_scores
        }
    
    def post(self, shared, prep_res, exec_res):
        shared["results"]["mbti_type"] = exec_res["mbti_type"]
        shared["analysis"]["confidence_scores"] = exec_res["confidence_scores"]
        return "default"

class GenerateReportNode(Node):
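    """Render the report for the determined type in the configured output
    format and record the resulting path under shared["exports"].
    """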
    def prep(self, shared):
        return (
            shared["results"]["mbti_type"],
            shared["analysis"],
            shared["config"]["output_format"]
        )
    
    def exec(self, inputs):
        mbti_type, analysis, output_format = inputs
        return generate_report(mbti_type, analysis, output_format)
    
    def post(self, shared, prep_res, exec_res):
        shared["exports"]["report_path"] = exec_res
        return "default"

class ExportDataNode(Node):
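    """Bundle the questionnaire and results into a timestamped JSON file
    and persist it via save_questionnaire.
    """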
    def prep(self, shared):
        return shared["questionnaire"], shared["results"]
    
    def exec(self, inputs):
        questionnaire, results = inputs
        
        # Create export data
        export_data = {
            "questionnaire": questionnaire,
            "results": results,
            "metadata": {
                "exported_at": datetime.now().isoformat(),
                "version": "1.0"
            }
        }
        
        # Generate filename
        timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
        mbti_type = results.get("mbti_type", "UNKNOWN")
        filename = f"mbti_questionnaire_{mbti_type}_{timestamp}.json"
        
        return export_data, filename
    
    def post(self, shared, prep_res, exec_res):
        export_data, filename = exec_res
        success = save_questionnaire(export_data, filename)
        
        if success:
            shared["exports"]["questionnaire_json"] = filename
        
        return "default"