Commit
·
79c2799
1
Parent(s):
6e2473a
fix json format in main.py
Browse files- main.py +118 -134
- templates/index.html +19 -5
main.py
CHANGED
@@ -250,152 +250,136 @@ def compute_answers():
|
|
250 |
|
251 |
@app.route('/compute_marks', methods=['POST'])
|
252 |
def compute_marks():
|
253 |
-
|
254 |
-
|
255 |
-
|
256 |
-
|
257 |
-
|
258 |
-
correct_answers = request.form.getlist('correct_answers[]')
|
259 |
-
if not correct_answers:
|
260 |
-
log_print("No correct answers provided", "ERROR")
|
261 |
-
return jsonify({"error": "No correct answers provided"}), 400
|
262 |
-
|
263 |
-
# Create TFIDF values for correct answers
|
264 |
-
max_tfidf = create_tfidf_values(correct_answers)
|
265 |
|
266 |
-
|
267 |
-
|
268 |
-
|
269 |
-
|
270 |
-
|
|
|
|
|
271 |
|
272 |
-
|
273 |
-
|
274 |
-
|
275 |
-
|
276 |
-
|
277 |
-
|
278 |
-
|
279 |
-
|
280 |
-
|
281 |
-
|
282 |
-
|
283 |
-
|
284 |
-
|
285 |
-
|
286 |
-
|
287 |
-
|
288 |
-
student_folder = path_parts[-2]
|
289 |
-
filename = path_parts[-1]
|
290 |
-
|
291 |
-
if student_folder not in results:
|
292 |
-
results[student_folder] = [0] * len(correct_answers)
|
293 |
-
|
294 |
-
# Save and process file
|
295 |
-
student_dir = os.path.join(base_temp_dir, student_folder)
|
296 |
-
os.makedirs(student_dir, exist_ok=True)
|
297 |
-
filepath = os.path.join(student_dir, filename)
|
298 |
-
file.save(filepath)
|
299 |
-
|
300 |
-
# Extract text
|
301 |
-
extracted_text = extract_text_from_image(filepath)
|
302 |
-
if not extracted_text:
|
303 |
-
log_print(f"No text extracted from {file.filename}", "WARNING")
|
304 |
-
failed_files.append({
|
305 |
-
"file": file.filename,
|
306 |
-
"error": "No text could be extracted"
|
307 |
-
})
|
308 |
-
continue
|
309 |
-
|
310 |
-
# Clean the extracted text for JSON
|
311 |
-
extracted_text = extracted_text.encode('ascii', 'ignore').decode('ascii')
|
312 |
-
log_print(f"Extracted text from {file.filename}: {extracted_text}")
|
313 |
-
|
314 |
-
# Find best matching answer
|
315 |
-
best_score = 0
|
316 |
-
best_answer_index = 0
|
317 |
-
|
318 |
-
for i, correct_answer in enumerate(correct_answers):
|
319 |
-
try:
|
320 |
-
# Clean the correct answer for comparison
|
321 |
-
clean_correct_answer = correct_answer.encode('ascii', 'ignore').decode('ascii')
|
322 |
-
|
323 |
-
# Calculate similarity scores
|
324 |
-
semantic_score = question_vector_sentence(extracted_text, clean_correct_answer)
|
325 |
-
word_score = question_vector_word(extracted_text, clean_correct_answer)
|
326 |
-
tfidf_score = tfidf_answer_score(extracted_text, clean_correct_answer, max_tfidf)
|
327 |
-
ft_score = fasttext_similarity(extracted_text, clean_correct_answer)
|
328 |
-
llm_marks = llm_score(extracted_text, clean_correct_answer)
|
329 |
-
|
330 |
-
combined_score = (
|
331 |
-
semantic_score * 0.3 +
|
332 |
-
word_score * 0.2 +
|
333 |
-
tfidf_score * 0.2 +
|
334 |
-
ft_score * 0.2 +
|
335 |
-
llm_marks * 0.1
|
336 |
-
)
|
337 |
-
|
338 |
-
if combined_score > best_score:
|
339 |
-
best_score = combined_score
|
340 |
-
best_answer_index = i
|
341 |
-
|
342 |
-
except Exception as score_error:
|
343 |
-
error_msg = str(score_error).encode('ascii', 'ignore').decode('ascii')
|
344 |
-
failed_files.append({
|
345 |
-
"file": file.filename,
|
346 |
-
"error": f"Error calculating scores: {error_msg}"
|
347 |
-
})
|
348 |
-
continue
|
349 |
|
350 |
-
|
351 |
-
|
|
|
|
|
|
|
352 |
|
353 |
-
|
354 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
355 |
failed_files.append({
|
356 |
"file": file.filename,
|
357 |
-
"error":
|
358 |
})
|
359 |
continue
|
360 |
-
|
361 |
-
finally:
|
362 |
-
# Clean up temp directory
|
363 |
-
try:
|
364 |
-
shutil.rmtree(base_temp_dir)
|
365 |
-
except Exception:
|
366 |
-
pass
|
367 |
|
368 |
-
|
369 |
-
|
370 |
-
|
371 |
-
|
372 |
-
|
373 |
-
|
374 |
-
|
375 |
-
|
376 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
377 |
|
378 |
-
|
379 |
-
|
380 |
-
"failed_files": [{
|
381 |
-
"file": f["file"].encode('ascii', 'ignore').decode('ascii'),
|
382 |
-
"error": f["error"].encode('ascii', 'ignore').decode('ascii')
|
383 |
-
} for f in failed_files]
|
384 |
-
}
|
385 |
|
386 |
-
|
387 |
-
|
388 |
-
|
|
|
|
|
389 |
|
390 |
-
|
391 |
-
|
392 |
-
|
393 |
-
|
394 |
-
|
395 |
-
|
396 |
-
|
397 |
-
|
398 |
-
return
|
|
|
|
|
|
|
|
|
399 |
|
400 |
@app.route('/check_logs')
|
401 |
def check_logs():
|
|
|
@app.route('/compute_marks', methods=['POST'])
def compute_marks():
    """Grade uploaded answer-sheet images against the provided correct answers.

    Expects a multipart POST with:
      * ``correct_answers[]`` -- one form field per model answer.
      * ``file`` -- one or more images whose filenames are of the form
        ``<student_folder>/<filename>`` (folder upload from the browser).

    Each image is OCR'd, compared against every correct answer using a
    weighted blend of similarity scores, and the best match is rescaled to
    a 0-5 mark. Returns JSON ``{"results": {student: [marks...]},
    "failed_files": [{"file":..., "error":...}]}``.
    """
    try:
        correct_answers = request.form.getlist('correct_answers[]')
        if not correct_answers:
            return jsonify({"error": "No correct answers provided"}), 400, {'Content-Type': 'application/json'}

        # Pre-compute TF-IDF normalisation values over the answer key.
        max_tfidf = create_tfidf_values(correct_answers)

        files = request.files.getlist('file')
        if not files:
            return jsonify({"error": "No files uploaded"}), 400, {'Content-Type': 'application/json'}

        # Scratch directory for saving uploads so the OCR step can read
        # them from disk; removed in the finally block below.
        base_temp_dir = tempfile.mkdtemp()

        results = {}       # student_folder -> list of marks, one slot per answer
        failed_files = []  # entries: {"file": ..., "error": ...}

        try:
            for file in files:
                try:
                    # Folder uploads arrive as "<student>/<image>"; files
                    # without a folder component are ignored.
                    path_parts = file.filename.split('/')
                    if len(path_parts) < 2:
                        continue

                    student_folder = path_parts[-2]
                    filename = path_parts[-1]

                    # SECURITY: these names are client-controlled; reject
                    # components that could escape base_temp_dir (e.g. "..").
                    if student_folder in ('', '.', '..') or filename in ('', '.', '..'):
                        failed_files.append({
                            "file": file.filename,
                            "error": "Invalid file path"
                        })
                        continue

                    if student_folder not in results:
                        results[student_folder] = [0] * len(correct_answers)

                    # Save the upload to disk for OCR.
                    student_dir = os.path.join(base_temp_dir, student_folder)
                    os.makedirs(student_dir, exist_ok=True)
                    filepath = os.path.join(student_dir, filename)
                    file.save(filepath)

                    extracted_text = extract_text_from_image(filepath)
                    if not extracted_text:
                        failed_files.append({
                            "file": file.filename,
                            "error": "No text could be extracted"
                        })
                        continue

                    # Strip non-ASCII so the JSON response never contains
                    # characters the client-side parser chokes on.
                    extracted_text = extracted_text.encode('ascii', 'ignore').decode('ascii')

                    # Find the correct answer this sheet most plausibly answers.
                    best_score = 0
                    best_answer_index = 0

                    for i, correct_answer in enumerate(correct_answers):
                        try:
                            clean_correct_answer = correct_answer.encode('ascii', 'ignore').decode('ascii')

                            # Individual similarity signals.
                            semantic_score = question_vector_sentence(extracted_text, clean_correct_answer)
                            word_score = question_vector_word(extracted_text, clean_correct_answer)
                            tfidf_score = tfidf_answer_score(extracted_text, clean_correct_answer, max_tfidf)
                            ft_score = fasttext_similarity(extracted_text, clean_correct_answer)
                            llm_marks = llm_score(extracted_text, clean_correct_answer)

                            # Weighted blend; weights sum to 1.0.
                            combined_score = (
                                semantic_score * 0.3 +
                                word_score * 0.2 +
                                tfidf_score * 0.2 +
                                ft_score * 0.2 +
                                llm_marks * 0.1
                            )

                            if combined_score > best_score:
                                best_score = combined_score
                                best_answer_index = i

                        except Exception as score_error:
                            error_msg = str(score_error).encode('ascii', 'ignore').decode('ascii')
                            failed_files.append({
                                "file": file.filename,
                                "error": f"Error calculating scores: {error_msg}"
                            })
                            continue

                    # Rescale the [0, 1] similarity to a 0-5 mark.
                    marks = new_value(best_score, 0, 1, 0, 5)
                    results[student_folder][best_answer_index] = round(marks, 2)

                except Exception as e:
                    error_msg = str(e).encode('ascii', 'ignore').decode('ascii')
                    failed_files.append({
                        "file": file.filename,
                        "error": error_msg
                    })
                    continue
        finally:
            # Best-effort cleanup of the scratch directory.
            try:
                shutil.rmtree(base_temp_dir)
            except Exception:
                pass

        if not results:
            return jsonify({"error": "No results computed"}), 400, {'Content-Type': 'application/json'}

        # ASCII-sanitise student keys and failure messages so the whole
        # payload is guaranteed JSON-safe for the client.
        clean_results = {}
        for student, scores in results.items():
            clean_student = student.encode('ascii', 'ignore').decode('ascii')
            clean_results[clean_student] = scores

        response_data = {
            "results": clean_results,
            "failed_files": [{
                "file": f["file"].encode('ascii', 'ignore').decode('ascii'),
                "error": f["error"].encode('ascii', 'ignore').decode('ascii')
            } for f in failed_files]
        }

        return jsonify(response_data), 200, {'Content-Type': 'application/json'}

    except Exception as e:
        error_msg = str(e).encode('ascii', 'ignore').decode('ascii')
        return jsonify({"error": f"Error computing marks: {error_msg}"}), 500, {'Content-Type': 'application/json'}
383 |
|
384 |
@app.route('/check_logs')
|
385 |
def check_logs():
|
templates/index.html
CHANGED
@@ -987,6 +987,7 @@
|
|
987 |
throw new Error("No valid image files found in the uploaded folder");
|
988 |
}
|
989 |
|
|
|
990 |
const response = await fetch('/compute_marks', {
|
991 |
method: 'POST',
|
992 |
headers: {
|
@@ -995,21 +996,34 @@
|
|
995 |
body: formData
|
996 |
});
|
997 |
|
|
|
|
|
998 |
const contentType = response.headers.get('Content-Type');
|
999 |
-
|
1000 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1001 |
}
|
1002 |
|
1003 |
if (!response.ok) {
|
1004 |
-
|
1005 |
-
throw new Error(errorData.error || 'Server returned an error response');
|
1006 |
}
|
1007 |
|
1008 |
-
const result = await response.json();
|
1009 |
if (result.error) {
|
1010 |
throw new Error(result.error);
|
1011 |
}
|
1012 |
|
|
|
|
|
|
|
|
|
1013 |
displayMarks(result.results);
|
1014 |
|
1015 |
} catch (error) {
|
|
|
987 |
throw new Error("No valid image files found in the uploaded folder");
|
988 |
}
|
989 |
|
990 |
+
console.log('Sending request to compute marks...');
|
991 |
const response = await fetch('/compute_marks', {
|
992 |
method: 'POST',
|
993 |
headers: {
|
|
|
996 |
body: formData
|
997 |
});
|
998 |
|
999 |
+
console.log('Response received:', response);
|
1000 |
+
console.log('Response headers:', response.headers);
|
1001 |
const contentType = response.headers.get('Content-Type');
|
1002 |
+
console.log('Content-Type:', contentType);
|
1003 |
+
|
1004 |
+
let result;
|
1005 |
+
const responseText = await response.text();
|
1006 |
+
console.log('Response text:', responseText);
|
1007 |
+
|
1008 |
+
try {
|
1009 |
+
result = JSON.parse(responseText);
|
1010 |
+
} catch (e) {
|
1011 |
+
console.error('Error parsing JSON:', e);
|
1012 |
+
throw new Error('Failed to parse server response as JSON');
|
1013 |
}
|
1014 |
|
1015 |
if (!response.ok) {
|
1016 |
+
throw new Error(result.error || 'Server returned an error response');
|
|
|
1017 |
}
|
1018 |
|
|
|
1019 |
if (result.error) {
|
1020 |
throw new Error(result.error);
|
1021 |
}
|
1022 |
|
1023 |
+
if (!result.results) {
|
1024 |
+
throw new Error('No results found in server response');
|
1025 |
+
}
|
1026 |
+
|
1027 |
displayMarks(result.results);
|
1028 |
|
1029 |
} catch (error) {
|