yamanavijayavardhan committed
Commit c083a98 · 1 Parent(s): 428cbee

printing extracted text6

Files changed (5)
  1. HTR/app.py +27 -38
  2. HTR/hcr.py +48 -16
  3. main.py +6 -4
  4. templates/2.html +195 -27
  5. utils.py +28 -6
HTR/app.py CHANGED
@@ -56,84 +56,72 @@ def preprocess_image(img):
 
 def extract_text_from_image(img_path):
     try:
+        # Log start of text extraction
+        log_print(f"Starting text extraction for image: {img_path}")
+
         # Ensure the image exists
         if not os.path.exists(img_path):
+            error_msg = f"Image file not found: {img_path}"
+            log_print(error_msg, "ERROR")
             notification_queue.put({
                 "type": "error",
-                "message": f"Image file not found: {img_path}"
+                "message": error_msg
             })
             return ""
 
         # Read the image
-        notification_queue.put({
-            "type": "info",
-            "message": f"Reading image: {os.path.basename(img_path)}"
-        })
+        log_print(f"Reading image: {os.path.basename(img_path)}")
         img = cv2.imread(img_path)
         if img is None:
+            error_msg = f"Failed to read image: {img_path}"
+            log_print(error_msg, "ERROR")
             notification_queue.put({
                 "type": "error",
-                "message": f"Failed to read image: {img_path}"
+                "message": error_msg
             })
             return ""
 
         # Log image properties
-        notification_queue.put({
-            "type": "info",
-            "message": f"Processing image: {os.path.basename(img_path)}\nImage shape: {img.shape}\nImage type: {img.dtype}"
-        })
+        log_print(f"Image properties - Shape: {img.shape}, Type: {img.dtype}")
 
         # Process the image
-        notification_queue.put({
-            "type": "info",
-            "message": "Converting image to text regions..."
-        })
+        log_print("Converting image to text regions...")
         imgs = convert_image(img)
         if not imgs:
-            notification_queue.put({
-                "type": "warning",
-                "message": "No text regions detected in image. Processing whole image..."
-            })
+            log_print("No text regions detected, processing whole image...", "WARNING")
             # Try processing the whole image as one region
             temp_path = os.path.join(tempfile.gettempdir(), 'whole_image.png')
             cv2.imwrite(temp_path, img)
             imgs = [temp_path]
 
-        notification_queue.put({
-            "type": "info",
-            "message": f"Found {len(imgs)} text regions"
-        })
+        log_print(f"Found {len(imgs)} text regions")
 
-        notification_queue.put({
-            "type": "info",
-            "message": "Processing text regions..."
-        })
+        log_print("Processing text regions...")
         images_path = struck_images(imgs)
         if not images_path:
+            error_msg = "No valid text regions after processing"
+            log_print(error_msg, "ERROR")
             notification_queue.put({
                 "type": "error",
-                "message": "No valid text regions after processing"
+                "message": error_msg
             })
             return ""
 
-        notification_queue.put({
-            "type": "info",
-            "message": "Extracting text from regions..."
-        })
+        log_print("Extracting text from regions...")
         t = text(images_path)
         if not t:
+            error_msg = "No text could be extracted from image"
+            log_print(error_msg, "ERROR")
             notification_queue.put({
                 "type": "error",
-                "message": "No text could be extracted from image"
+                "message": error_msg
            })
            return ""
 
-        notification_queue.put({
-            "type": "info",
-            "message": "Performing spell checking..."
-        })
+        log_print("Performing spell checking...")
         t = spell_grammer(t)
 
+        log_print(f"Successfully extracted text: {t}")
         notification_queue.put({
             "type": "success",
             "message": "Text extraction complete",
@@ -144,10 +132,11 @@ def extract_text_from_image(img_path):
         return t
 
     except Exception as e:
-        error_msg = str(e)
+        error_msg = f"Error in text extraction: {str(e)}"
+        log_print(error_msg, "ERROR")
         notification_queue.put({
             "type": "error",
-            "message": f"Error in text extraction: {error_msg}"
+            "message": error_msg
         })
         return ""
 
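The change above routes every progress and error message through log_print instead of pushing info-level dictionaries onto notification_queue by hand. A minimal sketch of how a caller could exercise the updated pipeline and drain the queue afterwards; the sample image path and the consumer loop are illustrative, not part of this commit:

# Illustrative driver for the updated extract_text_from_image(); the image
# path is hypothetical and the queue-draining loop is not part of this commit.
from HTR.app import extract_text_from_image
from utils import notification_queue

extracted = extract_text_from_image("samples/answer_sheet.png")  # hypothetical path

while not notification_queue.empty():
    event = notification_queue.get_nowait()          # dicts produced by log_print()
    print(f"[{event['type']}] {event['message']}")

print("Extracted text:", extracted)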
 
HTR/hcr.py CHANGED
@@ -32,33 +32,65 @@ def text(image_cv):
     try:
         # Initialize model if not already initialized
         if processor is None or model is None:
+            log_print("TrOCR model not initialized, initializing now...")
             initialize_model()
 
+        if processor is None or model is None:
+            raise RuntimeError("Failed to initialize TrOCR model")
+
         if not isinstance(image_cv, list):
             image_cv = [image_cv]
 
         t = ""
         total_images = len(image_cv)
-        notification_queue.put({
-            "type": "info",
-            "message": f"Processing {total_images} image(s)..."
-        })
-        for i in image_cv:
-            img_rgb = cv2.cvtColor(i, cv2.COLOR_BGR2RGB)
-            image = Image.fromarray(img_rgb)
-
-            pixel_values = processor(image, return_tensors="pt").pixel_values
-            generated_ids = model.generate(pixel_values)
-
-            generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
-            t = t + generated_text.replace(" ", "") + " "
-        return t
+        log_print(f"Processing {total_images} image(s) for text extraction")
+
+        for i, img in enumerate(image_cv):
+            try:
+                log_print(f"Processing image {i+1}/{total_images}")
+
+                # Validate image
+                if img is None:
+                    log_print(f"Skipping image {i+1} - Image is None", "WARNING")
+                    continue
+
+                # Convert to RGB
+                img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+                image = Image.fromarray(img_rgb)
+
+                # Get pixel values
+                pixel_values = processor(image, return_tensors="pt").pixel_values
+                if torch.cuda.is_available():
+                    pixel_values = pixel_values.to('cuda')
+
+                # Generate text
+                generated_ids = model.generate(pixel_values)
+                generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
+
+                # Clean up the text
+                cleaned_text = generated_text.replace(" ", "")
+                t = t + cleaned_text + " "
+
+                log_print(f"Successfully extracted text from image {i+1}: {cleaned_text}")
+
+                # Clean up CUDA memory
+                if torch.cuda.is_available():
+                    del pixel_values
+                    del generated_ids
+                    torch.cuda.empty_cache()
+
+            except Exception as e:
+                log_print(f"Error processing image {i+1}: {str(e)}", "ERROR")
+                continue
+
+        return t.strip()
 
     except Exception as e:
-        error_msg = str(e)
+        error_msg = f"Error in text function: {str(e)}"
+        log_print(error_msg, "ERROR")
         notification_queue.put({
             "type": "error",
-            "message": f"Error in text function: {error_msg}"
+            "message": error_msg
         })
         return ""
 
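For reference, the per-region inference inside the rewritten text() follows the standard TrOCR pattern from Hugging Face transformers. A self-contained sketch of that pattern for a single cropped region is below; the checkpoint name and the image path are assumptions for illustration, since HTR/hcr.py loads its own model inside initialize_model():

# Standalone sketch of the per-region TrOCR call pattern used in text().
# Checkpoint name and image path are assumptions, not taken from this repo.
import cv2
import torch
from PIL import Image
from transformers import TrOCRProcessor, VisionEncoderDecoderModel

processor = TrOCRProcessor.from_pretrained("microsoft/trocr-base-handwritten")
model = VisionEncoderDecoderModel.from_pretrained("microsoft/trocr-base-handwritten")
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)

img = cv2.imread("region.png")                        # one cropped text region (hypothetical path)
image = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))

pixel_values = processor(image, return_tensors="pt").pixel_values.to(device)
generated_ids = model.generate(pixel_values)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])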
 
main.py CHANGED
@@ -572,7 +572,8 @@ def compute_marks():
             logger.info(f"Processing student: {student}, image: {filename}")
             logger.info(f"Extracted text: {s_answer}")
 
-            if not s_answer:
+            # Handle case where text extraction fails
+            if s_answer is None or s_answer.strip() == '':
                 logger.warning(f"No text extracted from {image_path}")
                 results.append({
                     "subfolder": student,
@@ -580,7 +581,7 @@
                     "marks": 0,
                     "extracted_text": "",
                     "correct_answer": answers[count],
-                    "error": "No text could be extracted from image"
+                    "error": "No text could be extracted from image. Please check image quality."
                 })
                 count += 1
                 continue
@@ -617,7 +618,7 @@
                     "marks": 0,
                     "extracted_text": "",
                     "correct_answer": answers[count] if count < len(answers) else [],
-                    "error": str(e)
+                    "error": f"Error processing image: {str(e)}"
                 })
                 count += 1
                 continue
@@ -638,7 +639,8 @@
         "debug_info": {
             "total_students": len(data),
             "total_answers": len(answers),
-            "answers_processed": count
+            "answers_processed": count,
+            "successful_extractions": len([r for r in results if r.get('extracted_text')])
        }
    }), 200
 
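With these changes, each entry in the /compute_marks response carries an explicit error string when extraction fails, and debug_info reports how many extractions succeeded. An illustrative response shape; the field names come from this diff, the values are made up:

# Illustrative /compute_marks response after this change (values are made up).
example_response = {
    "results": [
        {
            "subfolder": "student_01",
            "image": "answer_1.jpg",
            "marks": 0,
            "extracted_text": "",
            "correct_answer": ["expected answer text"],
            "error": "No text could be extracted from image. Please check image quality."
        }
    ],
    "debug_info": {
        "total_students": 1,
        "total_answers": 1,
        "answers_processed": 1,
        "successful_extractions": 0
    }
}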
 
templates/2.html CHANGED
@@ -782,6 +782,29 @@
         font-family: monospace;
         line-height: 1.5;
     }
+
+    /* Add these styles if not already present */
+    .log-entry {
+        padding: 5px;
+        margin: 2px 0;
+        border-radius: 4px;
+    }
+    .log-info {
+        background-color: #e3f2fd;
+        color: #0d47a1;
+    }
+    .log-error {
+        background-color: #ffebee;
+        color: #c62828;
+    }
+    .log-warning {
+        background-color: #fff3e0;
+        color: #ef6c00;
+    }
+    .log-success {
+        background-color: #e8f5e9;
+        color: #2e7d32;
+    }
     </style>
 </head>
 <body>
@@ -905,6 +928,19 @@
         </div>
     </div>
 
+    <!-- Add this right after your existing modal -->
+    <div id="logModal" class="modal">
+        <div class="modal-content" style="width: 80%; max-height: 80vh; margin: 5% auto;">
+            <div class="modal-header">
+                <h2>Processing Logs</h2>
+                <span class="close" onclick="document.getElementById('logModal').style.display='none'">&times;</span>
+            </div>
+            <div class="modal-body" style="max-height: 60vh; overflow-y: auto;">
+                <pre id="logContent" style="white-space: pre-wrap; word-wrap: break-word; font-family: monospace; line-height: 1.5;"></pre>
+            </div>
+        </div>
+    </div>
+
     <script>
     const notificationSystem = {
         container: null,
@@ -1486,51 +1522,56 @@
                 groupedResults[result.subfolder].push(result);
             });
 
-            // Create summary section
+            // Calculate statistics
+            const totalStudents = Object.keys(groupedResults).length;
+            const totalAnswers = results.length;
+            const successfulExtractions = results.filter(r => r.extracted_text && !r.error).length;
+            const failedExtractions = results.filter(r => !r.extracted_text || r.error).length;
+
+            // Create summary section with more detailed information
             const summarySection = document.createElement('div');
             summarySection.className = 'marks-summary';
             summarySection.innerHTML = `
                 <h4>Processing Summary</h4>
-                <p>Total students processed: ${Object.keys(groupedResults).length}</p>
-                <p>Total answers evaluated: ${results.length}</p>
-            `;
-
-            // Update table headers to include the View Text column
-            const table = document.getElementById('marks-table');
-            table.querySelector('thead tr').innerHTML = `
-                <th>Student Folder</th>
-                <th>Image Name</th>
-                <th>Marks</th>
-                <th>Actions</th>
+                <p>Total students processed: ${totalStudents}</p>
+                <p>Total answers evaluated: ${totalAnswers}</p>
+                <p>Successful text extractions: ${successfulExtractions}</p>
+                <p>Failed text extractions: ${failedExtractions}</p>
+                ${failedExtractions > 0 ? '<p style="color: #721c24;">⚠️ Some text extractions failed. Click "View Text" to see details.</p>' : ''}
             `;
 
-            // Insert summary before the table
-            const tableContainer = document.getElementById('marks-table-container');
-            tableContainer.insertBefore(summarySection, tableContainer.firstChild);
-
             // Display results in table
             Object.entries(groupedResults).forEach(([subfolder, folderResults]) => {
                 folderResults.forEach(result => {
                     const row = document.createElement('tr');
                     row.innerHTML = `
-                        <td>${result.subfolder}</td>
-                        <td>${result.image}</td>
-                        <td>${result.marks.toFixed(2)}</td>
+                        <td>${escapeHtml(result.subfolder)}</td>
+                        <td>${escapeHtml(result.image)}</td>
+                        <td>
+                            ${result.error ?
+                                `<span style="color: #721c24;">0.00 ⚠️</span>` :
+                                result.marks.toFixed(2)}
+                        </td>
                         <td>
                             <button class="view-text-btn" onclick='showTextModal(${JSON.stringify({
                                 extracted_text: result.extracted_text,
                                 correct_answer: result.correct_answer,
                                 marks: result.marks,
                                 image: result.image,
-                                subfolder: result.subfolder
+                                subfolder: result.subfolder,
+                                error: result.error
                             })})'>
-                                View Text
+                                ${result.error ? 'View Error' : 'View Text'}
                             </button>
                         </td>
                     `;
                     tableBody.appendChild(row);
                 });
             });
+
+            // Insert summary before the table
+            const tableContainer = document.getElementById('marks-table-container');
+            tableContainer.insertBefore(summarySection, tableContainer.firstChild);
         }
 
         function showTextModal(result) {
@@ -1539,19 +1580,23 @@
             const correctAnswer = document.getElementById('correctAnswer');
 
             // Handle extracted text
-            if (result.extracted_text === undefined || result.extracted_text === null || result.extracted_text.trim() === '') {
+            if (result.error) {
+                // If there's an error, display it with the error styling
+                extractedText.innerHTML = `<span style="color: #721c24; background-color: #f8d7da; padding: 10px; border-radius: 4px; display: block;">${result.error}</span>`;
+            } else if (!result.extracted_text || result.extracted_text.trim() === '') {
                 extractedText.innerHTML = '<span style="color: #721c24; background-color: #f8d7da; padding: 10px; border-radius: 4px; display: block;">No text could be extracted from the image. This might be due to poor image quality or unreadable handwriting.</span>';
             } else {
-                extractedText.textContent = result.extracted_text;
+                extractedText.innerHTML = `<pre style="white-space: pre-wrap; word-wrap: break-word; margin: 0; font-family: monospace; line-height: 1.5;">${escapeHtml(result.extracted_text)}</pre>`;
             }
 
             // Handle correct answer
             if (!result.correct_answer || (Array.isArray(result.correct_answer) && result.correct_answer.length === 0)) {
                 correctAnswer.innerHTML = '<span style="color: #856404; background-color: #fff3cd; padding: 10px; border-radius: 4px; display: block;">No correct answer available. Please ensure you have generated answers first.</span>';
             } else {
-                correctAnswer.textContent = Array.isArray(result.correct_answer)
+                const formattedAnswer = Array.isArray(result.correct_answer)
                     ? result.correct_answer.filter(ans => ans && ans.trim() !== '').join('\n\n')
                     : result.correct_answer;
+                correctAnswer.innerHTML = `<pre style="white-space: pre-wrap; word-wrap: break-word; margin: 0; font-family: monospace; line-height: 1.5;">${escapeHtml(formattedAnswer)}</pre>`;
             }
 
             // Add additional information if available
@@ -1565,8 +1610,8 @@
                     <h4>Additional Information</h4>
                     <p style="font-family: sans-serif;">
                         ${result.marks !== undefined ? `<strong>Marks:</strong> ${result.marks.toFixed(2)}<br>` : ''}
-                        ${result.image ? `<strong>Image:</strong> ${result.image}<br>` : ''}
-                        ${result.subfolder ? `<strong>Student Folder:</strong> ${result.subfolder}` : ''}
+                        ${result.image ? `<strong>Image:</strong> ${escapeHtml(result.image)}<br>` : ''}
+                        ${result.subfolder ? `<strong>Student Folder:</strong> ${escapeHtml(result.subfolder)}` : ''}
                     </p>
                 `;
 
@@ -1582,13 +1627,22 @@
                 correct_answer: result.correct_answer,
                 marks: result.marks,
                 image: result.image,
-                subfolder: result.subfolder
+                subfolder: result.subfolder,
+                error: result.error
             });
 
             // Show the modal
             modal.style.display = 'block';
         }
 
+        // Helper function to escape HTML special characters
+        function escapeHtml(text) {
+            if (!text) return '';
+            const div = document.createElement('div');
+            div.textContent = text;
+            return div.innerHTML;
+        }
+
         function closeModal() {
             const modal = document.getElementById('textModal');
             modal.style.display = 'none';
@@ -1727,6 +1781,120 @@
         window.addEventListener('beforeunload', function() {
             stopLogRefresh();
         });
+
+        // Add this to your existing JavaScript
+        let logMessages = [];
+
+        function addLogMessage(message, type = 'info') {
+            const logContent = document.getElementById('logContent');
+            const entry = document.createElement('div');
+            entry.className = `log-entry log-${type.toLowerCase()}`;
+            entry.textContent = message;
+            logContent.appendChild(entry);
+            logContent.scrollTop = logContent.scrollHeight;
+
+            // Keep only last 100 messages
+            logMessages.push({ message, type });
+            if (logMessages.length > 100) {
+                logMessages.shift();
+                // Remove first child from logContent
+                if (logContent.firstChild) {
+                    logContent.removeChild(logContent.firstChild);
+                }
+            }
+        }
+
+        function showLogModal() {
+            document.getElementById('logModal').style.display = 'block';
+        }
+
+        // Update your existing event source handling
+        if (typeof eventSource !== 'undefined') {
+            eventSource.close();
+        }
+
+        eventSource = new EventSource('/notifications');
+        eventSource.onmessage = function(event) {
+            const data = JSON.parse(event.data);
+            if (data.type === 'ping') return;
+
+            // Add message to log modal
+            if (data.message) {
+                let type = data.type;
+                if (typeof data.message === 'object' && data.message.message) {
+                    addLogMessage(data.message.message, type);
+                } else {
+                    addLogMessage(data.message, type);
+                }
+            }
+
+            // Show notification
+            if (data.type === 'error') {
+                showError(data.message);
+            } else if (data.type === 'success') {
+                showSuccess(data.message);
+            } else if (data.type === 'info') {
+                showInfo(data.message);
+            }
+        };
+
+        // Update your compute_marks function
+        async function computeMarks() {
+            try {
+                // Show log modal
+                showLogModal();
+                addLogMessage("Starting marks computation...", "info");
+
+                // ... rest of your existing compute_marks code ...
+
+                const formData = new FormData();
+                formData.append('answers', JSON.stringify(answers));
+
+                // Append files
+                const files = document.getElementById('file').files;
+                for (let i = 0; i < files.length; i++) {
+                    formData.append('file', files[i]);
+                }
+
+                addLogMessage("Uploading files and computing marks...", "info");
+
+                const response = await fetch('/compute_marks', {
+                    method: 'POST',
+                    body: formData
+                });
+
+                if (!response.ok) {
+                    throw new Error(`HTTP error! status: ${response.status}`);
+                }
+
+                const result = await response.json();
+                if (result.error) {
+                    addLogMessage(`Error: ${result.error}`, "error");
+                    throw new Error(result.error);
+                }
+
+                addLogMessage("Successfully computed marks!", "success");
+                displayMarks(result.results);
+
+            } catch (error) {
+                addLogMessage(`Error: ${error.message}`, "error");
+                showError(error.message);
+            }
+        }
+
+        // Update your compute_answers function similarly
+        async function computeAnswers() {
+            try {
+                showLogModal();
+                addLogMessage("Starting answer computation...", "info");
+
+                // ... rest of your existing compute_answers code ...
+
+            } catch (error) {
+                addLogMessage(`Error: ${error.message}`, "error");
+                showError(error.message);
+            }
+        }
     </script>
 </body>
 </html>
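The new log modal is fed by an EventSource on /notifications that expects one JSON object per server-sent event, with a type of info, warning, error, success, or ping. The server side of that route is not part of this commit; a hedged Flask sketch of an endpoint that drains notification_queue in the format the client code above expects:

# Hedged sketch only: a Flask /notifications route compatible with the
# EventSource handling above. The repo's actual route may differ.
import json
import queue
from flask import Flask, Response
from utils import notification_queue

app = Flask(__name__)

@app.route('/notifications')
def notifications():
    def stream():
        while True:
            try:
                event = notification_queue.get(timeout=15)
            except queue.Empty:
                event = {"type": "ping"}              # keepalive; the client ignores it
            yield f"data: {json.dumps(event)}\n\n"    # one SSE frame per queued message
    return Response(stream(), mimetype='text/event-stream')
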
utils.py CHANGED
@@ -1,6 +1,7 @@
 import queue
 import logging
 import sys
+from datetime import datetime
 
 # Create notification queue
 notification_queue = queue.Queue()
@@ -14,9 +15,30 @@ logging.basicConfig(
 logger = logging.getLogger(__name__)
 
 def log_print(message, level="INFO"):
-    if level == "INFO":
-        logger.info(message)
-    elif level == "ERROR":
-        logger.error(message)
-    elif level == "WARNING":
-        logger.warning(message)
+    """
+    Log a message both to the console and send it to the frontend via notification queue
+
+    Args:
+        message (str): The message to log
+        level (str): The log level (INFO, WARNING, ERROR, SUCCESS)
+    """
+    # Get timestamp
+    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+
+    # Log to console
+    print(f"[{timestamp}] {level}: {message}")
+
+    # Map log level to notification type
+    type_map = {
+        "INFO": "info",
+        "WARNING": "warning",
+        "ERROR": "error",
+        "SUCCESS": "success"
+    }
+
+    # Send to frontend via notification queue
+    notification_queue.put({
+        "type": type_map.get(level, "info"),
+        "message": f"{message}",
+        "timestamp": timestamp
+    })
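
log_print() now mirrors every message to stdout and to the notification queue that the frontend reads over SSE. A quick usage sketch; the printed timestamp and the queued payload shown in comments are illustrative:

# Usage sketch for the new log_print(); timestamps shown are illustrative.
from utils import log_print, notification_queue

log_print("Found 3 text regions")             # prints e.g. "[2025-01-01 10:00:00] INFO: Found 3 text regions"
log_print("Failed to read image", "ERROR")    # prints at ERROR and enqueues type "error"

event = notification_queue.get_nowait()
# event == {"type": "info", "message": "Found 3 text regions", "timestamp": "..."}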