import cv2
import numpy as np
import utlis  # PROJECT HELPER MODULE (CONTOUR FILTERING, CORNER POINTS, ANSWER DRAWING, IMAGE STACKING)
import gradio as gr
def fun1(img):
    return img
########################################################################
def process_video(image_path):
    webCamFeed = True
    pathImage = "5.jpg"
    # cap = cv2.VideoCapture(image_path)
    # cap.set(10, 160)
    heightImg = 700
    widthImg = 700
    questions = 5
    choices = 5
    ans = [1, 2, 0, 2, 4]
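    # ANSWER KEY: one zero-based choice index per question (0 = first bubble, 4 = fifth);
    # a question is counted as correct later when the detected index myIndex[x] equals ans[x].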
    ########################################################################
    count = 0

    # while True:
    #     if webCamFeed: success, img = cap.read()
    #     else: img = cv2.imread(pathImage)
    img = cv2.imread(image_path)  # READ IMAGE FROM THE SUPPLIED FILE PATH
    img = cv2.resize(img, (widthImg, heightImg))  # RESIZE IMAGE
    imgFinal = img.copy()
    imgBlank = np.zeros((heightImg, widthImg, 3), np.uint8)  # CREATE A BLANK IMAGE FOR TESTING/DEBUGGING IF REQUIRED
    imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # CONVERT IMAGE TO GRAYSCALE
    imgBlur = cv2.GaussianBlur(imgGray, (5, 5), 1)  # ADD GAUSSIAN BLUR
    imgCanny = cv2.Canny(imgBlur, 10, 70)  # APPLY CANNY EDGE DETECTION
    try:
        # FIND ALL CONTOURS
        imgContours = img.copy()  # COPY IMAGE FOR DISPLAY PURPOSES
        imgBigContour = img.copy()  # COPY IMAGE FOR DISPLAY PURPOSES
        contours, hierarchy = cv2.findContours(imgCanny, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)  # FIND ALL CONTOURS
        cv2.drawContours(imgContours, contours, -1, (0, 255, 0), 10)  # DRAW ALL DETECTED CONTOURS
        rectCon = utlis.rectContour(contours)  # FILTER FOR RECTANGULAR CONTOURS
        biggestPoints = utlis.getCornerPoints(rectCon[0])  # CORNER POINTS OF THE BIGGEST RECTANGLE (ANSWER AREA)
        gradePoints = utlis.getCornerPoints(rectCon[1])  # CORNER POINTS OF THE SECOND BIGGEST RECTANGLE (GRADE BOX)
        if biggestPoints.size != 0 and gradePoints.size != 0:
            # BIGGEST RECTANGLE WARPING (ANSWER AREA)
            biggestPoints = utlis.reorder(biggestPoints)  # REORDER FOR WARPING
            cv2.drawContours(imgBigContour, biggestPoints, -1, (0, 255, 0), 20)  # DRAW THE BIGGEST CONTOUR
            pts1 = np.float32(biggestPoints)  # PREPARE POINTS FOR WARP
            pts2 = np.float32([[0, 0], [widthImg, 0], [0, heightImg], [widthImg, heightImg]])  # PREPARE POINTS FOR WARP
            matrix = cv2.getPerspectiveTransform(pts1, pts2)  # GET TRANSFORMATION MATRIX
            imgWarpColored = cv2.warpPerspective(img, matrix, (widthImg, heightImg))  # APPLY WARP PERSPECTIVE
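            # NOTE: pts2 lists the destination corners as top-left, top-right, bottom-left,
            # bottom-right; utlis.reorder is assumed to return the source corners in the same
            # order so the warp maps the answer area upright without mirroring it.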
            # SECOND BIGGEST RECTANGLE WARPING (GRADE BOX)
            cv2.drawContours(imgBigContour, gradePoints, -1, (255, 0, 0), 20)  # DRAW THE GRADE-BOX CONTOUR
            gradePoints = utlis.reorder(gradePoints)  # REORDER FOR WARPING
            ptsG1 = np.float32(gradePoints)  # PREPARE POINTS FOR WARP
            ptsG2 = np.float32([[0, 0], [325, 0], [0, 150], [325, 150]])  # PREPARE POINTS FOR WARP
            matrixG = cv2.getPerspectiveTransform(ptsG1, ptsG2)  # GET TRANSFORMATION MATRIX
            imgGradeDisplay = cv2.warpPerspective(img, matrixG, (325, 150))  # APPLY WARP PERSPECTIVE (325x150 GRADE AREA)
            # APPLY THRESHOLD
            imgWarpGray = cv2.cvtColor(imgWarpColored, cv2.COLOR_BGR2GRAY)  # CONVERT TO GRAYSCALE
            imgThresh = cv2.threshold(imgWarpGray, 170, 255, cv2.THRESH_BINARY_INV)[1]  # APPLY INVERSE BINARY THRESHOLD
            boxes = utlis.splitBoxes(imgThresh)  # SPLIT THE SHEET INTO ONE BOX PER BUBBLE
            # cv2.imshow("Split Test ", boxes[3])
            countR = 0
            countC = 0
            myPixelVal = np.zeros((questions, choices))  # TO STORE THE NON-ZERO PIXEL COUNT OF EACH BOX
            for image in boxes:
                # cv2.imshow(str(countR) + str(countC), image)
                totalPixels = cv2.countNonZero(image)  # FILLED BUBBLES HAVE MORE WHITE PIXELS AFTER INVERSION
                myPixelVal[countR][countC] = totalPixels
                countC += 1
                if countC == choices:  # MOVE TO THE NEXT QUESTION ROW
                    countC = 0
                    countR += 1
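            # The loop above assumes utlis.splitBoxes returns questions * choices (= 25)
            # equally sized cells in row-major order, one cell per answer bubble.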
            # FIND THE USER ANSWERS AND PUT THEM IN A LIST
            myIndex = []
            for x in range(0, questions):
                arr = myPixelVal[x]
                myIndexVal = np.where(arr == np.amax(arr))  # COLUMN WITH THE MOST MARKED PIXELS
                myIndex.append(myIndexVal[0][0])
            # print("USER ANSWERS", myIndex)
            # COMPARE THE VALUES TO FIND THE CORRECT ANSWERS
            grading = []
            for x in range(0, questions):
                if ans[x] == myIndex[x]:
                    grading.append(1)
                else:
                    grading.append(0)
            # print("GRADING", grading)
            score = (sum(grading) / questions) * 100  # FINAL GRADE AS A PERCENTAGE
            # print("SCORE", score)
            # DISPLAY ANSWERS
            utlis.showAnswers(imgWarpColored, myIndex, grading, ans)  # DRAW DETECTED ANSWERS
            utlis.drawGrid(imgWarpColored)  # DRAW GRID
            imgRawDrawings = np.zeros_like(imgWarpColored)  # NEW BLANK IMAGE WITH WARP IMAGE SIZE
            utlis.showAnswers(imgRawDrawings, myIndex, grading, ans)  # DRAW ON NEW IMAGE
            invMatrix = cv2.getPerspectiveTransform(pts2, pts1)  # INVERSE TRANSFORMATION MATRIX
            imgInvWarp = cv2.warpPerspective(imgRawDrawings, invMatrix, (widthImg, heightImg))  # WARP DRAWINGS BACK ONTO THE ORIGINAL VIEW

            # DISPLAY GRADE
            imgRawGrade = np.zeros_like(imgGradeDisplay, np.uint8)  # NEW BLANK IMAGE WITH GRADE AREA SIZE
            cv2.putText(imgRawGrade, str(int(score)) + "%", (70, 100),
                        cv2.FONT_HERSHEY_COMPLEX, 3, (0, 255, 255))  # ADD THE GRADE TO THE NEW IMAGE
            invMatrixG = cv2.getPerspectiveTransform(ptsG2, ptsG1)  # INVERSE TRANSFORMATION MATRIX
            imgInvGradeDisplay = cv2.warpPerspective(imgRawGrade, invMatrixG, (widthImg, heightImg))  # WARP GRADE BACK ONTO THE ORIGINAL VIEW

            # SHOW ANSWERS AND GRADE ON FINAL IMAGE
            imgFinal = cv2.addWeighted(imgFinal, 1, imgInvWarp, 1, 0)
            imgFinal = cv2.addWeighted(imgFinal, 1, imgInvGradeDisplay, 1, 0)
        # IMAGE ARRAY FOR DISPLAY
        imageArray = ([img, imgGray, imgCanny, imgContours],
                      [imgBigContour, imgThresh, imgWarpColored, imgFinal])
        # cv2.imshow("Final Result", imgFinal)
        # yield imgFinal, None
    except Exception:
        # FALL BACK TO BLANK PANELS IF THE SHEET COULD NOT BE DETECTED
        imageArray = ([img, imgGray, imgCanny, imgContours],
                      [imgBlank, imgBlank, imgBlank, imgBlank])
    # LABELS FOR DISPLAY
    labels = [["Original", "Gray", "Edges", "Contours"],
              ["Biggest Contour", "Threshold", "Warped", "Final"]]
    stackedImage = utlis.stackImages(imageArray, 0.5, labels)
    # cv2.imshow("Result", stackedImage)
    stackedImage = cv2.resize(stackedImage, (900, 800))
    yield imgFinal, stackedImage
    # SAVE IMAGE WHEN 's' KEY IS PRESSED (LEFTOVER FROM THE WEBCAM VERSION)
    # cv2.imwrite("Scanned/myImage" + str(count) + ".jpg", imgFinal)
    # cv2.rectangle(stackedImage, ((int(stackedImage.shape[1] / 2) - 230), int(stackedImage.shape[0] / 2) + 50),
    #               (1100, 350), (0, 255, 0), cv2.FILLED)
    # cv2.putText(stackedImage, "Scan Saved", (int(stackedImage.shape[1] / 2) - 200, int(stackedImage.shape[0] / 2)),
    #             cv2.FONT_HERSHEY_DUPLEX, 3, (0, 0, 255), 5, cv2.LINE_AA)
    # cv2.imshow('Result', stackedImage)
    # yield stackedImage
    # cv2.waitKey(300)
    count += 1
description_markdown = """
# OMR Grading tool made by Harpreet Singh
## Usage
This tool expects an image of an OMR sheet with 5 questions as input. Upon submission, it processes the image and returns it with the calculated percentage written in the sheet's grade box, along with a grid of the intermediate processing steps.
## Disclaimer
Please note that this tool is for research purposes only and may not always be 100% accurate. Users are advised to exercise discretion and supervise the tool's usage accordingly.
## Developer Contact
For further inquiries or permissions, you can reach out to the developer through the following social media accounts:
- [LinkedIn](https://www.linkedin.com/in/harpreet-singh-4b1657251?utm_source=share&utm_campaign=share_via&utm_content=profile&utm_medium=android_app)
- [GitHub](https://github.com/Harpreet-1313)
"""
app = gr.Interface(
    fn=process_video,
    # fn=fun1,
    inputs=gr.Image(type="filepath"),
    outputs=[gr.Image(), gr.Image()],
    examples=[["1.jpg"], ["2.jpg"]],
    title="OMR Grading App",
    description=description_markdown,
    theme=gr.themes.Soft())

app.launch(auth=("username", "password"), auth_message="Please Login")
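
# Minimal local usage sketch (kept commented so it does not run on deployment): process_video
# is a generator, so its first yielded pair is (graded sheet, debug grid). "1.jpg" assumes one
# of the bundled example sheets is available next to this script.
# graded, debug_grid = next(process_video("1.jpg"))
# cv2.imwrite("graded_sheet.jpg", graded)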