mikecho committed on
Commit
6763841
·
verified ·
1 Parent(s): fe7208c

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +126 -0
app.py ADDED
@@ -0,0 +1,126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from transformers import pipeline
3
+
4
+ def main():
5
+ st.set_page_config(page_title="Unmasked the Target Customers", page_icon="🦜")
6
+ st.header("Turn the photos taken in the campaign to useful marketing insights")
7
+ uploaded_file = st.file_uploader("Select an Image...")
8
+
9
+ def pipeline_1_final(image_lst):
10
+ pipe = pipeline("object-detection", model="hustvl/yolos-tiny")
11
+ preds = pipe(image)
12
+ person_count = 0
13
+ sub_image_lst = []
14
+ for pred in preds:
15
+ if pred['label'] == 'person':
16
+ person_count +=1
17
+ box = pred['box']
18
+ xmin, ymin, xmax, ymax = box.values()
19
+ sub_image = extract_subimage(image,xmin, xmax, ymin, ymax)
20
+ sub_image_lst += [sub_image]
21
+ return sub_image_lst, person_count
22
+
23
+ def pipeline_2_final(image_lst):
24
+ age_lst = []
25
+ age_mapping = {"0-2": "lower than 10",
26
+ "3-9": "lower than 10",
27
+ "10-19":"10-19",
28
+ "20-29":"20-29",
29
+ "30-39":"30-39",
30
+ "40-49":"40-49",
31
+ "50-59":"50-59",
32
+ "60-69":"60-69",
33
+ "more than 70" : "70 or above"}
34
+ pipe = pipeline("image-classification", model="nateraw/vit-age-classifier")
35
+ for image in image_lst:
36
+ preds = pipe(image)
37
+ preds_age_range = preds[0]['label']
38
+ preds_age_range = age_mapping[preds_age_range]
39
+ age_lst +=[preds_age_range]
40
+ return age_lst
41
+
42
+ def pipeline_3_final(image_lst):
43
+ gender_lst = []
44
+ pipe = pipeline("image-classification", model="mikecho/NTQAI_pedestrian_gender_recognition_v1")
45
+ for image in image_lst:
46
+ preds = pipe(image)
47
+ preds_gender = preds[0]['label']
48
+ gender_lst +=[preds_gender]
49
+ return gender_lst
50
+
51
+ def gender_prediciton_model_NTQAI_pedestrian_gender_recognition(image_lst):
52
+ gender_lst = []
53
+ pipe = pipeline("image-classification", model="NTQAI/pedestrian_gender_recognition")
54
+ for image in image_lst:
55
+ preds = pipe(image)
56
+ preds_gender = preds[0]['label']
57
+ gender_lst +=[preds_gender]
58
+ return gender_lst
59
+
60
+
61
+ def pipeline_4_final(image_lst):
62
+ start_time = time.time()
63
+ pipe = pipeline("image-classification", model="dima806/facial_emotions_image_detection")
64
+ preds_lst = []
65
+ for image in image_lst:
66
+ preds = pipe(image)
67
+ preds_emotion = preds[0]['label']
68
+ preds_lst +=[preds_emotion]
69
+ return preds_lst
70
+ def generate_gender_tables(gender_list, age_list, emotion_list):
71
+ gender_count = {}
72
+ for gender, age, emotion in zip(gender_list, age_list, emotion_list):
73
+ if age not in gender_count:
74
+ gender_count[age] = {'male': 0, 'female': 0}
75
+ gender_count[age][gender] += 1
76
+ happiness_percentage = {}
77
+ for gender, age, emotion in zip(gender_list, age_list, emotion_list):
78
+ if age not in happiness_percentage:
79
+ happiness_percentage[age] = {'male': 0, 'female': 0}
80
+ if emotion == 'happiness':
81
+ happiness_percentage[age][gender] += 1
82
+
83
+ table1 = []
84
+ for age, count in gender_count.items():
85
+ male_count = count['male']
86
+ female_count = count['female']
87
+ table1.append([age, male_count, female_count])
88
+
89
+ table2 = []
90
+ for age, happiness in happiness_percentage.items():
91
+ male_count = gender_count[age]['male']
92
+ female_count = gender_count[age]['female']
93
+ male_percentage = (happiness['male'] / male_count) * 100 if male_count > 0 else 0
94
+ female_percentage = (happiness['female'] / female_count) * 100 if female_count > 0 else 0
95
+ table2.append([age, male_percentage, female_percentage])
96
+
97
+ return table1, table2
98
+
99
+ if uploaded_file is not None:
100
+ print(uploaded_file)
101
+ bytes_data = uploaded_file.getvalue()
102
+ with open(uploaded_file.name, "wb") as file:
103
+ file.write(bytes_data)
104
+ st.image(uploaded_file, caption="Uploaded Image", use_column_width=True)
105
+
106
+
107
+ pipeline_1_out, person_count = pipeline_1_final(uploaded_file.name)
108
+ pipeline_2_age = pipeline_2_final(pipeline_1_out)
109
+ pipeline_3_gender = pipeline_3_final(pipeline_1_out)
110
+ pipeline_4_emotion = pipeline_3_final(pipeline_1_out)
111
+ table1, table2 = generate_gender_tables(pipeline_3_gender, pipeline_2_age, pipeline_4_emotion)
112
+ st.text('The detected number of person:', person_count)
113
+ st.text('\nGender and Age Group Distribution')
114
+ st.text('Age, Male, Female')
115
+ for row in table1:
116
+ print(row)
117
+
118
+
119
+ st.text('\nShare of Happniess')
120
+ st.text('Age, Male, Female')
121
+ for row in table2:
122
+ print(row)
123
+
124
+
125
+ if __name__ == "__main__":
126
+ main()