Maria Tsilimos committed
Commit 89f1d44 · unverified · 1 Parent(s): ed17ba4

Create app.py

Files changed (1)
  1. app.py +244 -0
app.py ADDED
@@ -0,0 +1,244 @@
+ import streamlit as st
+ import time
+ import pandas as pd
+ import io
+ from transformers import pipeline
+ from streamlit_extras.stylable_container import stylable_container
+ import plotly.express as px
+ import zipfile
+ from PyPDF2 import PdfReader
+ import docx
+ import os
+ from comet_ml import Experiment
+ import re
+ import numpy as np
+
+ st.set_page_config(layout="wide", page_title="Named Entity Recognition App")
+
+ # --- Configuration ---
+ COMET_API_KEY = os.environ.get("COMET_API_KEY")
+ COMET_WORKSPACE = os.environ.get("COMET_WORKSPACE")
+ COMET_PROJECT_NAME = os.environ.get("COMET_PROJECT_NAME")
+
+ comet_initialized = False
+ if COMET_API_KEY and COMET_WORKSPACE and COMET_PROJECT_NAME:
+     comet_initialized = True
+
+ # --- Initialize session state ---
+ if 'file_upload_attempts' not in st.session_state:
+     st.session_state['file_upload_attempts'] = 0
+
+ max_attempts = 10
+
+ # --- Helper function for model loading ---
+ @st.cache_resource
+ def load_ner_model():
+     """Loads the pre-trained NER model and caches it."""
+     return pipeline("token-classification", model="h2oai/deberta_finetuned_pii", aggregation_strategy="first")
+
+ # --- UI Elements ---
+ st.subheader("9-Personal Data Named Entity Recognition Web App", divider="rainbow")
+ st.link_button("by nlpblogs", "https://nlpblogs.com", type="tertiary")
+
+ expander = st.expander("**Important notes on the 9-Personal Data Named Entity Recognition Web App**")
+ expander.write('''
+
+ **Named Entities:**
+ This 9-Personal Data Named Entity Recognition Web App recognizes nine (9) categories of personal data:
+
+ 1. **Account-related information**: Account name, account number, and transaction amounts
+
+ 2. **Banking details**: BIC, IBAN, and Bitcoin or Ethereum addresses
+
+ 3. **Personal information**: Full name, first name, middle name, last name, gender, and date of birth
+
+ 4. **Contact information**: Email, phone number, and street address (including building number, city, county, state, and zip code)
+
+ 5. **Job-related data**: Job title, job area, job descriptor, and job type
+
+ 6. **Financial data**: Credit card number, issuer, CVV, and currency information (code, name, and symbol)
+
+ 7. **Digital identifiers**: IP addresses (IPv4 and IPv6), MAC addresses, and user agents
+
+ 8. **Online presence**: URL, usernames, and passwords
+
+ 9. **Other sensitive data**: SSN, vehicle VIN and VRM, phone IMEI, and nearby GPS coordinates
+
+ Results are presented in an easy-to-read table, visualized in an interactive tree map, pie chart, and bar chart, and are available for download along with a Glossary of tags.
+
+ **How to Use:**
+ Upload your .pdf or .docx file. Then, click the 'Results' button to extract and tag the entities in your text data.
+
+ **Usage Limits:**
+ You can request results up to 10 times.
+
+ **Customization:**
+ To change the app's background color to white or black, click the three-dot menu on the right-hand side of your app, go to "Settings" and then "Choose app theme, colors and fonts".
+
+ **Technical issues:**
+ If your connection times out, please refresh the page or reopen the app's URL.
+
+ For any errors or inquiries, please contact us at info@nlpblogs.com.
+
+ ''')
+
+
+ with st.sidebar:
+     container = st.container(border=True)
+     container.write("**Named Entity Recognition (NER)** is the task of extracting and tagging entities in text data. Entities can be persons, organizations, locations, countries, products, events, etc.")
+     st.subheader("Related NLP Web Apps", divider="rainbow")
+     st.link_button("8-Named Entity Recognition Web App", "https://nlpblogs.com/shop/named-entity-recognition-ner/8-named-entity-recognition-web-app/", type="primary")
+
+ # --- File Upload ---
+ upload_file = st.file_uploader("Upload your file. Accepted file formats include: .pdf, .docx", type=['pdf', 'docx'])
+ text = None
+ df = None
+
+ if upload_file is not None:
+     file_extension = upload_file.name.split('.')[-1].lower()
+     if file_extension == 'pdf':
+         try:
+             pdf_reader = PdfReader(upload_file)
+             text = ""
+             for page in pdf_reader.pages:
+                 # extract_text() can return None (e.g. for image-only pages), so fall back to ""
+                 text += page.extract_text() or ""
+             st.write("File uploaded successfully. Due to security protocols, the file content is hidden.")
+         except Exception as e:
+             st.error(f"An error occurred while reading PDF: {e}")
+             text = None
+     elif file_extension == 'docx':
+         try:
+             doc = docx.Document(upload_file)
+             text = "\n".join([para.text for para in doc.paragraphs])
+             st.write("File uploaded successfully. Due to security protocols, the file content is hidden.")
+         except Exception as e:
+             st.error(f"An error occurred while reading docx: {e}")
+             text = None
+     else:
+         st.warning("Unsupported file type.")
+         text = None
+
+ st.divider()
+
+ # --- Results Button and Processing Logic ---
+ if st.button("Results"):
+     if not comet_initialized:
+         st.warning("Comet ML not initialized. Check environment variables if you wish to log data.")
+
+     if st.session_state['file_upload_attempts'] >= max_attempts:
+         st.error(f"You have requested results {max_attempts} times. You have reached your daily request limit.")
+         st.stop()
+
+     if text is None:
+         st.warning("Please upload a supported file (.pdf or .docx) before requesting results.")
+         st.stop()
+
+     st.session_state['file_upload_attempts'] += 1
+
+     with st.spinner("Analyzing text...", show_time=True):
+         # Load model (cached)
+         model = load_ner_model()
+         text_entities = model(text)
+         df = pd.DataFrame(text_entities)
+
+         # Clean and filter DataFrame
+         pattern = r'[^\w\s]'
+         df['word'] = df['word'].replace(pattern, '', regex=True)
+         df = df.replace('', 'Unknown').dropna()
+
+         if df.empty:
+             st.warning("No entities were extracted from the uploaded text.")
+             st.stop()
+
+         if comet_initialized:
+             experiment = Experiment(
+                 api_key=COMET_API_KEY,
+                 workspace=COMET_WORKSPACE,
+                 project_name=COMET_PROJECT_NAME,
+             )
+             experiment.log_parameter("input_text_length", len(text))
+             experiment.log_table("predicted_entities", df)
+
+         # --- Display Results ---
+         properties = {"border": "2px solid gray", "color": "blue", "font-size": "16px"}
+         df_styled = df.style.set_properties(**properties)
+         st.dataframe(df_styled, use_container_width=True)
+
+         with st.expander("See Glossary of tags"):
+             st.write('''
+ '**word**': ['entity extracted from your text data']
+
+ '**score**': ['confidence score; how confidently a tag has been assigned to a given entity']
+
+ '**entity_group**': ['label (tag) assigned to a given extracted entity']
+
+ '**start**': ['index of the start of the corresponding entity']
+
+ '**end**': ['index of the end of the corresponding entity']
+ ''')
+
+         # --- Visualizations ---
+         st.subheader("Tree map", divider="rainbow")
+         fig_treemap = px.treemap(df, path=[px.Constant("all"), 'word', 'entity_group'],
+                                  values='score', color='entity_group')
+         fig_treemap.update_layout(margin=dict(t=50, l=25, r=25, b=25))
+         st.plotly_chart(fig_treemap)
+         if comet_initialized:
+             experiment.log_figure(figure=fig_treemap, figure_name="entity_treemap")
+
+         value_counts1 = df['entity_group'].value_counts()
+         final_df_counts = value_counts1.reset_index().rename(columns={"index": "entity_group"})
+
+         col1, col2 = st.columns(2)
+         with col1:
+             st.subheader("Pie Chart", divider="rainbow")
+             fig_pie = px.pie(final_df_counts, values='count', names='entity_group', hover_data=['count'], labels={'count': 'count'}, title='Percentage of predicted labels')
+             fig_pie.update_traces(textposition='inside', textinfo='percent+label')
+             st.plotly_chart(fig_pie)
+             if comet_initialized:
+                 experiment.log_figure(figure=fig_pie, figure_name="label_pie_chart")
+
+         with col2:
+             st.subheader("Bar Chart", divider="rainbow")
+             fig_bar = px.bar(final_df_counts, x="count", y="entity_group", color="entity_group", text_auto=True, title='Occurrences of predicted labels')
+             st.plotly_chart(fig_bar)
+             if comet_initialized:
+                 experiment.log_figure(figure=fig_bar, figure_name="label_bar_chart")
+
+         # --- Downloadable Content ---
+         dfa = pd.DataFrame(
+             data={
+                 'word': ['entity extracted from your text data'],
+                 'score': ['confidence score; how confidently a tag has been assigned to a given entity'],
+                 'entity_group': ['label (tag) assigned to a given extracted entity'],
+                 'start': ['index of the start of the corresponding entity'],
+                 'end': ['index of the end of the corresponding entity'],
+             })
+
+         buf = io.BytesIO()
+         with zipfile.ZipFile(buf, "w") as myzip:
+             myzip.writestr("Summary of the results.csv", df.to_csv(index=False))
+             myzip.writestr("Glossary of tags.csv", dfa.to_csv(index=False))
+
+         with stylable_container(
+             key="download_button",
+             css_styles="""button { background-color: yellow; border: 1px solid black; padding: 5px; color: black; }""",
+         ):
+             st.download_button(
+                 label="Download zip file",
+                 data=buf.getvalue(),
+                 file_name="nlpblogs_ner_results.zip",
+                 mime="application/zip",
+             )
+         if comet_initialized:
+             # log_asset expects a file path or a file-like object, so wrap the zip bytes in BytesIO
+             experiment.log_asset(io.BytesIO(buf.getvalue()), file_name="downloadable_results.zip")
+
+         st.divider()
+         if comet_initialized:
+             experiment.end()
+
+ st.write(f"Number of times you requested results: **{st.session_state['file_upload_attempts']}/{max_attempts}**")
+
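For reference, the token-classification pipeline loaded by load_ner_model() (with aggregation_strategy="first") returns one dict per detected entity, and pd.DataFrame(text_entities) turns those keys into the word, score, entity_group, start, and end columns described in the Glossary. A minimal sketch of that shape, assuming the same model; the sample sentence, label, and score are illustrative only, not output from a real run:

    from transformers import pipeline

    ner = pipeline(
        "token-classification",
        model="h2oai/deberta_finetuned_pii",
        aggregation_strategy="first",
    )

    sample = ner("John lives at 42 Example Street.")
    # Each element of `sample` is a dict shaped like:
    # {"entity_group": "FIRSTNAME", "score": 0.99, "word": "John", "start": 0, "end": 4}
    # so pd.DataFrame(sample) yields the columns used above:
    # word, score, entity_group, start, end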