import streamlit as st
import pandas as pd
import io
import plotly.express as px
import zipfile
import os
import re
import numpy as np
import json
import time

from cryptography.fernet import Fernet
from transformers import pipeline
from streamlit_extras.stylable_container import stylable_container
from comet_ml import Experiment

st.set_page_config(layout="wide", page_title="Named Entity Recognition App")
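
# Comet ML logging is optional: credentials are read from the environment,
# and every logging call below is guarded by `comet_initialized`, so the
# app still runs when they are absent.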
COMET_API_KEY = os.environ.get("COMET_API_KEY")
COMET_WORKSPACE = os.environ.get("COMET_WORKSPACE")
COMET_PROJECT_NAME = os.environ.get("COMET_PROJECT_NAME")

comet_initialized = False
if COMET_API_KEY and COMET_WORKSPACE and COMET_PROJECT_NAME:
    comet_initialized = True

COUNTER_FILE = "counter.json"
max_attempts = 300
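
# counter.json, written by save_persistent_data() below, has the shape
# (values illustrative only):
#   {"file_upload_attempts": 3,
#    "file_upload_history": [{"filename": "data.csv", "timestamp": "2024-01-01 12:00:00"}]}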
def load_persistent_data():
    """
    Loads the attempts count and file upload history from a persistent JSON file.
    Returns default values if the file doesn't exist or is invalid.
    """
    if os.path.exists(COUNTER_FILE):
        try:
            with open(COUNTER_FILE, "r") as f:
                data = json.load(f)
                return data.get('file_upload_attempts', 0), data.get('file_upload_history', [])
        except (json.JSONDecodeError, KeyError):
            return 0, []
    return 0, []


def save_persistent_data(attempts, history):
    """
    Saves the current attempts count and file upload history to the persistent JSON file.
    """
    with open(COUNTER_FILE, "w") as f:
        json.dump({'file_upload_attempts': attempts, 'file_upload_history': history}, f, indent=4)


def clear_history():
    """
    Callback function for the "Clear History" button.
    Resets both the file upload counter and the history list, then saves the state.
    """
    st.session_state['file_upload_attempts'] = 0
    st.session_state['file_upload_history'] = []
    save_persistent_data(0, [])
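
# Streamlit re-runs this script on every interaction; st.session_state keeps
# values alive within a browser session, while counter.json persists them
# across app restarts.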
if 'file_upload_attempts' not in st.session_state:
    attempts, history = load_persistent_data()
    st.session_state['file_upload_attempts'] = attempts
    st.session_state['file_upload_history'] = history
    save_persistent_data(st.session_state['file_upload_attempts'], st.session_state['file_upload_history'])

if 'encrypted_extracted_text' not in st.session_state:
    st.session_state['encrypted_extracted_text'] = None
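
# Pipeline settings (transformers token-classification pipeline):
# aggregation_strategy="max" merges sub-word tokens into whole-word entities,
# stride=128 handles texts longer than the model's context window in
# overlapping chunks, and ignore_labels=["O"] drops non-entity tokens.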
@st.cache_resource
def load_ner_model():
    """
    Loads the pre-trained NER model (Andrija/M-bert-NER) and caches it.
    """
    try:
        return pipeline("token-classification",
                        model="Andrija/M-bert-NER",
                        stride=128,
                        aggregation_strategy="max",
                        ignore_labels=["O"])
    except Exception as e:
        st.error(f"Failed to load NER model. Please check your internet connection or model availability: {e}")
        st.stop()
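
# A suitable FERNET_KEY can be generated once and stored in the deployment
# environment, e.g.:
#   python -c "from cryptography.fernet import Fernet; print(Fernet.generate_key().decode())"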
@st.cache_resource
def load_encryption_key():
    """
    Loads the Fernet encryption key from environment variables.
    This key is crucial for encrypting/decrypting sensitive data.
    It's cached as a resource to be loaded only once.
    """
    try:
        key_str = os.environ.get("FERNET_KEY")
        if not key_str:
            raise ValueError("FERNET_KEY environment variable not set. Cannot perform encryption/decryption. "
                             "Please set this securely in your deployment environment.")
        key_bytes = key_str.encode('utf-8')
        return Fernet(key_bytes)
    except ValueError as ve:
        st.error(f"Configuration Error: {ve}")
        st.stop()
    except Exception as e:
        st.error(f"An unexpected error occurred while loading encryption key: {e}")
        st.stop()


fernet = load_encryption_key()
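
# Fernet provides authenticated symmetric encryption: decrypting with the
# wrong key, or after any modification of the ciphertext, raises
# cryptography.fernet.InvalidToken, which the broad except in decrypt_text
# surfaces as a Streamlit error.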
def encrypt_text(text_content: str) -> bytes:
    """
    Encrypts a string using the loaded Fernet cipher.
    The input string is first encoded to UTF-8 bytes.
    """
    return fernet.encrypt(text_content.encode('utf-8'))


def decrypt_text(encrypted_bytes: bytes) -> str | None:
    """
    Decrypts bytes using the loaded Fernet cipher.
    Returns the decrypted string, or None if decryption fails (e.g., tampering).
    """
    try:
        return fernet.decrypt(encrypted_bytes).decode('utf-8')
    except Exception as e:
        st.error(f"Decryption failed. This might indicate data tampering or an incorrect encryption key. Error: {e}")
        return None


st.subheader("Serbo-Croatian CSV-XLSX-XLS Entity Finder", divider="green")
st.link_button("by nlpblogs", "https://nlpblogs.com", type="tertiary")

expander = st.expander("**Important notes on the Serbo-Croatian CSV-XLSX-XLS Entity Finder**")
expander.write('''
**Named Entities:** This Serbo-Croatian CSV-XLSX-XLS Entity Finder
predicts four (4) labels (“PER: person”, “LOC: location”, “ORG:
organization”, “MISC: miscellaneous”). Results are presented in an
easy-to-read table, visualized in an interactive tree map, pie chart,
and bar chart, and are available for download along with a Glossary of
tags.

**How to Use:** Upload your CSV, XLSX, or XLS file. Then, click the
'Results' button to extract and tag entities in your text data.

**Usage Limits:** You can request results up to 300 times within a 30-day period.

**Language settings:** Please check and adjust the language settings on
your computer so that Serbian and Croatian characters are handled properly in
your downloaded file.

**Customization:** To change the app's background color to white or
black, click the three-dot menu on the right-hand side of your app, go to
Settings and then Choose app theme, colors and fonts.

**Technical issues:** If your connection times out, please refresh the
page or reopen the app's URL.

For any errors or inquiries, please contact us at info@nlpblogs.com.
''')

with st.sidebar:
    st.subheader("File Upload History", divider="green")
    if st.session_state['file_upload_history']:
        history_df = pd.DataFrame(st.session_state['file_upload_history'])
        st.dataframe(history_df, use_container_width=True, hide_index=True)
    else:
        st.info("No files have been uploaded yet.")

    st.button("Clear History", on_click=clear_history)

    st.subheader("Build your own NER Web App in a minute without writing a single line of code.", divider="green")
    st.link_button("NER File Builder", "https://nlpblogs.com/shop/named-entity-recognition-ner/ner-file-builder/", type="primary")

uploaded_file = st.file_uploader("Upload your file. Accepted file formats include: .csv, .xlsx, .xls", type=['xls', 'csv', 'xlsx'])

current_run_text = None

if uploaded_file is not None:
    file_extension = uploaded_file.name.split('.')[-1].lower()
    df = None

    if file_extension == 'csv':
        # Try UTF-8 first, then fall back to Latin-1 for legacy encodings.
        try:
            uploaded_file.seek(0)
            df = pd.read_csv(uploaded_file, na_filter=False, encoding='utf-8')
        except UnicodeDecodeError:
            try:
                uploaded_file.seek(0)
                df = pd.read_csv(uploaded_file, na_filter=False, encoding='latin-1')
            except UnicodeDecodeError:
                st.error("Error: The CSV file could not be decoded with UTF-8 or Latin-1 encoding. Please ensure it's a valid CSV and check its encoding.")
                st.stop()
            except pd.errors.ParserError:
                st.error("Error: The CSV file is not readable or is incorrectly formatted (Latin-1 attempt).")
                st.stop()
            except Exception as e:
                st.error(f"An unexpected error occurred while reading CSV with Latin-1: {e}")
                st.stop()
        except pd.errors.ParserError:
            st.error("Error: The CSV file is not readable or is incorrectly formatted (UTF-8 attempt).")
            st.stop()
        except Exception as e:
            st.error(f"An unexpected error occurred while reading CSV with UTF-8: {e}")
            st.stop()

    elif file_extension in ['xlsx', 'xls']:
        try:
            uploaded_file.seek(0)
            df = pd.read_excel(uploaded_file, na_filter=False)
        except ValueError:
            st.error("Error: The Excel file is not readable or is incorrectly formatted. Please ensure it's a valid Excel file.")
            st.stop()
        except Exception as e:
            st.error(f"An unexpected error occurred while reading Excel: {e}")
            st.stop()
    else:
        st.error(f"Unsupported file format: .{file_extension}. Please upload a .csv, .xlsx, or .xls file.")
        st.stop()
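
    # With the file parsed, drop columns whose every cell is NaN or
    # whitespace-only, then validate what remains before extracting text.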
    if df is not None:
        columns_to_drop = []
        for col in df.columns:
            is_empty_col = True
            for cell_value in df[col]:
                if pd.isna(cell_value):
                    continue
                elif isinstance(cell_value, str) and cell_value.strip() == '':
                    continue
                else:
                    is_empty_col = False
                    break
            if is_empty_col:
                columns_to_drop.append(col)

        if columns_to_drop:
            st.info(f"Automatically removing empty columns: {', '.join(columns_to_drop)}")
            df = df.drop(columns=columns_to_drop)

        if df.empty:
            st.error("After removing empty columns, the DataFrame is empty. Please upload a file with meaningful content.")
            st.stop()

        if df.isnull().values.any():
            st.error(f"Error: The {file_extension.upper()} file contains missing values. Please ensure all cells are filled.")
            st.stop()
        else:
            st.session_state['file_upload_history'].append({
                'filename': uploaded_file.name,
                'timestamp': time.strftime('%Y-%m-%d %H:%M:%S')
            })
            save_persistent_data(st.session_state['file_upload_attempts'], st.session_state['file_upload_history'])

            # Flatten the table to plain text and strip punctuation so the
            # NER model sees a continuous text stream.
            df_string_representation = df.to_string(index=False, header=False)
            text_content = re.sub(r'[^\w\s.]', '', df_string_representation)
            text_content = text_content.replace("Empty DataFrame Columns", "").strip()

            encrypted_text_bytes = encrypt_text(text_content)
            st.session_state['encrypted_extracted_text'] = encrypted_text_bytes

            st.success(f"Successfully loaded {file_extension.upper()} file. File content encrypted and secured. Due to security protocols, the file content is hidden.")
            st.divider()
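
# Everything below runs only when "Results" is clicked: the encrypted text is
# decrypted in memory, passed through the NER pipeline, and the predictions
# are tabulated, charted, and offered as a zip download.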
if st.button("Results"):
    start_time = time.time()

    experiment = None
    if comet_initialized:
        try:
            experiment = Experiment(
                api_key=COMET_API_KEY,
                workspace=COMET_WORKSPACE,
                project_name=COMET_PROJECT_NAME,
                auto_log_text=False
            )
        except Exception as e:
            st.warning(f"Comet ML initialization failed: {e}. Data will not be logged for this session.")
            comet_initialized = False
    else:
        st.warning("Comet ML environment variables not set. Data will not be logged.")

    if st.session_state['file_upload_attempts'] >= max_attempts:
        st.error(f"You have requested results {max_attempts} times. You have reached your request limit for this 30-day period.")
        if comet_initialized and experiment:
            experiment.log_other("limit_reached", True)
            experiment.end()
        st.stop()

    text_for_ner = None
    if st.session_state['encrypted_extracted_text'] is not None:
        text_for_ner = decrypt_text(st.session_state['encrypted_extracted_text'])

    if text_for_ner is None or not text_for_ner.strip():
        st.warning("Please upload a supported file (.csv, .xlsx, or .xls) and ensure it contains text before requesting results.")
        if comet_initialized and experiment:
            experiment.log_other("no_text_uploaded", True)
            experiment.end()
        st.stop()

    st.session_state['file_upload_attempts'] += 1
    save_persistent_data(st.session_state['file_upload_attempts'], st.session_state['file_upload_history'])

    with st.spinner("Analyzing text...", show_time=True):
        model = load_ner_model()
        text_entities = model(text_for_ner)

    df = pd.DataFrame(text_entities)

    if df.empty:
        st.warning("The model did not extract any entities from the uploaded text. Try a different file or content.")
        if comet_initialized and experiment:
            experiment.log_other("no_entities_extracted_at_source", True)
            experiment.end()
        st.stop()

    if 'word' in df.columns:
        # Strip punctuation artifacts from extracted words; empty results are
        # marked 'Unknown' and filtered out below.
        df['word'] = df['word'].astype(str).apply(lambda x: re.sub(r'[^\w\s.]', '', x).strip())
        df['word'] = df['word'].replace('', 'Unknown')
    else:
        st.error("The 'word' column was not found in the extracted entities. Cannot display results.")
        if comet_initialized and experiment:
            experiment.log_other("word_column_missing", True)
            experiment.end()
        st.stop()

    df = df[df['score'].notna()]
    df = df[df['word'] != 'Unknown']

    if df.empty:
        st.warning("After cleaning and filtering, no meaningful entities were extracted from the uploaded text. Try a different file or content.")
        if comet_initialized and experiment:
            experiment.log_other("no_meaningful_entities_after_cleaning", True)
            experiment.end()
        st.stop()

    if 'entity_group' in df.columns:
        unique_groups = df['entity_group'].unique()
    else:
        st.error("The 'entity_group' column was not found in the extracted entities. Grouping will not work.")
        if comet_initialized and experiment:
            experiment.log_other("entity_group_column_missing", True)
            experiment.end()
        st.stop()
    st.divider()

    if comet_initialized and experiment:
        experiment.log_parameter("input_text_length", len(text_for_ner))
        experiment.log_table("predicted_entities", df)

    properties = {"border": "2px solid gray", "color": "blue", "font-size": "16px"}
    df_styled = df.style.set_properties(**properties)
    st.dataframe(df_styled, use_container_width=True)

    with st.expander("See Glossary of tags"):
        st.write('''
        '**word**': ['entity extracted from your text data']

        '**score**': ['accuracy score; how accurately a tag has been assigned to a given entity']

        '**entity_group**': ['label (tag) assigned to a given extracted entity']

        '**start**': ['index of the start of the corresponding entity']

        '**end**': ['index of the end of the corresponding entity']
        ''')

    entity_groups = {
        "per": "person",
        "loc": "location",
        "org": "organization",
        "misc": "miscellaneous",
    }
    st.subheader("Grouped entities", divider="green")

    entity_items = list(entity_groups.items())
    tabs_per_row = 4
    for i in range(0, len(entity_items), tabs_per_row):
        current_row_entities = entity_items[i : i + tabs_per_row]
        tab_titles = [item[1] for item in current_row_entities]
        tabs = st.tabs(tab_titles)
        for j, (entity_group_key, tab_title) in enumerate(current_row_entities):
            with tabs[j]:
                # The pipeline's aggregated group names are typically
                # upper-case (e.g. "PER"), so compare case-insensitively
                # against the lower-case keys above.
                group_mask = df["entity_group"].astype(str).str.lower() == entity_group_key
                if group_mask.any():
                    df_filtered = df[group_mask]
                    st.dataframe(df_filtered, use_container_width=True)
                else:
                    st.info(f"No '{tab_title}' entities found in the text.")
                    st.dataframe(pd.DataFrame({
                        'entity_group': [entity_group_key],
                        'score': [np.nan],
                        'word': [np.nan],
                        'start': [np.nan],
                        'end': [np.nan]
                    }), hide_index=True)
    st.divider()
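
    # In the treemap, box sizes reflect the summed confidence scores per
    # entity and group (values='score') rather than raw occurrence counts.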
    st.subheader("Tree map", divider="green")
    fig_treemap = px.treemap(df, path=[px.Constant("all"), 'entity_group', 'word'],
                             values='score', color='entity_group')
    fig_treemap.update_layout(margin=dict(t=50, l=25, r=25, b=25))
    st.plotly_chart(fig_treemap)
    if comet_initialized and experiment:
        experiment.log_figure(figure=fig_treemap, figure_name="entity_treemap")

    value_counts1 = df['entity_group'].value_counts()
    final_df_counts = value_counts1.reset_index().rename(columns={"index": "entity_group", "count": "count"})

    col1, col2 = st.columns(2)
    with col1:
        st.subheader("Pie Chart", divider="green")
        fig_pie = px.pie(final_df_counts, values='count', names='entity_group',
                         hover_data=['count'], labels={'count': 'count'},
                         title='Percentage of Predicted Labels')
        fig_pie.update_traces(textposition='inside', textinfo='percent+label')
        st.plotly_chart(fig_pie)
        if comet_initialized and experiment:
            experiment.log_figure(figure=fig_pie, figure_name="label_pie_chart")
    with col2:
        st.subheader("Bar Chart", divider="green")
        fig_bar = px.bar(final_df_counts, x="count", y="entity_group", color="entity_group",
                         text_auto=True, title='Occurrences of Predicted Labels')
        fig_bar.update_layout(yaxis={'categoryorder': 'total ascending'})
        st.plotly_chart(fig_bar)
        if comet_initialized and experiment:
            experiment.log_figure(figure=fig_bar, figure_name="label_bar_chart")
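
    # Bundle the predictions and a glossary into a single in-memory zip, so
    # nothing is written to disk before the user downloads it.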
    dfa = pd.DataFrame(
        data={
            'Column Name': ['word', 'entity_group', 'score', 'start', 'end'],
            'Description': [
                'entity extracted from your text data',
                'label (tag) assigned to a given extracted entity',
                'accuracy score; how accurately a tag has been assigned to a given entity',
                'index of the start of the corresponding entity',
                'index of the end of the corresponding entity',
            ]
        }
    )
    buf = io.BytesIO()
    with zipfile.ZipFile(buf, "w") as myzip:
        myzip.writestr("Summary of the results.csv", df.to_csv(index=False))
        myzip.writestr("Glossary of tags.csv", dfa.to_csv(index=False))

    with stylable_container(
        key="download_button",
        css_styles="""button { background-color: yellow; border: 1px solid black; padding: 5px; color: black; }""",
    ):
        st.download_button(
            label="Download zip file",
            data=buf.getvalue(),
            file_name="nlpblogs_ner_results.zip",
            mime="application/zip",
        )
    if comet_initialized and experiment:
        # log_asset expects a file path or file-like object, so pass the
        # rewound buffer itself rather than raw bytes.
        buf.seek(0)
        experiment.log_asset(buf, file_name="downloadable_results.zip")
    st.divider()

    if comet_initialized and experiment:
        experiment.end()

    end_time = time.time()
    elapsed_time = end_time - start_time
    st.info(f"Results processed in **{elapsed_time:.2f} seconds**.")

st.write(f"Number of times you requested results: **{st.session_state['file_upload_attempts']}/{max_attempts}**")