from fastapi import FastAPI
from fastapi.responses import HTMLResponse
import asyncio
import mysql.connector
import json
import logging
import pandas as pd
from llama_cpp import Llama
import os

app = FastAPI()

# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Database configuration: the `db` environment variable is assumed to hold a JSON
# object of mysql.connector connection parameters (os.getenv returns a string, so
# it cannot be unpacked with ** directly).
DB_CONFIG = json.loads(os.getenv('db', '{}'))
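# Example (assumed) value for the `db` environment variable:
#   export db='{"host": "localhost", "user": "shipments", "password": "***", "database": "logistics"}'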

# System prompt for the LLM, read from the environment; it is expected to instruct
# the model to return the shipment fields used in insert_data() as a JSON object.
prompt = os.getenv('prompt')

# Function to insert extracted shipment details into MySQL database
def insert_data(extracted_details):
    try:
        mydb = mysql.connector.connect(**DB_CONFIG)
        cursor = mydb.cursor()

        # Skip insertion if all required fields are empty
        required_fields = ['origin', 'destination', 'expected_shipment_datetime', 
                           'types_of_service', 'warehouse', 'description', 
                           'quantities', 'carrier_details']
        
        if all(extracted_details.get(field) in [None, ""] for field in required_fields):
            logger.info("Skipping insertion: All extracted values are empty.")
            return

        sql = """
        INSERT INTO shipment_details (
            origin, destination, expected_shipment_datetime, types_of_service, 
            warehouse, description, quantities, carrier_details
        ) VALUES (%s, %s, %s, %s, %s, %s, %s, %s)
        """
        values = (
            extracted_details.get('origin'),
            extracted_details.get('destination'),
            extracted_details.get('expected_shipment_datetime'),
            extracted_details.get('types_of_service'),
            extracted_details.get('warehouse'),
            extracted_details.get('description'),
            extracted_details.get('quantities'),
            extracted_details.get('carrier_details')
        )
        cursor.execute(sql, values)
        mydb.commit()
        logger.info("Data inserted successfully.")

    except mysql.connector.Error as db_err:
        logger.error(f"Database error: {db_err}")
    except Exception as ex:
        logger.error(f"Error inserting data: {ex}")

# Function to read and process emails
def read_email():
    logger.info("Loading Llama model...")
    llm = Llama.from_pretrained(
                                repo_id="microsoft/Phi-3-mini-4k-instruct-gguf",
                                filename="Phi-3-mini-4k-instruct-fp16.gguf", n_ctx=2048
                                )
    logger.info("Llama model loaded.")
    logger.info("Reading emails from CSV...")
    df = pd.read_csv('./emails.csv')
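    # The CSV is assumed to contain at least a 'Body' column with the raw email text;
    # sender/subject metadata is not read here and is filled with placeholders below.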
    for body in df['Body']:
        logger.info(f"Processing email: {body}")
        output = llm(
            f"<|system|>\n{prompt}<|end|><|user|>\n{body}<|end|>\n<|assistant|>",
            max_tokens=256,
            stop=["<|end|>"],
            echo=False)

        logger.info("Extracting details...")
        t = output['choices'][0]['text']
        logger.info('the model output : \n',t)
        extracted_details = json.loads(t[t.find('{'):t.find('}') + 1].replace("'", '"'))
        extracted_details = {key.lower().replace(" ", "_"): value for key, value in extracted_details.items()}
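        # Illustrative (assumed) shape of the model output before normalisation, e.g.
        #   {"Origin": "Chennai", "Destination": "Mumbai", "Quantities": "40 pallets", ...}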
        
        # Add metadata placeholders
        meta_data = {
            'sender': None,
            'receiver': None,
            'cc': None,
            'bcc': None,
            'subject': None
        }
        extracted_details.update(meta_data)
        
        logger.info(f"Full extracted data: {extracted_details}")
        insert_data(extracted_details)

# Global variable to control the email processing loop
running = False

# HTML content for the web interface
html_content = """
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Email Processing</title>
    <style>
        body { font-family: Arial, sans-serif; margin: 50px; }
        h1 { color: #333; }
        button {
            padding: 10px 20px;
            margin: 10px;
            background-color: #4CAF50;
            color: white;
            border: none;
            cursor: pointer;
        }
        button.stop { background-color: #f44336; }
        #status { font-weight: bold; }
    </style>
    <script>
        async function startLoop() {
            const response = await fetch('/start', { method: 'POST' });
            const result = await response.text();
            document.getElementById("status").innerHTML = result;
        }
        async function stopLoop() {
            const response = await fetch('/stop', { method: 'POST' });
            const result = await response.text();
            document.getElementById("status").innerHTML = result;
        }
    </script>
</head>
<body>
    <h1>Email Processing Status: <span id="status">{{ status }}</span></h1>
    <button onclick="startLoop()">Start</button>
    <button class="stop" onclick="stopLoop()">Stop</button>
</body>
</html>
"""

# Background coroutine that processes emails in a loop while `running` is True
async def email_processing_loop():
    global running
    logger.info("Starting email processing loop...")
    loop = asyncio.get_running_loop()
    while running:
        logger.info("Processing emails...")
        # read_email() is blocking (model load + inference), so run it in a thread
        # pool to keep the event loop (and the /stop endpoint) responsive.
        await loop.run_in_executor(None, read_email)
        await asyncio.sleep(10)  # Non-blocking delay between passes

# Endpoint to display the current email processor status
@app.get("/", response_class=HTMLResponse)
async def home():
    global running
    status = "Running" if running else "Stopped"
    return HTMLResponse(content=html_content.replace("{{ status }}", status), status_code=200)

# Endpoint to start the email processing loop
@app.post("/start")
async def start_email_loop():
    global running
    if not running:
        running = True
        asyncio.ensure_future(email_processing_loop())
        logger.info("Email processing loop started.")
        return "Running"
    else:
        return "Already running"

# Endpoint to stop the email processing loop
@app.post("/stop")
async def stop_email_loop():
    global running
    if running:
        running = False
        logger.info("Email processing loop stopped.")
        return "Stopped"
    else:
        return "Already stopped"

if __name__ == "__main__":
    logger.info("Starting FastAPI server...")
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)
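
# Example interaction once the server is running (assuming the defaults above):
#   curl -X POST http://localhost:8000/start   # -> "Running"
#   curl -X POST http://localhost:8000/stop    # -> "Stopped"
# The page at http://localhost:8000/ provides Start/Stop buttons for the same endpoints.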