|
import gradio as gr |
|
import requests |
|
import os |
|
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline |
|
|
|
|
|
# Small instruction-tuned chat model; loaded once at import time so every
# Gradio request reuses the same in-memory pipeline.
model_id = "Qwen/Qwen1.5-0.5B-Chat"

# NOTE: from_pretrained downloads weights on first run (network + disk cost).
tokenizer = AutoTokenizer.from_pretrained(model_id)

model = AutoModelForCausalLM.from_pretrained(model_id)

# text-generation pipeline wraps model+tokenizer; called by chat_agent below.
llm = pipeline("text-generation", model=model, tokenizer=tokenizer)
|
|
|
|
|
def fetch_sales_order_headers():
    """Fetch the first 5 sales-order headers from the SAP sandbox OData API.

    Returns:
        On success, the list of sales-order dicts from the OData ``d.results``
        payload (possibly empty). On failure, a human-readable error string —
        downstream code (format_sales_orders_for_llm) checks for ``str`` to
        distinguish the two, so the error-as-string contract must be kept.
    """
    api_url = "https://sandbox.api.sap.com/s4hanacloud/sap/opu/odata/sap/API_SALES_ORDER_SRV/A_SalesOrder?$top=5&$inlinecount=allpages"

    # Key comes from the environment; the placeholder fallback keeps local
    # runs from crashing but will be rejected by the sandbox.
    api_key = os.getenv("SAP_SANDBOX_API_KEY", "YOUR_API_KEY")

    headers = {
        "APIKey": api_key,
        "Accept": "application/json",
    }

    try:
        r = requests.get(api_url, headers=headers, timeout=10)
        r.raise_for_status()
        data = r.json()
    # RequestException covers connection/timeout/HTTP errors;
    # ValueError covers a non-JSON response body.
    except (requests.RequestException, ValueError) as e:
        return f"Error fetching Sales Orders: {e}"

    # The .get defaults already produce [] for a missing/empty payload,
    # so no separate empty-check is needed.
    return data.get('d', {}).get('results', [])
|
|
|
def format_sales_orders_for_llm(orders):
    """Render fetched sales orders as a plain-text context block for the LLM.

    Args:
        orders: Either a list of sales-order dicts (success path of
            fetch_sales_order_headers) or an error string (its failure path).

    Returns:
        A numbered, one-line-per-order summary ending in a newline; error
        strings are passed through unchanged so the LLM can surface them.
    """
    # fetch_sales_order_headers signals failure with a string — pass it through.
    if isinstance(orders, str):
        return orders

    # Build lines in a list and join once instead of quadratic `+=` in a loop.
    lines = ["Here are the latest SAP sales orders:"]
    for i, order in enumerate(orders, 1):
        lines.append(
            f"{i}. Order: {order['SalesOrder']}, "
            f"Type: {order['SalesOrderType']}, "
            f"Org: {order['SalesOrganization']}, "
            f"Date: {order['SalesOrderDate']}, "
            f"SoldTo: {order['SoldToParty']}, "
            f"Net: {order['TotalNetAmount']} {order['TransactionCurrency']}, "
            f"Status: {order['OverallSDProcessStatus']}"
        )
    return "\n".join(lines) + "\n"
|
|
|
def chat_agent(message, history):
    """Gradio chat handler: answer a user question grounded in live SAP data.

    Args:
        message: The user's question (from the Textbox).
        history: Existing list of (user, bot) tuples, or None on the first turn.

    Returns:
        (history, history) — the updated chat log, duplicated because the
        Chatbot component is wired into both output slots.
    """
    sales_orders = fetch_sales_order_headers()
    context = format_sales_orders_for_llm(sales_orders)

    prompt = (
        f"{context}\n"
        f"User asked: {message}\n"
        "Based on the above SAP sales orders, answer the user's question as accurately as possible. "
        "If the question asks for sorting, filtering, or summarizing, do it based on the data above."
    )

    llm_output = llm(prompt, max_new_tokens=256)[0]["generated_text"]

    # Strip only the *leading* prompt echo. str.replace(prompt, "") would also
    # delete any later repetition of the prompt text inside the model's answer.
    response = llm_output.removeprefix(prompt).strip()

    # Copy instead of appending in place so Gradio's state object is never mutated.
    history = list(history or [])
    history.append((message, response))
    return history, history
|
|
|
# --- Gradio UI wiring -------------------------------------------------------
with gr.Blocks() as demo:

    gr.Markdown(

        """

# SAP Sales Order Chat Agent (Small Chat Model)

- Asks about SAP sales orders, values, filtering, sorting, etc.

- Example: `Show me top 2 sales orders by value.`

"""

    )

    # Chat transcript; also serves as the state passed back into chat_agent.
    chatbot = gr.Chatbot()

    txt = gr.Textbox(label="Your question")

    clear = gr.Button("Clear chat")

    # Enter in the textbox runs chat_agent(message, history) and writes the
    # returned pair into the chatbot (wired twice, matching the 2-tuple return).
    txt.submit(chat_agent, [txt, chatbot], [chatbot, chatbot])

    # Reset both chatbot outputs to empty lists.
    clear.click(lambda: ([], []), None, [chatbot, chatbot])

# Launches a blocking local web server (side effect at import/run time).
demo.launch()
|
|