# streamlit_app.py
import re

import streamlit as st
import torch
from sympy import symbols, integrate, exp, pi
from transformers import AutoTokenizer, AutoModelForCausalLM

st.set_page_config(page_title="AI Physics Solver", page_icon="🧠")

# Symbols used for symbolic integration (x for spatial problems, t for time-based ones).
x, t = symbols("x t")


def extract_integral(problem_text):
    """Parse a simple integration problem from free text and solve it with SymPy.

    Handles two patterns: polynomial integrands of the form 'c*x^n' (or 'c*t^n'),
    and radioactive-decay problems of the form 'N0 e^-lambda*t', each with two limits.
    """
    match = re.search(r'(\d+)\*?[tx]\^(\d+)', problem_text)
    limits = re.findall(r'[tx]\s*=\s*([\d\.\\\w]+)', problem_text)
    # Matched but not currently used by the branches below.
    exp_match = re.search(r'(\d+)e\^([\-\+]?\d+\.?\d*)[tx]', problem_text)

    if 'radioactive' in problem_text or 'half-life' in problem_text:
        decay_match = re.search(r'(\d+)\s*e\^\s*-\s*(\d+\.?\d*)[tx]', problem_text)
        if decay_match and len(limits) == 2:
            N0 = int(decay_match.group(1))
            lam = float(decay_match.group(2))
            # Limits are evaluated with only `pi` in scope, so e.g. 't = pi' works.
            lower, upper = map(lambda v: eval(v, {"pi": pi}), limits)
            # Decay rate is lambda*N(t) = lambda*N0*exp(-lambda*t); integrating it over
            # [lower, upper] gives the total number of decays in that interval.
            expr = lam * N0 * exp(-lam * t)
            return f"Total decayed = {integrate(expr, (t, lower, upper)).evalf()} units."

    if match and len(limits) == 2:
        coefficient = int(match.group(1))
        exponent = int(match.group(2))
        lower_limit = eval(limits[0], {"pi": pi})
        upper_limit = eval(limits[1], {"pi": pi})
        expr = coefficient * x**exponent
        return f"Accumulated Quantity = {integrate(expr, (x, lower_limit, upper_limit))}"

    return "Could not parse the integral format."


@st.cache_resource
def load_deepseek():
    """Load the DeepSeek-Math model and tokenizer once per session (cached by Streamlit)."""
    model_name = "deepseek-ai/deepseek-math-7b-base"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    if torch.cuda.is_available():
        # Half precision with device_map="auto" places the 7B model on available GPUs.
        model = AutoModelForCausalLM.from_pretrained(
            model_name, torch_dtype=torch.float16, device_map="auto"
        )
    else:
        model = AutoModelForCausalLM.from_pretrained(model_name)
    return tokenizer, model


def run_deepseek(user_question):
    """Build a step-by-step prompt, run the model, and return only the answer text."""
    tokenizer, model = load_deepseek()

    solution_steps = """
### Solution:
1. Understand the problem and extract known quantities.
2. Apply relevant physical laws or mathematical formulas.
3. Solve algebraically or numerically as required.
4. Clearly present the final answer.

### Final Answer Format:
Final Answer: [VARIABLE] = [ANSWER] [UNIT]
"""
    prompt = (
        "Q: Solve the following physics problem using rigorous mathematical reasoning. "
        "Do not skip any steps.\n\n"
        f"Problem: {user_question}\n\n{solution_steps}\nA:"
    )

    inputs = tokenizer(prompt, return_tensors="pt")
    # Move inputs to GPU if available
    if torch.cuda.is_available():
        inputs = inputs.to("cuda")

    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=500,
            temperature=0.1,  # only takes effect if do_sample=True; otherwise decoding is greedy
            repetition_penalty=1.0,
            eos_token_id=tokenizer.eos_token_id,
            pad_token_id=tokenizer.eos_token_id,
        )
    # The decoded text includes the prompt; keep only what the model wrote after "A:".
    return tokenizer.decode(outputs[0], skip_special_tokens=True).split("A:")[-1].strip()


# ---------------- UI Layout ----------------
st.title("🧠 AI Science Solver")

task_type = st.selectbox("Choose Task Type", ["LLM Reasoning (DeepSeek)", "Symbolic Integration"])
user_question = st.text_area("Enter your physics or math question below:")

if st.button("Solve"):
    with st.spinner("Solving..."):
        if task_type == "LLM Reasoning (DeepSeek)":
            result = run_deepseek(user_question)
        else:
            result = extract_integral(user_question)
    st.subheader("📘 Answer")
    st.write(result)
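
# ---------------- Usage (illustrative) ----------------
# A minimal sketch of how this app might be installed, run, and queried; the
# commands and sample prompts below are assumptions, not part of the original file.
#
#   pip install streamlit sympy torch transformers
#   streamlit run streamlit_app.py
#
# Example inputs the symbolic-integration branch is written to parse:
#   "Integrate 3x^2 from x = 0 to x = 2"
#       -> "Accumulated Quantity = 8"
#   "A radioactive sample decays as 100e^-0.5t; how much decays from t = 0 to t = 2?"
#       -> "Total decayed = 63.21... units."  (i.e. 100*(1 - exp(-1)))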