from smolagents import Tool
from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
import torch

class ModelMathTool(Tool):
    name = "math_model"
    description = "Answers advanced math questions using a pretrained math model."

    inputs = {
        "problem": {
            "type": "string",
            "description": "Math problem to solve.",
        }
    }

    output_type = "string"

    def __init__(self, model_name="deepseek-ai/deepseek-math-7b-base"):
        super().__init__()
        print(f"Loading math model: {model_name}")

        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        print("Loaded tokenizer")

        # bfloat16 halves memory relative to float32; the 7B checkpoint still
        # needs on the order of 14 GB.
        self.model = AutoModelForCausalLM.from_pretrained(
            model_name, torch_dtype=torch.bfloat16
        )
        print("Loaded model")

        self.model.generation_config = GenerationConfig.from_pretrained(model_name)
        print("Loaded generation config")

        # Use the EOS token for padding so generate() does not warn about a
        # missing pad token.
        self.model.generation_config.pad_token_id = self.model.generation_config.eos_token_id

    def forward(self, problem: str) -> str:
        print(f"[MathModelTool] Question: {problem}")

        # Move the encoded prompt to the same device as the model weights.
        inputs = self.tokenizer(problem, return_tensors="pt").to(self.model.device)
        outputs = self.model.generate(**inputs, max_new_tokens=100)

        result = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
        return result
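
# Quick standalone sanity check (a sketch, not part of the tool): smolagents
# Tool instances are callable, so the tool can be exercised without an agent.
# Left commented out because loading the 7B checkpoint is expensive, and the
# prompt below is only an illustration.
# math_tool = ModelMathTool()
# print(math_tool(problem="What is the derivative of x**2?"))
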
class WikipediaTool(Tool):
    name = "wikip_tool"
    description = "Searches Wikipedia and returns a short summary of the queried topic."

    inputs = {
        "query": {
            "type": "string",
            "description": "Topic of the Wikipedia search.",
        }
    }

    output_type = "string"

    def __init__(self):
        super().__init__()
        # Import lazily and keep a module reference so forward() can reach it;
        # a bare local import here would not be in scope inside forward().
        import wikipedia
        self.wikipedia = wikipedia

    def forward(self, query: str) -> str:
        return self.wikipedia.summary(query, sentences=3)
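
if __name__ == "__main__":
    # Minimal wiring sketch: hand both tools to a smolagents agent. The model
    # backend below is an assumption -- InferenceClientModel needs a Hugging
    # Face token and network access; substitute whichever smolagents model
    # class and model id you actually use.
    from smolagents import CodeAgent, InferenceClientModel

    agent = CodeAgent(
        tools=[ModelMathTool(), WikipediaTool()],
        model=InferenceClientModel(),  # assumed backend; swap in your own
    )
    print(agent.run("Summarize the Pythagorean theorem, then solve x**2 - 4 = 0."))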