from langchain.llms import HuggingFacePipeline
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from transformers import pipeline
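

# NameExtractorChain wraps a Hugging Face text2text pipeline in a LangChain
# LLMChain that pulls a person's name out of a snippet of conversation.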
class NameExtractorChain:
    def __init__(self, model_name: str = "name-extraction"):
        # Load the extraction model; max_new_tokens stays small because a
        # name is only a few tokens long, and temperature=0 signals that
        # the output should be deterministic.
        self.pipe = pipeline(
            "text2text-generation",
            model=model_name,
            max_new_tokens=10,
            model_kwargs={"temperature": 0},
        )

        self.llm = HuggingFacePipeline(pipeline=self.pipe)

        # Built as an explicit concatenation so no indentation leaks into
        # the prompt the model actually sees.
        self.prompt = PromptTemplate(
            input_variables=["conversation"],
            template=(
                "Extract only the name of the person from this conversation.\n"
                "If there's no name, return 'No name found'.\n"
                "Conversation: {conversation}"
            ),
        )
        self.chain = LLMChain(llm=self.llm, prompt=self.prompt)

    def extract_name(self, text: str):
        text = text.strip()
        # A single-word input (e.g. just a bare name) is wrapped in a short
        # sentence so the model sees it in conversational context.
        if len(text.split()) == 1:
            text = "It's " + text
        try:
            return self.chain.run(conversation=text)
        except Exception as e:
            print(f"Error processing text: {e}")
            return None  # explicit fallback instead of falling off the end


# Example usage; the conversation text below is illustrative.
extractor = NameExtractorChain()
sample_text = "Hi, my name is Alice and I need help with my account."
print(extractor.extract_name(sample_text))
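
# The default model id "name-extraction" is assumed to be a locally
# fine-tuned checkpoint; any text2text model on the Hub should also work,
# e.g. NameExtractorChain(model_name="google/flan-t5-small").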