from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.messages import HumanMessage
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
from langchain_huggingface import HuggingFacePipeline
import getpass
from langchain_mistralai import ChatMistralAI

# Hugging Face access token (needed for the gated Mistral weights).
token = getpass.getpass("Token: ")

members = ["Researcher", "Coder"]
system_prompt = (
    "You are a supervisor tasked with managing a conversation between the"
    " following workers:  {members}. Given the following user request,"
    " respond with the worker to act next. Each worker will perform a"
    " task and respond with their results and status. When finished,"
    " respond with FINISH."
)
# Our team supervisor is an LLM node. It just picks the next agent to process
# and decides when the work is completed
options = ["FINISH"] + members
# Using OpenAI-style function calling can make output parsing easier for us
function_def = {
    "name": "route",
    "description": "Select the next role.",
    "parameters": {
        "title": "routeSchema",
        "type": "object",
        "properties": {
            "next": {
                "title": "Next",
                "anyOf": [
                    {"enum": options},
                ],
            }
        },
        "required": ["next"],
    },
}
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", system_prompt),
        MessagesPlaceholder(variable_name="messages"),
        (
            "system",
            "Given the conversation above, who should act next?"
            " Or should we FINISH? Select one of: {options}",
        ),
    ]
).partial(options=str(options), members=", ".join(members))
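
# Note: the local HuggingFacePipeline LLM built below returns plain text, so the
# "route" schema above is not enforced automatically. As a minimal sketch, the
# same schema could be bound to a hosted tool-calling model instead (assumes a
# Mistral API key is configured; the model name here is illustrative):
def build_api_supervisor_chain():
    supervisor_llm = ChatMistralAI(model="mistral-large-latest")
    # with_structured_output accepts an OpenAI-style function schema and returns
    # the parsed arguments, e.g. {"next": "Researcher"}.
    return prompt | supervisor_llm.with_structured_output(function_def)
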
path = "mistralai/Mistral-7B-Instruct-v0.3"

model = AutoModelForCausalLM.from_pretrained(
    path,
    torch_dtype=torch.float16,
    low_cpu_mem_usage=True,
    trust_remote_code=True,
    device_map="auto",
    token=token
)

tokenizer = AutoTokenizer.from_pretrained(path, token=token)
# Mistral has no pad token by default; reuse EOS rather than adding a new
# special token (which would require resizing the model's embeddings).
tokenizer.pad_token = tokenizer.eos_token
tokenizer.padding_side = "left"

pipe = pipeline(task='text-generation', model=model, tokenizer=tokenizer,
                num_return_sequences=1,
                eos_token_id=tokenizer.eos_token_id, pad_token_id=tokenizer.eos_token_id,
                return_full_text=False,  # return only the generated continuation
                max_new_tokens=260, temperature=0.7, do_sample=True)

llm = HuggingFacePipeline(pipeline=pipe)
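
# The pipeline LLM above emits free-form text rather than a structured function
# call, so the routing decision has to be recovered from the generated string.
# A minimal sketch using simple substring matching (a stricter parser may be
# needed in practice):
def parse_route(generation: str) -> str:
    """Return a worker named in the supervisor's reply, or FINISH otherwise."""
    for member in members:
        if member.lower() in generation.lower():
            return member
    return "FINISH"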



def custom_function(input_text: str) -> str:
    # Example post-processing logic applied to the supervisor's raw output.
    return f"Processed: {input_text}"

from langchain.tools import Tool

# Wrap the function as a LangChain tool. Tool instances are Runnables, so the
# tool can be composed directly into the chain and receives the raw text
# produced by the LLM as its input.
my_tool = Tool(
    name="my_custom_tool",
    description="Post-processes the supervisor's raw text output.",
    func=custom_function,
)

chain = prompt | llm | my_tool

# Define the input; the prompt expects chat messages under the "messages" key
input_text = "Your input text here"

# Run the chain: prompt -> local LLM -> post-processing tool
result = chain.invoke({"messages": [HumanMessage(content=input_text)]})

print(result)
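
# As a rough sketch of the supervisor loop described in the comments above
# (the Researcher/Coder workers are stubbed with placeholder replies), the
# parsed routing decision can drive the conversation until FINISH:
from langchain_core.messages import AIMessage

def run_supervisor_loop(question: str, max_turns: int = 3) -> None:
    messages = [HumanMessage(content=question)]
    supervisor = prompt | llm | parse_route
    for _ in range(max_turns):
        next_worker = supervisor.invoke({"messages": messages})
        if next_worker == "FINISH":
            break
        print(f"Supervisor routed to: {next_worker}")
        # Placeholder for the selected worker's real output.
        messages.append(AIMessage(content=f"{next_worker} finished its task."))

# Example (uncomment to run):
# run_supervisor_loop("Summarize recent research on mixture-of-experts models.")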