{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "## Arxiv-Research\n", "## Tools Creation" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "from langchain_community.tools import ArxivQueryRun, WikipediaQueryRun, DuckDuckGoSearchResults\n", "from langchain_community.utilities import WikipediaAPIWrapper, ArxivAPIWrapper, DuckDuckGoSearchAPIWrapper\n", "from langchain.tools import Tool" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "wikipedia\n" ] }, { "data": { "text/plain": [ "Tool(name='Wikipedia', description='This tool uses the Wikipedia API to search for a topic.', func=, top_k_results=1, lang='en', load_all_available_meta=False, doc_content_chars_max=250))>)" ] }, "execution_count": 3, "metadata": {}, "output_type": "execute_result" } ], "source": [ "api_wrapper_wiki = WikipediaAPIWrapper(top_k_results = 5, doc_content_chars_max=250)\n", "wiki = WikipediaQueryRun(api_wrapper=api_wrapper_wiki)\n", "print(wiki.name)\n", "wiki_tool= Tool(\n", " name = \"Wikipedia\",\n", " func = wiki.run,\n", " description = \"This tool uses the Wikipedia API to search for a topic.\"\n", ")\n", "wiki_tool" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "arxiv\n" ] }, { "data": { "text/plain": [ "Tool(name='arxiv', description='Searches arXiv for papers matching the query.', func=, arxiv_exceptions=(, , ), top_k_results=1, ARXIV_MAX_QUERY_LENGTH=300, continue_on_failure=False, load_max_docs=100, load_all_available_meta=False, doc_content_chars_max=250))>)" ] }, "execution_count": 4, "metadata": {}, "output_type": "execute_result" } ], "source": [ "api_wrapper_arxiv = ArxivAPIWrapper(top_k_results = 5, doc_content_chars_max=250)\n", "arxiv = ArxivQueryRun(api_wrapper=api_wrapper_arxiv)\n", "print(arxiv.name)\n", "arxiv_tool = Tool(\n", " name = \"arxiv\",\n", " func = arxiv.run,\n", " description = \"Searches arXiv for papers matching the query.\",\n", ")\n", "arxiv_tool" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "duckduckgo_results_json\n" ] } ], "source": [ "api_wrapper_ddg = DuckDuckGoSearchAPIWrapper(region=\"us-en\", time=\"d\", max_results=10)\n", "ddg = DuckDuckGoSearchResults(api_wrapper=api_wrapper_ddg, source = \"news\")\n", "print(ddg.name)\n", "ddg_tool = Tool(\n", " name = \"DuckDuckGo_Search\",\n", " func = ddg.run,\n", " description = \"Searches for search queries using the DuckDuckGo Search engine.\"\n", ")" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [], "source": [ "ddg_tools = [ddg_tool]" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Custom Tools" ] }, { "cell_type": "code", "execution_count": 10, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "True" ] }, "execution_count": 10, "metadata": {}, "output_type": "execute_result" } ], "source": [ "from langchain_community.document_loaders import WebBaseLoader\n", "from langchain_community.vectorstores import FAISS\n", "#from langchain_openai import OpenAIEmbeddings\n", "from langchain_text_splitters import RecursiveCharacterTextSplitter\n", "from langchain_huggingface import HuggingFaceEmbeddings\n", "from tqdm.auto import tqdm\n", "\n", "import os\n", "from dotenv import load_dotenv\n", "load_dotenv()" ] }, { "cell_type": "code", "execution_count": 
11, "metadata": {}, "outputs": [], "source": [ "os.environ[\"HF_TOKOEN\"] = os.getenv(\"HUGGINGFACE_TOKEN\")\n", "embeddings = HuggingFaceEmbeddings(model_name=\"all-MiniLM-L6-v2\")" ] }, { "cell_type": "code", "execution_count": 12, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "VectorStoreRetriever(tags=['FAISS', 'HuggingFaceEmbeddings'], vectorstore=, search_kwargs={})" ] }, "execution_count": 12, "metadata": {}, "output_type": "execute_result" } ], "source": [ "loader = WebBaseLoader(\"https://docs.smith.langchain.com/\")\n", "docs = loader.load()\n", "documents = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200).split_documents(docs)\n", "vector_db = FAISS.from_documents(documents,embeddings)\n", "retriever = vector_db.as_retriever()\n", "retriever" ] }, { "cell_type": "code", "execution_count": 13, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "'langsmith-search'" ] }, "execution_count": 13, "metadata": {}, "output_type": "execute_result" } ], "source": [ "from langchain.tools.retriever import create_retriever_tool\n", "retriever_tool = create_retriever_tool(retriever,\"langsmith-search\", \"Search any information about Langsmith\")\n", "\n", "retriever_tool.name" ] }, { "cell_type": "code", "execution_count": 14, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "[Tool(name='Wikipedia', description='This tool uses the Wikipedia API to search for a topic.', func=, top_k_results=1, lang='en', load_all_available_meta=False, doc_content_chars_max=250))>),\n", " Tool(name='arxiv', description='Searches arXiv for papers matching the query.', func=, arxiv_exceptions=(, , ), top_k_results=1, ARXIV_MAX_QUERY_LENGTH=300, continue_on_failure=False, load_max_docs=100, load_all_available_meta=False, doc_content_chars_max=250))>),\n", " Tool(name='DuckDuckGo_Search', description='Searches for search queries using the DuckDuckGo Search engine.', func=),\n", " Tool(name='langsmith-search', description='Search any information about Langsmith', args_schema=, func=functools.partial(, retriever=VectorStoreRetriever(tags=['FAISS', 'HuggingFaceEmbeddings'], vectorstore=, search_kwargs={}), document_prompt=PromptTemplate(input_variables=['page_content'], input_types={}, partial_variables={}, template='{page_content}'), document_separator='\\n\\n', response_format='content'), coroutine=functools.partial(, retriever=VectorStoreRetriever(tags=['FAISS', 'HuggingFaceEmbeddings'], vectorstore=, search_kwargs={}), document_prompt=PromptTemplate(input_variables=['page_content'], input_types={}, partial_variables={}, template='{page_content}'), document_separator='\\n\\n', response_format='content'))]" ] }, "execution_count": 14, "metadata": {}, "output_type": "execute_result" } ], "source": [ "tools = [wiki_tool, arxiv_tool, ddg_tool, retriever_tool]\n", "tools" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Run all these tools with Agents and LLM models.\n", "\n", "## Tools, LLM-->AgentExecutor" ] }, { "cell_type": "code", "execution_count": 15, "metadata": {}, "outputs": [], "source": [ "import openai\n", "from langchain_groq import ChatGroq" ] }, { "cell_type": "code", "execution_count": 16, "metadata": {}, "outputs": [], "source": [ "groq_api_key = os.getenv(\"GROQ_API_KEY\")\n", "openai.api_key = os.getenv(\"OPEN_API_KEY\")\n", "\n", "llm = ChatGroq(groq_api_key=groq_api_key, model_name = \"Llama3-8b-8192\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Prompt Template" ] }, { "cell_type": "code", "execution_count": 17, 
"metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "c:\\Users\\Ashutosh Choudhari\\.conda\\envs\\langchain_projects\\lib\\site-packages\\langsmith\\client.py:253: LangSmithMissingAPIKeyWarning: API key must be provided when using hosted LangSmith API\n", " warnings.warn(\n" ] }, { "data": { "text/plain": [ "[SystemMessagePromptTemplate(prompt=PromptTemplate(input_variables=[], input_types={}, partial_variables={}, template='You are a helpful assistant'), additional_kwargs={}),\n", " MessagesPlaceholder(variable_name='chat_history', optional=True),\n", " HumanMessagePromptTemplate(prompt=PromptTemplate(input_variables=['input'], input_types={}, partial_variables={}, template='{input}'), additional_kwargs={}),\n", " MessagesPlaceholder(variable_name='agent_scratchpad')]" ] }, "execution_count": 17, "metadata": {}, "output_type": "execute_result" } ], "source": [ "from langchain import hub\n", "\n", "prompt = hub.pull(\"hwchase17/openai-functions-agent\")\n", "prompt.messages" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Agents" ] }, { "cell_type": "code", "execution_count": 18, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "RunnableAssign(mapper={\n", " agent_scratchpad: RunnableLambda(lambda x: format_to_openai_tool_messages(x['intermediate_steps']))\n", "})\n", "| ChatPromptTemplate(input_variables=['agent_scratchpad', 'input'], optional_variables=['chat_history'], input_types={'chat_history': list[typing.Annotated[typing.Union[typing.Annotated[langchain_core.messages.ai.AIMessage, Tag(tag='ai')], typing.Annotated[langchain_core.messages.human.HumanMessage, Tag(tag='human')], typing.Annotated[langchain_core.messages.chat.ChatMessage, Tag(tag='chat')], typing.Annotated[langchain_core.messages.system.SystemMessage, Tag(tag='system')], typing.Annotated[langchain_core.messages.function.FunctionMessage, Tag(tag='function')], typing.Annotated[langchain_core.messages.tool.ToolMessage, Tag(tag='tool')], typing.Annotated[langchain_core.messages.ai.AIMessageChunk, Tag(tag='AIMessageChunk')], typing.Annotated[langchain_core.messages.human.HumanMessageChunk, Tag(tag='HumanMessageChunk')], typing.Annotated[langchain_core.messages.chat.ChatMessageChunk, Tag(tag='ChatMessageChunk')], typing.Annotated[langchain_core.messages.system.SystemMessageChunk, Tag(tag='SystemMessageChunk')], typing.Annotated[langchain_core.messages.function.FunctionMessageChunk, Tag(tag='FunctionMessageChunk')], typing.Annotated[langchain_core.messages.tool.ToolMessageChunk, Tag(tag='ToolMessageChunk')]], FieldInfo(annotation=NoneType, required=True, discriminator=Discriminator(discriminator=, custom_error_type=None, custom_error_message=None, custom_error_context=None))]], 'agent_scratchpad': list[typing.Annotated[typing.Union[typing.Annotated[langchain_core.messages.ai.AIMessage, Tag(tag='ai')], typing.Annotated[langchain_core.messages.human.HumanMessage, Tag(tag='human')], typing.Annotated[langchain_core.messages.chat.ChatMessage, Tag(tag='chat')], typing.Annotated[langchain_core.messages.system.SystemMessage, Tag(tag='system')], typing.Annotated[langchain_core.messages.function.FunctionMessage, Tag(tag='function')], typing.Annotated[langchain_core.messages.tool.ToolMessage, Tag(tag='tool')], typing.Annotated[langchain_core.messages.ai.AIMessageChunk, Tag(tag='AIMessageChunk')], typing.Annotated[langchain_core.messages.human.HumanMessageChunk, Tag(tag='HumanMessageChunk')], typing.Annotated[langchain_core.messages.chat.ChatMessageChunk, Tag(tag='ChatMessageChunk')], 
typing.Annotated[langchain_core.messages.system.SystemMessageChunk, Tag(tag='SystemMessageChunk')], typing.Annotated[langchain_core.messages.function.FunctionMessageChunk, Tag(tag='FunctionMessageChunk')], typing.Annotated[langchain_core.messages.tool.ToolMessageChunk, Tag(tag='ToolMessageChunk')]], FieldInfo(annotation=NoneType, required=True, discriminator=Discriminator(discriminator=, custom_error_type=None, custom_error_message=None, custom_error_context=None))]]}, partial_variables={'chat_history': []}, metadata={'lc_hub_owner': 'hwchase17', 'lc_hub_repo': 'openai-functions-agent', 'lc_hub_commit_hash': 'a1655024b06afbd95d17449f21316291e0726f13dcfaf990cc0d18087ad689a5'}, messages=[SystemMessagePromptTemplate(prompt=PromptTemplate(input_variables=[], input_types={}, partial_variables={}, template='You are a helpful assistant'), additional_kwargs={}), MessagesPlaceholder(variable_name='chat_history', optional=True), HumanMessagePromptTemplate(prompt=PromptTemplate(input_variables=['input'], input_types={}, partial_variables={}, template='{input}'), additional_kwargs={}), MessagesPlaceholder(variable_name='agent_scratchpad')])\n", "| RunnableBinding(bound=ChatGroq(client=, async_client=, model_name='Llama3-8b-8192', model_kwargs={}, groq_api_key=SecretStr('**********')), kwargs={'tools': [{'type': 'function', 'function': {'name': 'Wikipedia', 'description': 'This tool uses the Wikipedia API to search for a topic.', 'parameters': {'properties': {'__arg1': {'title': '__arg1', 'type': 'string'}}, 'required': ['__arg1'], 'type': 'object'}}}, {'type': 'function', 'function': {'name': 'arxiv', 'description': 'Searches arXiv for papers matching the query.', 'parameters': {'properties': {'__arg1': {'title': '__arg1', 'type': 'string'}}, 'required': ['__arg1'], 'type': 'object'}}}, {'type': 'function', 'function': {'name': 'DuckDuckGo_Search', 'description': 'Searches for search queries using the DuckDuckGo Search engine.', 'parameters': {'properties': {'__arg1': {'title': '__arg1', 'type': 'string'}}, 'required': ['__arg1'], 'type': 'object'}}}, {'type': 'function', 'function': {'name': 'langsmith-search', 'description': 'Search any information about Langsmith', 'parameters': {'properties': {'query': {'description': 'query to look up in retriever', 'type': 'string'}}, 'required': ['query'], 'type': 'object'}}}]}, config={}, config_factories=[])\n", "| OpenAIToolsAgentOutputParser()" ] }, "execution_count": 18, "metadata": {}, "output_type": "execute_result" } ], "source": [ "from langchain.agents import create_openai_tools_agent, AgentExecutor\n", "\n", "agent = create_openai_tools_agent(llm,tools,prompt)\n", "agent" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Run Agent" ] }, { "cell_type": "code", "execution_count": 19, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "AgentExecutor(verbose=True, agent=RunnableMultiActionAgent(runnable=RunnableAssign(mapper={\n", " agent_scratchpad: RunnableLambda(lambda x: format_to_openai_tool_messages(x['intermediate_steps']))\n", "})\n", "| ChatPromptTemplate(input_variables=['agent_scratchpad', 'input'], optional_variables=['chat_history'], input_types={'chat_history': list[typing.Annotated[typing.Union[typing.Annotated[langchain_core.messages.ai.AIMessage, Tag(tag='ai')], typing.Annotated[langchain_core.messages.human.HumanMessage, Tag(tag='human')], typing.Annotated[langchain_core.messages.chat.ChatMessage, Tag(tag='chat')], typing.Annotated[langchain_core.messages.system.SystemMessage, Tag(tag='system')], 
typing.Annotated[langchain_core.messages.function.FunctionMessage, Tag(tag='function')], typing.Annotated[langchain_core.messages.tool.ToolMessage, Tag(tag='tool')], typing.Annotated[langchain_core.messages.ai.AIMessageChunk, Tag(tag='AIMessageChunk')], typing.Annotated[langchain_core.messages.human.HumanMessageChunk, Tag(tag='HumanMessageChunk')], typing.Annotated[langchain_core.messages.chat.ChatMessageChunk, Tag(tag='ChatMessageChunk')], typing.Annotated[langchain_core.messages.system.SystemMessageChunk, Tag(tag='SystemMessageChunk')], typing.Annotated[langchain_core.messages.function.FunctionMessageChunk, Tag(tag='FunctionMessageChunk')], typing.Annotated[langchain_core.messages.tool.ToolMessageChunk, Tag(tag='ToolMessageChunk')]], FieldInfo(annotation=NoneType, required=True, discriminator=Discriminator(discriminator=, custom_error_type=None, custom_error_message=None, custom_error_context=None))]], 'agent_scratchpad': list[typing.Annotated[typing.Union[typing.Annotated[langchain_core.messages.ai.AIMessage, Tag(tag='ai')], typing.Annotated[langchain_core.messages.human.HumanMessage, Tag(tag='human')], typing.Annotated[langchain_core.messages.chat.ChatMessage, Tag(tag='chat')], typing.Annotated[langchain_core.messages.system.SystemMessage, Tag(tag='system')], typing.Annotated[langchain_core.messages.function.FunctionMessage, Tag(tag='function')], typing.Annotated[langchain_core.messages.tool.ToolMessage, Tag(tag='tool')], typing.Annotated[langchain_core.messages.ai.AIMessageChunk, Tag(tag='AIMessageChunk')], typing.Annotated[langchain_core.messages.human.HumanMessageChunk, Tag(tag='HumanMessageChunk')], typing.Annotated[langchain_core.messages.chat.ChatMessageChunk, Tag(tag='ChatMessageChunk')], typing.Annotated[langchain_core.messages.system.SystemMessageChunk, Tag(tag='SystemMessageChunk')], typing.Annotated[langchain_core.messages.function.FunctionMessageChunk, Tag(tag='FunctionMessageChunk')], typing.Annotated[langchain_core.messages.tool.ToolMessageChunk, Tag(tag='ToolMessageChunk')]], FieldInfo(annotation=NoneType, required=True, discriminator=Discriminator(discriminator=, custom_error_type=None, custom_error_message=None, custom_error_context=None))]]}, partial_variables={'chat_history': []}, metadata={'lc_hub_owner': 'hwchase17', 'lc_hub_repo': 'openai-functions-agent', 'lc_hub_commit_hash': 'a1655024b06afbd95d17449f21316291e0726f13dcfaf990cc0d18087ad689a5'}, messages=[SystemMessagePromptTemplate(prompt=PromptTemplate(input_variables=[], input_types={}, partial_variables={}, template='You are a helpful assistant'), additional_kwargs={}), MessagesPlaceholder(variable_name='chat_history', optional=True), HumanMessagePromptTemplate(prompt=PromptTemplate(input_variables=['input'], input_types={}, partial_variables={}, template='{input}'), additional_kwargs={}), MessagesPlaceholder(variable_name='agent_scratchpad')])\n", "| RunnableBinding(bound=ChatGroq(client=, async_client=, model_name='Llama3-8b-8192', model_kwargs={}, groq_api_key=SecretStr('**********')), kwargs={'tools': [{'type': 'function', 'function': {'name': 'Wikipedia', 'description': 'This tool uses the Wikipedia API to search for a topic.', 'parameters': {'properties': {'__arg1': {'title': '__arg1', 'type': 'string'}}, 'required': ['__arg1'], 'type': 'object'}}}, {'type': 'function', 'function': {'name': 'arxiv', 'description': 'Searches arXiv for papers matching the query.', 'parameters': {'properties': {'__arg1': {'title': '__arg1', 'type': 'string'}}, 'required': ['__arg1'], 'type': 'object'}}}, {'type': 
'function', 'function': {'name': 'DuckDuckGo_Search', 'description': 'Searches for search queries using the DuckDuckGo Search engine.', 'parameters': {'properties': {'__arg1': {'title': '__arg1', 'type': 'string'}}, 'required': ['__arg1'], 'type': 'object'}}}, {'type': 'function', 'function': {'name': 'langsmith-search', 'description': 'Search any information about Langsmith', 'parameters': {'properties': {'query': {'description': 'query to look up in retriever', 'type': 'string'}}, 'required': ['query'], 'type': 'object'}}}]}, config={}, config_factories=[])\n", "| OpenAIToolsAgentOutputParser(), input_keys_arg=[], return_keys_arg=[], stream_runnable=True), tools=[Tool(name='DuckDuckGo_Search', description='Searches for search queries using the DuckDuckGo Search engine.', func=)])" ] }, "execution_count": 19, "metadata": {}, "output_type": "execute_result" } ], "source": [ "agent_executor = AgentExecutor(agent = agent, tools = ddg_tools, verbose = True)\n", "agent_executor" ] }, { "cell_type": "code", "execution_count": 20, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\n", "\n", "\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n", "\u001b[32;1m\u001b[1;3m\n", "Invoking: `langsmith-search` with `{'query': 'Langsmith'}`\n", "\n", "\n", "\u001b[0mlangsmith-search is not a valid tool, try one of [DuckDuckGo_Search].\u001b[32;1m\u001b[1;3m\n", "Invoking: `DuckDuckGo_Search` with `Langsmith`\n", "\n", "\n", "\u001b[0m\u001b[36;1m\u001b[1;3msnippet: AI/ML frameworks API integrations (Meta AutoGen AWS Azure Chroma GCP Google Ads Langfuse LangGraph LangSmith LinkedIn Ads) Pinecone Python PyTorch Semantic Kernel TensorFlow Weaviate. Stipend: ₹ 16,667 /month - ₹ 25,000 /month . Batch: 2027, 2026, 2025 ..., title: Zocket hiring Tech Intern [AI, Stipend: 25K/month] - talentd.in, link: https://www.talentd.in/jobs.php?id=1765, snippet: LangSmith 사용하기 (0) 2025.01.07: LangChain에서 Rerank로 RAG 정확도 높이기 (1) 2024.12.29: Llama 3.2 Vision 자동차 번호 인식 (0) 2024.12.24: LangChain 으로 RAG기반 Wiki 검색 채팅창 만들기 (0) 2024.12.24: Python에서 2차원 배열 중 특정 열의 값을 모두 바꾸기 (0) 2024.10.27, title: ollama 의 deepseek 를 python 에서 사용하기, link: https://keistory.tistory.com/1609\u001b[0m\u001b[32;1m\u001b[1;3mIt seems like the tool call yield a result about Langsmith, which is likely a project or a platform related to AI/ML and natural language processing. The result includes snippets of text, job postings, and links to external websites. It appears that Langsmith is a company or a research initiative that focuses on developing AI/ML frameworks and tools, and it might be related to API integrations and other technical projects. The text snippets also mention LangChain, a project that seems to be related to Langsmith.\u001b[0m\n", "\n", "\u001b[1m> Finished chain.\u001b[0m\n" ] }, { "data": { "text/plain": [ "{'input': 'Tell me about Langsmith',\n", " 'output': 'It seems like the tool call yield a result about Langsmith, which is likely a project or a platform related to AI/ML and natural language processing. The result includes snippets of text, job postings, and links to external websites. It appears that Langsmith is a company or a research initiative that focuses on developing AI/ML frameworks and tools, and it might be related to API integrations and other technical projects. 
The text snippets also mention LangChain, a project that seems to be related to Langsmith.'}" ] }, "execution_count": 20, "metadata": {}, "output_type": "execute_result" } ], "source": [ "agent_executor.invoke({\"input\": \"Tell me about Langsmith\"})" ] }, { "cell_type": "code", "execution_count": 21, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\n", "\n", "\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n", "\u001b[32;1m\u001b[1;3m\n", "Invoking: `DuckDuckGo_Search` with `What is Machine learning?`\n", "\n", "\n", "\u001b[0m\u001b[36;1m\u001b[1;3msnippet: The machine learning algorithms are a way to extract hidden patterns from large, laborious datasets. They might play a vital part in making decisions for complicated issues in health problems. In the field of health care, machine learning algorithms are becoming more and more important and are regarded as trustworthy methods for illness ..., title: An Empirical Review of Different Machine Learning Techniques ... - Springer, link: https://link.springer.com/chapter/10.1007/978-981-97-7946-8_19, snippet: Unlock the potential of Machine Learning with our in-depth course. Learn to autonomously identify trends and patterns through cutting-edge automation, enhancing your ability to manage diverse and complex data. Our course provides a deep dive into the practical applications of AI and Machine Learning, focusing on essential methodologies and ..., title: AI for Everyone: Machine Learning Fundamentals - Eventbrite, link: https://www.eventbrite.com/e/ai-for-everyone-machine-learning-fundamentals-tickets-1125751044679, snippet: DeepMind thinks that the secret to more capable AI could be in discovering new ways to tackle difficult geometry problems, particularly those involving Euclidean geometry., title: DeepMind's AI system can outperform math olympiad gold medalists, link: https://www.newsbytesapp.com/news/science/deepmind-says-its-ai-outperforms-international-mathematical-olympiad-gold-medalists/story, snippet: A machine learning-based application that predicts diseases based on user-input symptoms. Built using Python with Pandas, NumPy, Scikit-learn, and TensorFlow, it implements Decision Tree, Random Forest, and Naive Bayes algorithms for accurate predictions. The system provides disease predictions and suggests relevant doctors for consultation., title: Disease-prediction-using-Machine-Learning - GitHub, link: https://github.com/hruthikatigulla/Disease-Prediction-System\u001b[0m\u001b[32;1m\u001b[1;3mIt seems like you've already called the tool and got some results! Based on the snippet you provided, Machine learning is a way to extract hidden patterns from large datasets, and it's becoming increasingly important in healthcare. It's also used to identify trends and patterns through automation and is applied in various fields, including disease prediction. The results also mention AI and its potential applications.\u001b[0m\n", "\n", "\u001b[1m> Finished chain.\u001b[0m\n" ] }, { "data": { "text/plain": [ "{'input': 'Search Duck Duck go and answer What is Machine learning?',\n", " 'output': \"It seems like you've already called the tool and got some results! Based on the snippet you provided, Machine learning is a way to extract hidden patterns from large datasets, and it's becoming increasingly important in healthcare. It's also used to identify trends and patterns through automation and is applied in various fields, including disease prediction. 
The results also mention AI and its potential applications.\"}" ] }, "execution_count": 21, "metadata": {}, "output_type": "execute_result" } ], "source": [ "agent_executor.invoke({\"input\": \"Search Duck Duck go and answer What is Machine learning?\"})" ] }, { "cell_type": "code", "execution_count": 22, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\n", "\n", "\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n", "\u001b[32;1m\u001b[1;3m\n", "Invoking: `DuckDuckGo_Search` with `Who is Max Verstappen?`\n", "\n", "\n", "\u001b[0m\u001b[36;1m\u001b[1;3msnippet: Max Verstappen has teased a career switch away from Formula 1 in a behind the scenes test with a rival motorsport series. The Dutchman has been linked with a series of team switches over the past year, with both Mercedes and Aston Martin emerging as interested teams to acquire Verstappen's ..., title: Verstappen teases career SWITCH after behind the scenes test, link: https://www.gpfans.com/en/f1-news/1040914/max-verstappen-f1-imsa-career-switch-acura-arx-06/, snippet: Max Verstappen's first appearance in his driver role will be at the F1 75 event on February 18 at the O2 arena to introduce the livery of Red Bull's new 2025 car. Jos Verstappen says fatherhood is ..., title: \"Champion of dad jokes\": Fans react as Max Verstappen ... - Sportskeeda, link: https://www.sportskeeda.com/f1/news-champion-dad-jokes-fans-react-max-verstappen-cracks-super-bowl-joke-livestream, snippet: Max Verstappen has long flirted with the idea of taking on the legendary Le Mans 24 Hours endurance race, but choosing the right teammate is a crucial factor for the Dutchman. While his father Jos Verstappen has officially bowed out of the dream, the four-tile F1 World Champion is still determined to make it happen - potentially alongside ..., title: Verstappen still open to Le Mans assault, but Dad says 'no', link: https://f1i.com/news/530359-verstappen-still-open-to-le-mans-assault-but-dad-says-no.html, snippet: Max Verstappen reigned supreme last season as he bagged his fourth Drivers' Championship title in a row, getting past a late challenge from McLaren driver Lando Norris., title: Max Verstappen earning 5 million more than Lewis ... - Hindustan Times, link: https://www.hindustantimes.com/sports/others/max-verstappen-earning-5-million-more-than-lewis-hamilton-how-much-are-f1-2025-drivers-earning-101739100426391.html\u001b[0m\u001b[32;1m\u001b[1;3mI called the tool for tool call id \"call_h8p6\" and it yielded the following text:\n", "\n", "Max Verstappen is a Dutch professional racing driver who currently competes in Formula One for Red Bull Racing. He has been linked with a series of team switches over the past year, including a potential move to Mercedes or Aston Martin. Verstappen has also been known to tease a career switch to other motorsports series, such as the IMSA WeatherTech SportsCar Championship. In addition to his F1 career, Verstappen has also expressed interest in taking on the legendary Le Mans 24 Hours endurance race.\u001b[0m\n", "\n", "\u001b[1m> Finished chain.\u001b[0m\n" ] }, { "data": { "text/plain": [ "{'input': 'search on duck duck go and answer me: Who is Max Verstappen?',\n", " 'output': 'I called the tool for tool call id \"call_h8p6\" and it yielded the following text:\\n\\nMax Verstappen is a Dutch professional racing driver who currently competes in Formula One for Red Bull Racing. 
He has been linked with a series of team switches over the past year, including a potential move to Mercedes or Aston Martin. Verstappen has also been known to tease a career switch to other motorsports series, such as the IMSA WeatherTech SportsCar Championship. In addition to his F1 career, Verstappen has also expressed interest in taking on the legendary Le Mans 24 Hours endurance race.'}" ] }, "execution_count": 22, "metadata": {}, "output_type": "execute_result" } ], "source": [ "agent_executor.invoke({\"input\": \"search on duck duck go and answer me: Who is Max Verstappen?\"})" ] }, { "cell_type": "code", "execution_count": 23, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\n", "\n", "\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n", "\u001b[32;1m\u001b[1;3m\n", "Invoking: `Wikipedia` with `DOI:1706.03762`\n", "\n", "\n", "\u001b[0mWikipedia is not a valid tool, try one of [DuckDuckGo_Search].\u001b[32;1m\u001b[1;3m\n", "Invoking: `arxiv` with `DOI:1706.03762`\n", "\n", "\n", "\u001b[0marxiv is not a valid tool, try one of [DuckDuckGo_Search].\u001b[32;1m\u001b[1;3m\n", "Invoking: `DuckDuckGo_Search` with `Transformer Architecture`\n", "\n", "\n", "\u001b[0m\u001b[36;1m\u001b[1;3msnippet: In our Transformer architecture [], we customized the embedding layers in both the encoder and decoder to leverage pre-trained word embeddings rather than learning embeddings from scratch.Specifically, we initialized the embedding weights with pre-trained GloVe vectors [] before training the full models.By using these fixed, high-quality embeddings, the Transformer can build on knowledge from ..., title: Mitigating Gender Bias in Hindi to English Neural Machine Translation, link: https://link.springer.com/chapter/10.1007/978-981-97-7946-8_8, snippet: As small as they are, SLMs stem from LLMs in order to exist. Their neural network-based architecture is known as the transformer model. SLMs are created via a process known as model compression ..., title: SLM series: Editorial brief & scope - Computer Weekly, link: https://www.computerweekly.com/blog/CW-Developer-Network/SLM-series-Editorial-brief-scope, snippet: The combined embeddings are then fed through a series of transformers, comprising attention mechanisms and feed-forward neural network operations that allow the model to generate context-specific ..., title: AI Insights: Generative AI (HTML) - GOV.UK, link: https://www.gov.uk/government/publications/ai-insights/ai-insights-generative-ai-html, snippet: The architecture is the structure of the machine learning code, neural networks were the first major development, followed by convolutional networks and now we are in a new era of transformer models which have caused a boom in Large Language Models, like chatGPT. Training data as the name suggests is the data or information used to train the ..., title: Intensifying AI Competition: Will the future be the time of AI?, link: https://ws.or.kr/article/36664\u001b[0m\u001b[32;1m\u001b[1;3m\n", "Invoking: `langsmith-search` with `{'query': 'Transformer Architecture'}`\n", "\n", "\n", "\u001b[0mlangsmith-search is not a valid tool, try one of [DuckDuckGo_Search].\u001b[32;1m\u001b[1;3mBased on the information gathered from the previous tool calls, I will provide a direct response to your question about the Transformer architecture.\n", "\n", "The Transformer architecture is a type of neural network architecture primarily used for machine translation, language modeling, and text generation tasks. 
It was introduced in the paper \"Attention Is All You Need\" by Vaswani et al. in 2017. The architecture is designed to handle sequential data and is particularly well-suited for natural language processing tasks.\n", "\n", "The Transformer architecture is based on self-attention mechanisms, which allow the model to attend to different parts of the input sequence simultaneously and weigh their importance. This is in contrast to traditional recurrent neural network (RNN) architectures, which rely on a sequential processing of the input sequence.\n", "\n", "The architecture consists of an encoder and a decoder. The encoder takes in a sequence of tokens (such as words or characters) and outputs a continuous representation of the sequence. The decoder then takes in this representation and generates an output sequence.\n", "\n", "The Transformer architecture has been widely adopted in the field of natural language processing and has achieved state-of-the-art results in many tasks, including machine translation, text generation, and language modeling.\n", "\n", "I hope this information is helpful!\u001b[0m\n", "\n", "\u001b[1m> Finished chain.\u001b[0m\n" ] }, { "data": { "text/plain": [ "{'input': 'Tell me about transformer architecture after reading the following research paper (DOI: 1706.03762)?',\n", " 'output': 'Based on the information gathered from the previous tool calls, I will provide a direct response to your question about the Transformer architecture.\\n\\nThe Transformer architecture is a type of neural network architecture primarily used for machine translation, language modeling, and text generation tasks. It was introduced in the paper \"Attention Is All You Need\" by Vaswani et al. in 2017. The architecture is designed to handle sequential data and is particularly well-suited for natural language processing tasks.\\n\\nThe Transformer architecture is based on self-attention mechanisms, which allow the model to attend to different parts of the input sequence simultaneously and weigh their importance. This is in contrast to traditional recurrent neural network (RNN) architectures, which rely on a sequential processing of the input sequence.\\n\\nThe architecture consists of an encoder and a decoder. The encoder takes in a sequence of tokens (such as words or characters) and outputs a continuous representation of the sequence. 
The decoder then takes in this representation and generates an output sequence.\\n\\nThe Transformer architecture has been widely adopted in the field of natural language processing and has achieved state-of-the-art results in many tasks, including machine translation, text generation, and language modeling.\\n\\nI hope this information is helpful!'}" ] }, "execution_count": 23, "metadata": {}, "output_type": "execute_result" } ], "source": [ "agent_executor.invoke({\"input\": \"Tell me about transformer architecture after reading the following research paper (DOI: 1706.03762)?\"})" ] }, { "cell_type": "code", "execution_count": 26, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\n", "\n", "\u001b[1m> Entering new AgentExecutor chain...\u001b[0m\n", "\u001b[32;1m\u001b[1;3m\n", "Invoking: `Wikipedia` with `Donald Trump`\n", "\n", "\n", "\u001b[0mWikipedia is not a valid tool, try one of [DuckDuckGo_Search].\u001b[32;1m\u001b[1;3m\n", "Invoking: `DuckDuckGo_Search` with `Donald Trump`\n", "\n", "\n", "\u001b[0m\u001b[36;1m\u001b[1;3msnippet: President Donald Trump speaks during a joint press conference with Japanese Prime Minister Shigeru Ishiba in the East Room at the White House, Feb. 7, 2025 in Washington. Andrew Harnik/Getty Images, title: Trump 2nd term live updates: Musk 'not gaining anything' from DOGE ..., link: https://abcnews.go.com/Politics/live-updates/trump-second-term-live-updates/?id=118389757, snippet: Nearly three weeks into Donald Trump's second term in office, the president is continuing to dramatically reimagine and expand his use of executive authority — and to use that authority to ..., title: Trump's recent actions show his revenge tour isn't slowing down, link: https://www.cnn.com/2025/02/09/politics/trump-actions-settling-scores/index.html, snippet: Amid his aggressive drive to radically reshape America, President Donald Trump arrived at Super Bowl LIX in New Orleans on Sunday as the first sitting president to attend the NFL's championship game. The 47th president arrived about an hour before kickoff for the big game between the Philadelphia ..., title: Donald Trump Makes Historic Appearance At Super Bowl LIX, link: https://www.huffpost.com/entry/donald-trump-attends-2025-super-bowl_n_67a79c2fe4b0c49b6c88f080, snippet: President Donald Trump prerecorded an interview with Bret Baier that's scheduled to be aired on Sunday, ahead of Super Bowl 59. Part of the interview with Baier, Fox News Channel's chief political ..., title: President Donald Trump Super Bowl pregame interview: How to watch, link: https://www.usatoday.com/story/news/politics/2025/02/09/trump-super-bowl-59-interview/78373518007/\u001b[0m\u001b[32;1m\u001b[1;3mIt seems like President Donald Trump has been quite busy since his second term in office. Let me summarize the recent news articles for you:\n", "\n", "President Trump has been expanding his use of executive authority and settling scores with his political opponents. He made a historic appearance at Super Bowl LIX, becoming the first sitting president to attend the NFL's championship game. He also prerecorded an interview with Bret Baier of Fox News Channel that will be aired ahead of the game.\n", "\n", "It's interesting to note that President Trump has been using his executive authority to reshape America in his vision. 
He has been actively working to achieve his goals, and it seems like his \"revenge tour\" is still ongoing.\n", "\n", "I hope this summary helps you stay up-to-date with the latest news about President Trump!\u001b[0m\n", "\n", "\u001b[1m> Finished chain.\u001b[0m\n" ] }, { "data": { "text/plain": [ "{'input': 'tell me recent news about Donald trump.',\n", " 'output': 'It seems like President Donald Trump has been quite busy since his second term in office. Let me summarize the recent news articles for you:\\n\\nPresident Trump has been expanding his use of executive authority and settling scores with his political opponents. He made a historic appearance at Super Bowl LIX, becoming the first sitting president to attend the NFL\\'s championship game. He also prerecorded an interview with Bret Baier of Fox News Channel that will be aired ahead of the game.\\n\\nIt\\'s interesting to note that President Trump has been using his executive authority to reshape America in his vision. He has been actively working to achieve his goals, and it seems like his \"revenge tour\" is still ongoing.\\n\\nI hope this summary helps you stay up-to-date with the latest news about President Trump!'}" ] }, "execution_count": 26, "metadata": {}, "output_type": "execute_result" } ], "source": [ "agent_executor.invoke({\"input\": \"tell me recent news about Donald trump.\"})" ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Available Ollama Models\n", "['llama3:latest', 'gemma2:2b']\n" ] } ], "source": [ "import requests\n", "\n", "def list_ollama_models(base_url):\n", "    \"\"\"Fetches the list of available models from the running Ollama instance.\"\"\"\n", "    try:\n", "        response = requests.get(f\"{base_url}/api/tags\")\n", "        response.raise_for_status()  # Raise an error for HTTP failures\n", "        models = response.json().get(\"models\", [])\n", "        return [model[\"name\"] for model in models]\n", "    except requests.exceptions.RequestException as e:\n", "        return [f\"Error: {e}\"]\n", "\n", "OLLAMA_WSL_IP = \"http://127.0.0.1:11434\"\n", "\n", "# Call this function to check available models\n", "ollama_models = list_ollama_models(OLLAMA_WSL_IP)\n", "\n", "# Print the available models\n", "print(\"Available Ollama Models\")\n", "print(ollama_models)\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "langchain_projects", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.16" } }, "nbformat": 4, "nbformat_minor": 2 }