import os

from dotenv import load_dotenv, find_dotenv

# FinalStreamingStdOutCallbackHandler streams only an agent's final answer
# to stdout, token by token; it pairs with the streaming model defined below.
from langchain.callbacks.streaming_stdout_final_only import (
    FinalStreamingStdOutCallbackHandler,
)

# Load Azure OpenAI settings from a local .env file.
_ = load_dotenv(find_dotenv())

# Azure OpenAI API version and the deployment names for each model.
openaiAPIVersion = os.getenv("OPENAI_API_VERSION")
gpt4Model = os.getenv("AZURE_OPENAI_DEPLOYMENT_NAME_GPT4")
gpt35Model = os.getenv("AZURE_OPENAI_DEPLOYMENT_NAME_GPT35")

# Quick check that the settings were picked up.
print(openaiAPIVersion, gpt4Model, gpt35Model)
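
# Note: besides the variables read explicitly above, AzureChatOpenAI also
# picks up its endpoint and API key from the environment (for this langchain
# version typically OPENAI_API_BASE and OPENAI_API_KEY; the exact names are
# an assumption and depend on the installed version), so no keys are
# hard-coded here.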

from langchain.chat_models import AzureChatOpenAI

# GPT-3.5 chat model; temperature=0 for deterministic answers.
llm = AzureChatOpenAI(
    temperature=0,
    deployment_name=gpt35Model,
    openai_api_version=openaiAPIVersion,
)

# GPT-4 chat model with token streaming enabled, so responses can be emitted
# incrementally through a callback handler.
llm4 = AzureChatOpenAI(
    temperature=0,
    deployment_name=gpt4Model,
    openai_api_version=openaiAPIVersion,
    streaming=True,
)
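
# A quick sanity check (a minimal sketch; the prompt below is illustrative
# and not part of the original setup): send one message through the
# non-streaming GPT-3.5 model and print the reply.
from langchain.schema import HumanMessage

reply = llm([HumanMessage(content="Say hello in one short sentence.")])
print(reply.content)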
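
# A minimal sketch of how the streaming GPT-4 model and the imported
# FinalStreamingStdOutCallbackHandler are typically combined (assumed usage;
# the agent type, tool, and question are illustrative, not from the original
# setup). The handler prints only the agent's final answer as it streams.
from langchain.agents import AgentType, initialize_agent, load_tools

tools = load_tools(["llm-math"], llm=llm)
agent = initialize_agent(
    tools,
    llm4,
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
)
agent.run(
    "What is 12 squared?",
    callbacks=[FinalStreamingStdOutCallbackHandler()],
)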