Spaces:
Sleeping
Sleeping
Commit
·
b087f3c
1
Parent(s):
ab53831
secrets and env vars uppercase
Browse files
src/legisqa_local/config/settings.py
CHANGED
|
@@ -24,9 +24,9 @@ def get_secret(key: str, default=None):
|
|
| 24 |
# Environment variables setup
|
| 25 |
def setup_environment():
|
| 26 |
"""Setup environment variables for the application"""
|
| 27 |
-
os.environ["LANGCHAIN_API_KEY"] = get_secret("langchain_api_key", "")
|
| 28 |
os.environ["LANGCHAIN_TRACING_V2"] = "true"
|
| 29 |
-
os.environ["LANGCHAIN_PROJECT"] = get_secret("langchain_project", "legisqa-local")
|
| 30 |
os.environ["TOKENIZERS_PARALLELISM"] = "false"
|
| 31 |
|
| 32 |
# ChromaDB configuration
|
|
|
|
| 24 |
# Environment variables setup
|
| 25 |
def setup_environment():
|
| 26 |
"""Setup environment variables for the application"""
|
| 27 |
+
os.environ["LANGCHAIN_API_KEY"] = get_secret("LANGCHAIN_API_KEY", "")
|
| 28 |
os.environ["LANGCHAIN_TRACING_V2"] = "true"
|
| 29 |
+
os.environ["LANGCHAIN_PROJECT"] = get_secret("LANGCHAIN_PROJECT", "legisqa-local")
|
| 30 |
os.environ["TOKENIZERS_PARALLELISM"] = "false"
|
| 31 |
|
| 32 |
# ChromaDB configuration
|
src/legisqa_local/core/llm.py
CHANGED
|
@@ -17,7 +17,7 @@ def get_llm(gen_config: dict):
|
|
| 17 |
llm = ChatOpenAI(
|
| 18 |
model=gen_config["model_name"],
|
| 19 |
temperature=gen_config["temperature"],
|
| 20 |
-
api_key=get_secret("openai_api_key"),
|
| 21 |
max_tokens=gen_config["max_output_tokens"],
|
| 22 |
)
|
| 23 |
|
|
@@ -25,7 +25,7 @@ def get_llm(gen_config: dict):
|
|
| 25 |
llm = ChatAnthropic(
|
| 26 |
model_name=gen_config["model_name"],
|
| 27 |
temperature=gen_config["temperature"],
|
| 28 |
-
api_key=get_secret("anthropic_api_key"),
|
| 29 |
max_tokens_to_sample=gen_config["max_output_tokens"],
|
| 30 |
)
|
| 31 |
|
|
@@ -34,14 +34,14 @@ def get_llm(gen_config: dict):
|
|
| 34 |
model=gen_config["model_name"],
|
| 35 |
temperature=gen_config["temperature"],
|
| 36 |
max_tokens=gen_config["max_output_tokens"],
|
| 37 |
-
api_key=get_secret("together_api_key"),
|
| 38 |
)
|
| 39 |
|
| 40 |
case "Google":
|
| 41 |
llm = ChatGoogleGenerativeAI(
|
| 42 |
model=gen_config["model_name"],
|
| 43 |
temperature=gen_config["temperature"],
|
| 44 |
-
api_key=get_secret("google_api_key"),
|
| 45 |
max_output_tokens=gen_config["max_output_tokens"],
|
| 46 |
)
|
| 47 |
|
|
|
|
| 17 |
llm = ChatOpenAI(
|
| 18 |
model=gen_config["model_name"],
|
| 19 |
temperature=gen_config["temperature"],
|
| 20 |
+
api_key=get_secret("OPENAI_API_KEY"),
|
| 21 |
max_tokens=gen_config["max_output_tokens"],
|
| 22 |
)
|
| 23 |
|
|
|
|
| 25 |
llm = ChatAnthropic(
|
| 26 |
model_name=gen_config["model_name"],
|
| 27 |
temperature=gen_config["temperature"],
|
| 28 |
+
api_key=get_secret("ANTHROPIC_API_KEY"),
|
| 29 |
max_tokens_to_sample=gen_config["max_output_tokens"],
|
| 30 |
)
|
| 31 |
|
|
|
|
| 34 |
model=gen_config["model_name"],
|
| 35 |
temperature=gen_config["temperature"],
|
| 36 |
max_tokens=gen_config["max_output_tokens"],
|
| 37 |
+
api_key=get_secret("TOGETHER_API_KEY"),
|
| 38 |
)
|
| 39 |
|
| 40 |
case "Google":
|
| 41 |
llm = ChatGoogleGenerativeAI(
|
| 42 |
model=gen_config["model_name"],
|
| 43 |
temperature=gen_config["temperature"],
|
| 44 |
+
api_key=get_secret("GOOGLE_API_KEY"),
|
| 45 |
max_output_tokens=gen_config["max_output_tokens"],
|
| 46 |
)
|
| 47 |
|