import sys
from typing import Any


class BaseCallbackHandler:
    """Base class to handle callbacks for the OpenAI function agent."""

    def on_llm_start(self, **kwargs: Any) -> None:
        """Run when LLM starts running."""

    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        """Run on new LLM token. Only available when streaming is enabled."""

    def on_llm_end(self, response: str, **kwargs: Any) -> None:
        """Run when LLM ends running."""


class PrintingCallBackHandler(BaseCallbackHandler):
    """Callback handler that prints each streamed token via print()."""

    def on_llm_start(self, **kwargs: Any) -> None:
        """Run when LLM starts running."""

    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        """Run on new LLM token. Only available when streaming is enabled."""
        print(token)

    def on_llm_end(self, response: str, **kwargs: Any) -> None:
        """Run when LLM ends running."""


class StreamingStdOutCallBackHandler(BaseCallbackHandler):
    """Callback handler that streams each token to stdout and flushes immediately."""

    def on_llm_start(self, **kwargs: Any) -> None:
        """Run when LLM starts running."""

    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        """Run on new LLM token. Only available when streaming is enabled."""
        sys.stdout.write(token)
        sys.stdout.flush()

    def on_llm_end(self, response: str, **kwargs: Any) -> None:
        """Run when LLM ends running."""


class QueueCallback(BaseCallbackHandler):
    """Callback handler for streaming LLM responses to a queue."""

    def __init__(self, q):
        # q is expected to support put() and empty(), e.g. queue.Queue.
        self.q = q

    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        """Run on new LLM token; push it onto the queue."""
        self.q.put(token)

    def on_llm_end(self, *args, **kwargs: Any) -> None:
        """Run when LLM ends running."""
        return self.q.empty()
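

# --- Illustrative usage sketch (assumption: tokens arrive from some streaming
# producer; `fake_token_stream` below is a hypothetical stand-in for an LLM call
# that fires the callback once per token). It is not part of the handlers above.
if __name__ == "__main__":
    import queue
    import threading

    q: "queue.Queue[str]" = queue.Queue()
    handler = QueueCallback(q)

    def fake_token_stream() -> None:
        # Simulate a streaming LLM by emitting a few tokens through the callback.
        for token in ["Hello", ", ", "world", "!"]:
            handler.on_llm_new_token(token)
        handler.on_llm_end("Hello, world!")

    producer = threading.Thread(target=fake_token_stream)
    producer.start()
    producer.join()

    # Drain the queue on the consumer side and write the tokens to stdout.
    while not q.empty():
        sys.stdout.write(q.get())
    sys.stdout.flush()
    print()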