File size: 34,259 Bytes
bea5044
a0e37e2
 
 
f86d7f2
 
 
a0e37e2
 
 
 
 
 
 
bea5044
a0e37e2
 
bea5044
 
a0e37e2
 
 
 
 
 
 
 
 
 
f86d7f2
a0e37e2
 
 
f86d7f2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a0e37e2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
f86d7f2
a0e37e2
 
 
 
 
 
bea5044
a0e37e2
 
 
 
 
 
 
 
 
 
 
 
 
bea5044
 
 
a0e37e2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
bea5044
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a0e37e2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
bea5044
a0e37e2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
bea5044
a0e37e2
bea5044
 
 
 
 
 
 
 
a0e37e2
 
bea5044
a0e37e2
 
 
 
 
 
bea5044
 
 
a0e37e2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
bea5044
a0e37e2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
bea5044
a0e37e2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
bea5044
 
 
 
 
 
 
 
 
 
 
 
f86d7f2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
bea5044
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
f86d7f2
bea5044
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
f86d7f2
 
 
 
 
 
 
 
bea5044
f86d7f2
 
 
 
bea5044
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
f86d7f2
 
bea5044
 
 
 
 
 
 
 
 
 
 
 
f86d7f2
bea5044
f86d7f2
 
 
 
bea5044
f86d7f2
bea5044
 
 
 
f86d7f2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
bea5044
 
 
 
 
f86d7f2
bea5044
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
f86d7f2
 
 
 
 
 
 
 
 
 
 
 
bea5044
 
 
 
 
 
 
f86d7f2
bea5044
f86d7f2
 
 
 
 
bea5044
f86d7f2
bea5044
 
 
f86d7f2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
bea5044
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
f86d7f2
bea5044
 
 
 
 
 
f86d7f2
 
bea5044
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
from typing import TypedDict, List
from functools import partial
import json
import ast
from ask_candid.base.api_base import BaseAPI
import os
import pandas as pd
from pydantic import BaseModel, Field

from langchain_core.runnables import RunnableSequence
from langchain_core.language_models.llms import LLM
from langchain.agents.openai_functions_agent.base import create_openai_functions_agent
from langchain.agents.agent import AgentExecutor
from langchain.agents.agent_types import AgentType
from langchain.prompts import ChatPromptTemplate, PromptTemplate, MessagesPlaceholder
from langchain.output_parsers import PydanticOutputParser
from langchain.schema import BaseMessage
from langchain.agents import create_tool_calling_agent, AgentExecutor
from langchain_core.tools import Tool

from langgraph.graph import StateGraph, END

from ask_candid.tools.elastic.index_data_tool import IndexShowDataTool
from ask_candid.tools.elastic.index_details_tool import IndexDetailsTool
from ask_candid.tools.elastic.index_search_tool import create_search_tool

tools = [
    IndexShowDataTool(),
    IndexDetailsTool(),
    create_search_tool(pcs_codes={}),
]


class AutocodingAPI(BaseAPI):
    def __init__(self):
        super().__init__(
            url=os.getenv("AUTOCODING_API_URL"),
            headers={
                "x-api-key": os.getenv("AUTOCODING_API_KEY"),
                "Content-Type": "application/json",
            },
        )

    def __call__(self, text: str, taxonomy: str = "pcs-v3"):
        params = {"text": text, "taxonomy": taxonomy}
        return self.get(**params)


def find_subject_levels(filtered_df, subject_level_i, target_value):
    """
    Filters the DataFrame from the last valid NaN in 'Subject Level i' and retrieves corresponding values for lower levels.

    Parameters:
        filtered_df (pd.DataFrame): The input DataFrame.
        subject_level_i (int): The subject level to filter from (1 to 4).
        target_value (str): The value to search for in 'Subject Level i'.

    Returns:
        dict: A dictionary containing values for 'Subject Level i' to 'Subject Level 1'.
        pd.DataFrame: The filtered DataFrame from the determined start index to the target_value row.
    """
    if subject_level_i < 1 or subject_level_i > 4:
        raise ValueError("subject_level_i should be between 1 and 4")

    # Define the target column dynamically
    target_column = f"Subject Level {subject_level_i}"

    # Find indices where the target column has the target value
    target_indices = filtered_df[
        filtered_df[target_column].astype(str).str.strip() == target_value
    ].index

    if target_indices.empty:
        return {}, pd.DataFrame()  # Return empty if target_value is not found

    # Get the first occurrence of the target value
    first_target_index = target_indices[0]

    # Initialize dictionary to store subject level values
    subject_level_values = {target_column: target_value}

    # Initialize subject level start index
    subject_level_start = first_target_index

    # Find the last non-NaN row for each subject level
    for level in range(subject_level_i - 1, 0, -1):  # Loop from subject_level_i-1 to 1
        column_name = f"Subject Level {level}"

        # Start checking above the previous found index
        current_index = subject_level_start - 1

        while current_index >= 0 and pd.isna(
            filtered_df.loc[current_index, column_name]
        ):
            current_index -= 1  # Move up while NaN is found

        # Move one row down to get the last valid row in 'Subject Level level'
        subject_level_start = current_index + 1

        # Ensure we store the correct value at each subject level
        if subject_level_start in filtered_df.index:
            subject_level_values[column_name] = filtered_df.loc[
                subject_level_start - 1, column_name
            ]

    # Ensure valid slicing range
    min_start_index = subject_level_start

    if min_start_index < first_target_index:
        filtered_df = filtered_df.loc[min_start_index:first_target_index]
    else:
        filtered_df = pd.DataFrame()

    return subject_level_values, filtered_df


def extract_heirarchy(full_code, target_value):
    # df = pd.read_excel(
    #     r"C:\Users\mukul.rawat\OneDrive - Candid\Documents\Projects\Gen AI\azure_devops\ask-candid-assistant\PCS_Taxonomy_Definitions_2024.xlsx"
    # )
    df = pd.read_excel(r"C:\Users\siqi.deng\Downloads\PCS_Taxonomy_Definitions_2024.xlsx")
    filtered_df = df[df["PCS Code"].str.startswith(full_code[:2], na=False)]
    for i in range(1, 5):
        column_name = f"Subject Level {i}"
        if (df[column_name].str.strip() == target_value).any():
            break

    subject_level_values, filtered_df = find_subject_levels(
        filtered_df, i, target_value
    )
    sorted_values = [
        value
        for key, value in sorted(
            subject_level_values.items(), key=lambda x: int(x[0].split()[-1])
        )
    ]
    # Joining values in the required format
    result = " : ".join(sorted_values)
    return result


class GraphState(TypedDict):
    query: str = Field(
        ..., description="The user's query to be processed by the system."
    )
    agent_out: str = Field(
        ...,
        description="The output generated by the AI agent after processing the query.",
    )
    next_step: str = Field(
        ..., description="The next step in the workflow, determined by query analysis."
    )
    es_query: dict = Field(
        ..., description="The Elasticsearch query generated or used by the agent."
    )

    es_result: dict = Field(
        ...,
        description="The Elasticsearch query result generated or used by the agent.",
    )
    pcs_codes: dict = Field(..., description="pcs codes")


class AnalysisResult(BaseModel):
    category: str = Field(..., description="Either 'general' or 'Database'")


def agent_factory(llm: LLM) -> AgentExecutor:
    """
    Creates and configures an AgentExecutor instance for interacting with Elasticsearch.

    This function initializes an OpenAI GPT-4-based LLM with specific parameters,
    constructs a prompt tailored for Elasticsearch assistance, and integrates the
    agent with a set of tools to handle user queries. The agent is designed to work
    with OpenAI functions for enhanced capabilities.

    Returns:
        AgentExecutor: Configured agent ready to execute tasks with specified tools,
                       providing detailed intermediate steps for transparency.
    """

    # llm = ChatOpenAI(
    #     model="gpt-4o", temperature=0, api_key=OPENAI["key"], streaming=False
    # )

    tags_ = []
    agent = AgentType.OPENAI_FUNCTIONS
    tags_.append(agent.value if isinstance(agent, AgentType) else agent)
    # Create the prompt
    prompt = ChatPromptTemplate.from_messages(
        [
            ("system", "You are a helpful elasticsearch assistant"),
            MessagesPlaceholder(variable_name="chat_history", optional=True),
            ("human", "{input}"),
            MessagesPlaceholder(variable_name="agent_scratchpad"),
        ]
    )

    # Create the agent
    agent_obj = create_openai_functions_agent(llm, tools, prompt)

    return AgentExecutor.from_agent_and_tools(
        agent=agent_obj,
        tools=tools,
        tags=tags_,
        verbose=True,
        return_intermediate_steps=True,
    )


def agent_factory_claude(llm: LLM) -> AgentExecutor:
    """
    Creates and configures an AgentExecutor instance for interacting with Elasticsearch.

    This function initializes an OpenAI GPT-4-based LLM with specific parameters,
    constructs a prompt tailored for Elasticsearch assistance, and integrates the
    agent with a set of tools to handle user queries. The agent is designed to work
    with OpenAI functions for enhanced capabilities.

    Returns:
        AgentExecutor: Configured agent ready to execute tasks with specified tools,
                       providing detailed intermediate steps for transparency.
    """

    # llm = ChatOpenAI(
    #     model="gpt-4o", temperature=0, api_key=OPENAI["key"], streaming=False
    # )

    # tags_ = []
    # agent = AgentType.OPENAI_FUNCTIONS
    # tags_.append(agent.value if isinstance(agent, AgentType) else agent)
    # Create the prompt
    prompt = ChatPromptTemplate.from_messages(
        [
            ("system", "You are a helpful elasticsearch assistant"),
            MessagesPlaceholder(variable_name="chat_history", optional=True),
            ("human", "{input}"),
            MessagesPlaceholder(variable_name="agent_scratchpad"),
        ]
    )

    agent = create_tool_calling_agent(llm, tools, prompt)
    agent_executor = AgentExecutor.from_agent_and_tools(
        agent=agent, tools=tools, verbose=True, return_intermediate_steps=True
    )
    # Create the agent
    return agent_executor


# define graph node functions
def general_query(state: GraphState, llm: LLM) -> GraphState:
    """
    Processes a user query using an LLM and updates the graph state with the response.

    Args:
        state (GraphState): Current graph state containing the user's query.
        llm (LLM): Language model to process the query.

    Returns:
        GraphState: Updated state with the LLM's response in "agent_out".
    """
    print("> General query")
    prompt = ChatPromptTemplate.from_template(
        "Answer based on the user's query: {query}"
    )
    chain = prompt | llm
    response = chain.invoke({"query": state["query"]})
    if isinstance(response, BaseMessage):
        state["agent_out"] = response.content
    else:
        state["agent_out"] = str(response)
    return state


def database_agent(state: GraphState, llm: LLM) -> GraphState:
    """
    Executes a database query using an Elasticsearch agent and updates the graph state.

    The agent queries indices and field names in the Elasticsearch database,
    selects the appropriate index (`organization_dev_2`), and answers the user's question.

    Args:
        state (GraphState): Current graph state containing the user's query.

    Returns:
        GraphState: Updated state with the agent's output in "agent_out" and
                    the Elasticsearch query in "es_query".
    """

    print("> database agent")
    input_data = {
        "input": f"""
        You are an Elasticsearch database agent designed to accurately understand and respond to user queries. Follow these steps:

            1. Understand the user query to determine the required information.
            2. Query the indices in the Elasticsearch database.
            3. Retrieve the mappings and field names relevant to the query.
            4. Use the organization_dev_2 index to extract the necessary data.
            5. Present the response in a clear and natural language format, addressing the user's question directly.

        User's quer:
        ```{state["query"]}```
        """
    }
    agent_exec = agent_factory_claude(llm)
    res = agent_exec.invoke(input_data)
    state["agent_out"] = res["output"]

    es_queries, es_results = {}, {}
    for i, action in enumerate(res.get("intermediate_steps", []), start=1):
        if action[0].tool == "elastic_index_search_tool":
            es_queries[f"query_{i}"] = json.loads(
                action[0].tool_input.get("query") or "{}"
            )
            es_results[f"query_{i}"] = ast.literal_eval(action[-1] or "{}")

    # if len(res["intermediate_steps"]) > 1:
    #     es_queries = {
    #         f"query_{i}": action[0].tool_input.get("query", "")
    #         for i, action in enumerate(res.get("intermediate_steps", []), start=1)
    #         if action[0].tool == "elastic_index_search_tool"
    #     }

    #     es_results = {
    #         f"result_{i}": action[-1]
    #         for i, action in enumerate(res.get("intermediate_steps", []), start=1)
    #         if action[0].tool == "elastic_index_search_tool"
    #     }

    #     state["es_query"] = es_queries
    #     state["es_result"] = es_results
    # else:
    #     state["es_query"] = res["intermediate_steps"][-1][0].tool_input["query"]
    #     state["es_result"] = {"result": res["intermediate_steps"][-2][-1]}

    state["es_query"] = es_queries
    state["es_result"] = es_results
    return state


def analyse_query(state: GraphState, llm: LLM) -> GraphState:
    """
    Analyzes the user's query to classify it as either general or database-specific
    and determines the next processing step.

    Args:
        state (GraphState): Current graph state containing the user's query.
        llm (LLM): Language model used for query analysis.

    Returns:
        GraphState: Updated state with the classification result and the
                    next processing step in "next_step".
    """

    print("> analyse query")
    prompt_template = """Your task is to analyze the query  ```{query}``` and classify it in:
    general: it's a basic general enquiry
    Database: query which is complicated and would require to go into the database and extract specific information
    Output format:
    {{"category": "<your_classification>"}}
    """

    # Create the prompt
    prompt = ChatPromptTemplate.from_template(prompt_template)

    # Define the parser
    parser = PydanticOutputParser(pydantic_object=AnalysisResult)

    # Create the chain
    chain = RunnableSequence(prompt, llm)
    # Invoke the chain with the query
    response = chain.invoke({"query": state["query"]})
    if "Database" in response.content:
        state["next_step"] = "es_database_agent"
    else:
        state["next_step"] = "general_query"
    return state


def final_answer(state: GraphState, llm: LLM) -> GraphState:
    """
    Generates and presents the final response based on the user's query and the AI's output.

    Args:
        state (GraphState): Current graph state containing the query and AI output.
        llm (LLM): Language model used to format the final response.

    Returns:
        GraphState: Updated state with the formatted final answer in "agent_out".
    """

    print("> Final Answer")
    prompt_template = """
    You are a chat agent that takes outputs generated by Elasticsearch and presents them in a conversational, natural language format, as if responding to a user's query.

    Query: ```{query}```

    AI Output: 
    ```{output}```
    """
    prompt = ChatPromptTemplate.from_template(prompt_template)
    chain = RunnableSequence(prompt, llm)
    response = chain.invoke({"query": state["query"], "output": state["agent_out"]})

    return {"agent_out": response.content}


def build_compute_graph(llm: LLM) -> StateGraph:
    """
    Constructs a compute graph for processing user queries using a defined workflow.

    The workflow includes nodes for query analysis, handling general or database-specific queries,
    and generating the final response. Conditional logic determines the path based on query type.

    Args:
        llm (LLM): Language model to be used in various nodes for processing queries.

    Returns:
        StateGraph: Configured compute graph ready for execution.
    """
    # Create the workflow
    workflow = StateGraph(GraphState)

    # Add nodes
    workflow.add_node("analyse", partial(analyse_query, llm=llm))
    workflow.add_node("general_query", partial(general_query, llm=llm))
    workflow.add_node("es_database_agent", partial(database_agent, llm=llm))
    workflow.add_node("final_answer", partial(final_answer, llm=llm))

    # Set entry point
    workflow.set_entry_point("analyse")

    # Add conditional edges
    workflow.add_conditional_edges(
        "analyse",
        lambda x: x["next_step"],  # Use the return value of analyse_query directly
        {"es_database_agent": "es_database_agent", "general_query": "general_query"},
    )

    # Add edges to end the workflow
    workflow.add_edge("es_database_agent", "final_answer")
    workflow.add_edge("general_query", "final_answer")
    workflow.add_edge("final_answer", END)

    return workflow


class ElasticGraph(StateGraph):
    llm: LLM
    tools: List[Tool]

    def __init__(self, llm: LLM, tools: List[Tool]):
        super().__init__(GraphState)
        self.llm = llm
        self.tools = tools
        self.construct_graph()

    def Extract_PCS_Codes(self, state):
        """Todo: Add Subject heirarchies, Population, Geo"""
        print("query", state["query"])
        autocoding_api = AutocodingAPI()
        autocoding_response = autocoding_api(text=state["query"]).get("data", {})
        # population_served = autocoding_response.get("population", {})
        subjects = autocoding_response.get("subject", {})
        descriptions = []
        heirarchy_string = []
        if subjects and isinstance(subjects, list) and "description" in subjects[0]:
            for subject in subjects:
                # if subject['description'] in subjects_list:
                descriptions.append(subject["description"])
                heirarchy_string.append(
                    extract_heirarchy(subject["full_code"], subject["description"])
                )
        print("descriptions", descriptions)

        populations = autocoding_response.get("population", {})
        population_dict = []
        if (
            populations
            and isinstance(populations, list)
            and "description" in populations[0]
        ):
            for population in populations:
                population_dict.append(population["description"])
        state["pcs_codes"] = {
            "subject": descriptions,
            "heirarchy_string": heirarchy_string,
            "population": population_dict,
        }
        print("pcs_codes_new", state["pcs_codes"])
        return state

    def agent_factory(self) -> AgentExecutor:
        """
        Creates and configures an AgentExecutor instance for interacting with Elasticsearch.

        This function initializes an OpenAI GPT-4-based LLM with specific parameters,
        constructs a prompt tailored for Elasticsearch assistance, and integrates the
        agent with a set of tools to handle user queries. The agent is designed to work
        with OpenAI functions for enhanced capabilities.

        Returns:
            AgentExecutor: Configured agent ready to execute tasks with specified tools,
                        providing detailed intermediate steps for transparency.
        """

        # llm = ChatOpenAI(
        #     model="gpt-4o", temperature=0, api_key=OPENAI["key"], streaming=False
        # )

        tags_ = []
        agent = AgentType.OPENAI_FUNCTIONS
        tags_.append(agent.value if isinstance(agent, AgentType) else agent)
        # Create the prompt
        prompt = ChatPromptTemplate.from_messages(
            [
                ("system", "You are a helpful elasticsearch assistant"),
                MessagesPlaceholder(variable_name="chat_history", optional=True),
                ("human", "{input}"),
                MessagesPlaceholder(variable_name="agent_scratchpad"),
            ]
        )

        # Create the agent
        agent_obj = create_openai_functions_agent(self.llm, tools, prompt)

        return AgentExecutor.from_agent_and_tools(
            agent=agent_obj,
            tools=tools,
            tags=tags_,
            verbose=True,
            return_intermediate_steps=True,
        )

    def agent_factory_claude(self, pcs_codes, prefix) -> AgentExecutor:
        """
        Creates and configures an AgentExecutor instance for interacting with Elasticsearch.

        This function initializes an OpenAI GPT-4-based LLM with specific parameters,
        constructs a prompt tailored for Elasticsearch assistance, and integrates the
        agent with a set of tools to handle user queries. The agent is designed to work
        with OpenAI functions for enhanced capabilities.

        Returns:
            AgentExecutor: Configured agent ready to execute tasks with specified tools,
                        providing detailed intermediate steps for transparency.
        """
        prompt = ChatPromptTemplate.from_messages(
            [
                ("system", f"You are a helpful elasticsearch assistant. {prefix}"),
                MessagesPlaceholder(variable_name="chat_history", optional=True),
                ("human", "{input}"),
                MessagesPlaceholder(variable_name="agent_scratchpad"),
            ]
        )

        tools = [
            # ListIndicesTool(),
            IndexShowDataTool(),
            IndexDetailsTool(),
            create_search_tool(pcs_codes=pcs_codes),
        ]
        agent = create_tool_calling_agent(self.llm, tools, prompt)

        agent_executor = AgentExecutor.from_agent_and_tools(
            agent=agent,
            tools=tools,
            verbose=True,
            return_intermediate_steps=True,
        )
        # Create the agent
        return agent_executor

    def analyse_query(self, state: GraphState) -> GraphState:
        """
        Analyzes the user's query to classify it as either general or database-specific
        and determines the next processing step.

        Args:
            state (GraphState): Current graph state containing the user's query.
            llm (LLM): Language model used for query analysis.

        Returns:
            GraphState: Updated state with the classification result and the
                        next processing step in "next_step".
        """

        print("> analyse query")
        prompt_template = """Your task is to analyze the query  ```{query}``` and classify it in:
        grant: Grant Index - A query where users seek information about grants, funding opportunities, and grantmakers. This includes inquiries about the purpose of funding, eligibility criteria, application processes, grant recipients, funding amounts, deadlines, and how grants can be used for specific projects or initiatives. Users may also request grants tailored to their unique needs, industries, or social impact goals

        org: Org Index - Query which asks speicific details about the organizations, their mission statement, where they are located
        Output format:
        {{"category": "<your_classification>"}}
        """
        parser = PydanticOutputParser(pydantic_object=AnalysisResult)

        # Create the prompt
        prompt = PromptTemplate(
            template=prompt_template,
            input_variables=["query"],
            partial_variables={"format_instructions": parser.get_format_instructions()},
        )
        # Create the chain
        chain = RunnableSequence(prompt, self.llm, parser)
        # Invoke the chain with the query
        response = chain.invoke({"query": state["query"]})
        if response.category == "grant":
            state["next_step"] = "grant-index"
        else:
            state["next_step"] = "org-index"
        return state

    def grant_index_agent(self, state: GraphState) -> GraphState:
        print("> Grant Index Agent")
        # autocoding test

        input_data = {
            "input": f"""
            You are an Elasticsearch database agent designed to accurately understand and respond to user queries. Follow these steps:

                1. Understand the user query to determine the required information.
                2. Query the indices in the Elasticsearch database.
                3. Retrieve the mappings and field names relevant to the query.
                4. Use the ``grants_qa_1`` index to extract the necessary data.
                5. Ensure that you correctly identify the grantmaker (funder) or recipient (funded entity) if mentioned in the query. 
                   Users may not always provide the exact name, so the Elasticsearch query should accommodate partial or incomplete names 
                   by searching for relevant keywords.
                6. Present the response in a clear and natural language format, addressing the user's question directly.
    
            Description of some of the fields in the index but rest of the fields which are not here should be easy to understand:
                *fiscal_year: Year when grantmaker allocates budget for funding and grants. format YYYY 
                *recipient_state: is abbreviated for eg. NY, FL, CA
                *recipient_city - Full Name of the City e.g, New York City, Boston 
                *recipient_country - Country Abbreviation of the recipient organization e.g USA

            Note: Do not include `title`, `program_area`, `text` field in the elastic search query
            User's query:
            ```{state["query"]}```
            """
        }
        pcs_codes = state["pcs_codes"]
        pcs_match_term = ""
        for pcs_code in pcs_codes["subject"]:
            if pcs_code != "Philanthropy":
                pcs_match_term += f"*'pcs_v3.subject.value.name': {pcs_code}* \n"

        for pcs_code in pcs_codes["population"]:
            if pcs_code != "Other population":
                pcs_match_term += f"*'pcs_v3.population.value.name': {pcs_code}* \n"
        print("pcs_match_term", pcs_match_term)
        prefix = f""" 
        You are an intelligent agent tasked with generating accurate Elasticsearch DSL queries.
        Analyze the intent behind the query and determine the appropriate Elasticsearch operations required. 
        Guidelines for generating right elastic seach query:
            1. Automatically determine whether to return document hits or aggregation results based on the query structure.
            2. Use keyword fields instead of text fields for aggregations and sorting to avoid fielddata errors
            3. Avoid using field.keyword if a keyword field is already present to prevent redundant queries.
            4. Ensure efficient query execution by selecting appropriate query types for filtering, searching, and aggregating.

         Instruction for pcs_v3 Field-
            If {pcs_codes['subject']} not empty:
            Only include all of the following match terms. No other pcs_v3 fields should be added, duplicated, or altered except for those listed below.
                - {pcs_match_term}
        """
        agent_exec = self.agent_factory_claude(
            pcs_codes=state["pcs_codes"], prefix=prefix
        )
        res = agent_exec.invoke(input_data)
        state["agent_out"] = res["output"]
        es_queries, es_results = {}, {}
        for i, action in enumerate(res.get("intermediate_steps", []), start=1):
            if action[0].tool == "elastic_index_search_tool":
                print("query", action[0].tool_input.get("query"))
                es_queries[f"query_{i}"] = json.loads(
                    action[0].tool_input.get("query") or "{}"
                )
                es_results[f"query_{i}"] = ast.literal_eval(action[-1] or "{}")

        state["es_query"] = es_queries
        state["es_result"] = es_results
        return state

    def org_index_agent(self, state: GraphState) -> GraphState:
        """
        Executes a database query using an Elasticsearch agent and updates the graph state.

        The agent queries indices and field names in the Elasticsearch database,
        selects the appropriate index (`organization_dev_2`), and answers the user's question.

        Args:
            state (GraphState): Current graph state containing the user's query.

        Returns:
            GraphState: Updated state with the agent's output in "agent_out" and
                        the Elasticsearch query in "es_query".
        """

        print("> Org Index Agent")
        mapping_description = """ 
                    "admin1_code": "state abbreviation"
                    "admin1_description": "Full name/label of the state"
                    "city": Full Name of the city with 1st letter being capital for e.g. New York City
                    "assets": "The assets value of the most recent fiscals available for the organization."
                    "country_code": "Country abbreviation"
                    "country_name": "Country name"
                    "fiscal_year": "The year of the most recent fiscals available for the organization. (YYYY format)"
                    "mission_statement": "The mission statement of the organization."
                    "roles": "grantmaker: Indicates the organization gives grants., recipient: Indicates the organization receives grants., company: Indicates the organization is a company/corporation."

            """
        input_data = {
            "input": f"""
            You are an Elasticsearch database agent designed to accurately understand and respond to user queries. Follow these steps:

                1. Understand the user query to determine the required information.
                2. Query the indices in the Elasticsearch database.
                3. Retrieve the mappings and field names relevant to the query.
                4. Use the `organization_qa_ds1` index to extract the necessary data.
                5. Present the response in a clear and natural language format, addressing the user's question directly.
            
                
            Given Below is mapping description of some of the fields
            ```{mapping_description}```


            User's query:
            ```{state["query"]}```
            """
        }

        pcs_codes = state["pcs_codes"]
        pcs_match_term = ""
        for pcs_code in pcs_codes["subject"]:
            pcs_match_term += f'"taxonomy_descriptions": "{pcs_code}"  \n"'

        print("pcs_match_term", pcs_match_term)
        prefix = f"""You are an intelligent agent tasked with generating accurate Elasticsearch DSL queries.
        Analyze the intent behind the query and determine the appropriate Elasticsearch operations required. 
        Guidelines for generating right elastic seach query:
            1. Automatically determine whether to return document hits or aggregation results based on the query structure.
            2. Use keyword fields instead of text fields for aggregations and sorting to avoid fielddata errors
            3. Avoid using field.keyword if a keyword field is already present to prevent redundant queries.
            4. Ensure efficient query execution by selecting appropriate query types for filtering, searching, and aggregating.
            
            Instructions to use `taxonomy_descriptions` field:
            If {pcs_codes['subject']} not empty, only add the following match term:
                    Only add the following `match` term, No other `taxonomy_descriptions` fields should be added, duplicated, or modified except belowIf {pcs_codes['subject']} not empty,
                    - {pcs_match_term}


            Avoid using `ntee_major_description` field in the es query

            """
        agent_exec = self.agent_factory_claude(
            pcs_codes=state["pcs_codes"], prefix=prefix
        )
        res = agent_exec.invoke(input_data)
        state["agent_out"] = res["output"]

        es_queries, es_results = {}, {}
        for i, action in enumerate(res.get("intermediate_steps", []), start=1):
            if action[0].tool == "elastic_index_search_tool":
                es_queries[f"query_{i}"] = json.loads(
                    action[0].tool_input.get("query") or "{}"
                )
                es_results[f"query_{i}"] = ast.literal_eval(action[-1] or "{}")

        state["es_query"] = es_queries
        state["es_result"] = es_results
        return state

    def final_answer(self, state: GraphState) -> GraphState:
        """
        Generates and presents the final response based on the user's query and the AI's output.

        Args:
            state (GraphState): Current graph state containing the query and AI output.
            llm (LLM): Language model used to format the final response.

        Returns:
            GraphState: Updated state with the formatted final answer in "agent_out".
        """

        print("> Final Answer")
        prompt_template = """
        You are a chat agent that takes outputs generated by Elasticsearch and presents them in a conversational, natural language format, as if responding to a user's query.

        Query: ```{query}```

        AI Output: 
        ```{output}```
        """
        prompt = ChatPromptTemplate.from_template(prompt_template)
        chain = RunnableSequence(prompt, self.llm)
        response = chain.invoke({"query": state["query"], "output": state["agent_out"]})

        return {"agent_out": response.content}

    def construct_graph(self) -> StateGraph:
        """
        Constructs a compute graph for processing user queries using a defined workflow.

        The workflow includes nodes for query analysis, handling general or database-specific queries,
        and generating the final response. Conditional logic determines the path based on query type.

        Args:
            llm (LLM): Language model to be used in various nodes for processing queries.

        Returns:
            StateGraph: Configured compute graph ready for execution.
        """

        # Add nodes
        self.add_node("Context_Extraction", self.Extract_PCS_Codes)
        self.add_node("analyse", self.analyse_query)
        self.add_node("grant-index", self.grant_index_agent)
        self.add_node("org-index", self.org_index_agent)
        self.add_node("final_answer", self.final_answer)

        # Set entry point
        self.set_entry_point("Context_Extraction")
        self.add_edge("Context_Extraction", "analyse")

        # Add conditional edges
        self.add_conditional_edges(
            "analyse",
            lambda x: x["next_step"],  # Use the return value of analyse_query directly
            {"org-index": "org-index", "grant-index": "grant-index"},
        )

        # Add edges to end the workflow
        self.add_edge("org-index", "final_answer")
        self.add_edge("grant-index", "final_answer")
        self.add_edge("final_answer", END)


def build_elastic_graph(llm: LLM, tools: List[Tool]):
    """Compile Elastic Agent Graph"""
    elastic_graph = ElasticGraph(llm=llm, tools=tools)
    graph = elastic_graph.compile()
    return graph