workflow_config:
  name: "RAG with web search"
  # List of tools that the agent can activate if the user's instructions require it
  available_tools:
    - "web search"
  nodes:
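    # START branches via routing_split to edit_system_prompt or filter_history,
    # depending on what the user asked for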
- name: "START"
conditional_edge:
routing_function: "routing_split"
conditions: ["edit_system_prompt", "filter_history"]
- name: "edit_system_prompt"
edges: ["filter_history"]
- name: "filter_history"
edges: ["dynamic_retrieve"]
- name: "dynamic_retrieve"
conditional_edge:
routing_function: "tool_routing"
conditions: ["run_tool", "generate_rag"]
- name: "run_tool"
edges: ["generate_rag"]
- name: "generate_rag" # the name of the last node, from which we want to stream the answer to the user
edges: ["END"]
  tools:
    - name: "cited_answer"
# Maximum number of previous conversation iterations
# to include in the context of the answer
max_history: 10
# Number of chunks returned by the retriever
k: 40
# Reranker configuration
reranker_config:
  # The reranker supplier to use
  supplier: "cohere"
  # The model to use for the reranker for the given supplier
  model: "rerank-multilingual-v3.0"
  # Number of chunks returned by the reranker
  top_n: 5
  # Among the chunks returned by the reranker, only those with relevance
  # scores equal to or above relevance_score_threshold are returned
  # to the LLM to generate the answer (allowed values are between 0 and 1;
  # a value of 0.1 works well with the cohere and jina rerankers)
  relevance_score_threshold: 0.01
# LLM configuration
llm_config:
  # Maximum number of tokens passed to the LLM to generate the answer
  max_input_tokens: 8000
  # Temperature for the LLM
  temperature: 0.7
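
# Minimal usage sketch (hypothetical helper and file names, not part of this config):
# load this file as the retrieval configuration and pass it when querying, e.g.
#   config = load_retrieval_config("rag_with_web_search.yaml")
#   answer = assistant.ask("What is in the news today?", retrieval_config=config)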