class BeliefState:
    def __init__(self):
        self.hypotheses = []  # each is a hypothesis dict: {"desc": ..., "confidence": ..., "evidence": [...]}
        self.evidence = []    # raw snippets collected so far
        self.history = []     # interaction log

    def update_with_evidence(self, new_evidence):
        self.evidence.extend(new_evidence)
        # Simple example: raise the confidence of hypotheses related to the new evidence
        for h in self.hypotheses:
            if any(tag in item
                   for tag in extract_keywords(h["desc"])
                   for item in new_evidence):
                h["confidence"] = min(1.0, h["confidence"] + 0.2)

    def add_hypotheses(self, hypos):
        self.hypotheses.extend(hypos)

    def best_hypothesis(self):
        return max(self.hypotheses, key=lambda h: h["confidence"], default=None)
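
# extract_keywords() used above is not defined in this snippet. A minimal sketch,
# assuming plain tokenization with stop-word filtering; in practice this could be
# an LLM call or a proper keyword extractor. The stop-word list is illustrative.
_STOP_WORDS = {"the", "a", "an", "of", "in", "is", "are", "and", "to", "for", "with"}

def extract_keywords(text):
    # Lowercase, split on whitespace, strip surrounding punctuation,
    # then drop stop words and very short tokens
    tokens = (t.strip(".,:;!?()[]\"'") for t in text.lower().split())
    return [t for t in tokens if len(t) > 2 and t not in _STOP_WORDS]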
def exploratory_agent(initial_question, tools, llm_call, belief: BeliefState):
    # 1. Initialize hypotheses
    initial_hypos = generate_initial_hypotheses(initial_question)  # returns several candidate hypotheses
    belief.add_hypotheses(initial_hypos)

    while True:
        # 2. Pick the hypothesis most worth exploring next (high uncertainty / evidence gaps)
        hypo = select_hypothesis_to_explore(belief)
        if not hypo:
            break  # no usable hypotheses left

        # 3. Formulate search queries (keywords / paths) and call the most suitable tools
        search_queries = formulate_search_queries(hypo)
        exploration_results = invoke_tools(search_queries, tools)

        # 4. Update the belief state (associate the new evidence with hypotheses)
        belief.update_with_evidence(exploration_results)
        belief.history.append({
            "role": "explore",
            "hypothesis": hypo,
            "results": exploration_results
        })

        # 5. Let the LLM "think" about the current evidence: self-verification + next action
        think_prompt = build_think_prompt(initial_question, belief)
        llm_output = llm_call(think_prompt)
        parsed = interpret_llm_output(llm_output)
        # parsed may contain: revised hypotheses, newly generated hypotheses, whether to
        # keep exploring, candidate answers, counterexample-check requests, etc.
        if parsed.get("new_hypotheses"):
            belief.add_hypotheses(parsed["new_hypotheses"])
        if parsed.get("evidence"):
            belief.update_with_evidence(parsed["evidence"])
        belief.history.append({
            "role": "think",
            "prompt": think_prompt,
            "llm_output": llm_output
        })

        # 6. Termination check (confidence high enough + counterexample check passed)
        if should_terminate(belief, parsed):
            final_answer = synthesize_answer(belief, parsed)
            return final_answer, belief.history

        # Otherwise keep looping: possibly reframe the question, dispatch sub-agents,
        # or follow up on details

    # Ran out of hypotheses without meeting the termination criteria
    return None, belief.history
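
# The helpers referenced in the loop (generate_initial_hypotheses, select_hypothesis_to_explore,
# should_terminate, ...) are likewise not defined here. Two minimal sketches follow, assuming a
# confidence threshold and a per-hypothesis exploration budget; the names, thresholds, and the
# "explored" / "counterexample_found" / "continue_exploring" keys are illustrative assumptions,
# not part of the original design.

CONFIDENCE_THRESHOLD = 0.8  # assumed: confidence at which a hypothesis counts as well-supported
MAX_EXPLORATIONS = 3        # assumed: how many times a single hypothesis may be explored

def select_hypothesis_to_explore(belief: BeliefState):
    # Prefer hypotheses that are still uncertain and have exploration budget left;
    # among those, pick the one with the biggest evidence gap (least attached evidence).
    candidates = [
        h for h in belief.hypotheses
        if h["confidence"] < CONFIDENCE_THRESHOLD
        and h.get("explored", 0) < MAX_EXPLORATIONS
    ]
    if not candidates:
        return None
    chosen = min(candidates, key=lambda h: len(h.get("evidence", [])))
    chosen["explored"] = chosen.get("explored", 0) + 1
    return chosen

def should_terminate(belief: BeliefState, parsed):
    # Terminate when the best hypothesis is confident enough, the LLM did not report a
    # counterexample, and it did not explicitly ask to keep exploring.
    best = belief.best_hypothesis()
    if best is None:
        return True
    return (best["confidence"] >= CONFIDENCE_THRESHOLD
            and not parsed.get("counterexample_found", False)
            and not parsed.get("continue_exploring", False))

# Example wiring (tool registry and LLM client are hypothetical placeholders):
# belief = BeliefState()
# answer, trace = exploratory_agent(
#     "Why does the nightly batch job time out?",   # hypothetical question
#     tools={"code_search": code_search_tool},      # hypothetical tool registry
#     llm_call=llm_client.complete,                 # hypothetical LLM wrapper
#     belief=belief,
# )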