from llama_index.llms.openai_like import OpenAILike
from llama_index.core.base.llms.types import ChatMessage, MessageRole
from llama_index.core import PromptTemplate
llm = OpenAILike(
    model="llama3",
    api_base="your-local-llama3-api",
    api_key="fake_key",
    is_chat_model=True,
)
prompt_str = "Please generate related movies to {movie_name}"
prompt_tmpl = PromptTemplate(prompt_str)
response = llm.chat(
    [
        ChatMessage(
            role=MessageRole.SYSTEM,
            content="You are a helpful assistant.",
        ),
        ChatMessage(
            role=MessageRole.USER,
            content=prompt_tmpl.format(movie_name="Avengers"),
        ),
    ]
)
print(f"response: {response}")
# Output
response: assistant: Here are some movie recommendations that are similar to the Avengers franchise:

1. **Guardians of the Galaxy** (2014) - Another Marvel superhero team-up film, with a fun and quirky tone.
2. **The Justice League** (2017) - A DC Comics adaptation featuring iconic superheroes like Superman, Batman, Wonder Woman, and more.
......
In the OpenAILike object, the model parameter is the model name, and api_base is the address of the locally deployed Llama3 API service.
from llama_index.vector_stores.elasticsearch import ElasticsearchStore
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, StorageContext
from llama_index.core.node_parser import SentenceSplitter
from llms import CustomEmbeddings
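The indexing step itself is not shown above. The following is only a minimal sketch of how the index and the two retrievers used later (text_retriever and vector_retriever) could be built from these imports; the data directory, Elasticsearch address, index name, chunk size, and the CustomEmbeddings constructor arguments are placeholders, not values from the original article.

```python
# Sketch only: paths, URLs and index names below are placeholders
documents = SimpleDirectoryReader("./data").load_data()

# Store the embeddings in a local Elasticsearch index
vector_store = ElasticsearchStore(
    index_name="movies",                 # hypothetical index name
    es_url="http://localhost:9200",      # local Elasticsearch address
)
storage_context = StorageContext.from_defaults(vector_store=vector_store)

# Build the vector index; CustomEmbeddings wraps the locally served embedding model
index = VectorStoreIndex.from_documents(
    documents,
    storage_context=storage_context,
    transformations=[SentenceSplitter(chunk_size=512)],
    embed_model=CustomEmbeddings(),      # constructor arguments depend on your implementation
)

# Dense retriever over the vector index
vector_retriever = index.as_retriever(similarity_top_k=2)

# text_retriever is assumed to be a keyword/BM25-style retriever over the same
# documents, e.g. an ElasticsearchStore configured with a BM25 retrieval strategy
# or a BM25Retriever built from the parsed nodes.
```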
from typing import List
from llama_index.core.schema import NodeWithScore
def fuse_results(results_dict, similarity_top_k: int = 2):
    """Fuse results."""
    k = 60.0
    fused_scores = {}
    text_to_node = {}

    # Compute reciprocal rank scores
    for nodes_with_scores in results_dict.values():
        for rank, node_with_score in enumerate(
            sorted(nodes_with_scores, key=lambda x: x.score or 0.0, reverse=True)
        ):
            text = node_with_score.node.get_content()
            text_to_node[text] = node_with_score
            if text not in fused_scores:
                fused_scores[text] = 0.0
            fused_scores[text] += 1.0 / (rank + k)
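This is reciprocal rank fusion (RRF): each node earns 1 / (rank + k) from every result list it appears in, and k = 60 damps the influence of any single ranking. The snippet above stops after accumulating the scores; a minimal sketch of the remaining body (sorting by fused score and returning the top results), reusing the variables and signature defined above, could look like this.

```python
    # Sort nodes by their fused score, highest first
    reranked_results = dict(
        sorted(fused_scores.items(), key=lambda x: x[1], reverse=True)
    )

    # Attach the fused score to each node and keep only the top-k
    reranked_nodes: List[NodeWithScore] = []
    for text, score in reranked_results.items():
        reranked_nodes.append(text_to_node[text])
        reranked_nodes[-1].score = score

    return reranked_nodes[:similarity_top_k]
```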
from tqdm.asyncio import tqdm

async def run_queries(query, retrievers):
    """Run query against retrievers."""
    tasks = []
    for i, retriever in enumerate(retrievers):
        tasks.append(retriever.aretrieve(query))

    # Await all retrievals concurrently with a progress bar
    task_results = await tqdm.gather(*tasks)

    results_dict = {}
    for i, query_result in enumerate(task_results):
        results_dict[(query, i)] = query_result

    return results_dict
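Since run_queries is a coroutine, it has to be driven by an event loop. A quick usage sketch (in a notebook you may need nest_asyncio.apply() first; the retriever names are the ones assumed above):

```python
import asyncio

# Run the same question against both retrievers, then fuse the two rankings
results_dict = asyncio.run(
    run_queries(
        "Which two members of the Avengers created Ultron?",
        [text_retriever, vector_retriever],
    )
)
fused_nodes = fuse_results(results_dict, similarity_top_k=2)
```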
from typing import List
from llama_index.core import QueryBundle
from llama_index.core.retrievers import BaseRetriever
from llama_index.core.schema import NodeWithScore
import asyncio
class FusionRetriever(BaseRetriever):
    """Ensemble retriever with fusion."""
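Only the class header is shown above. A minimal sketch of the body, consistent with how FusionRetriever is constructed and queried below and reusing the run_queries and fuse_results helpers, might be:

```python
    def __init__(
        self,
        retrievers: List[BaseRetriever],
        similarity_top_k: int = 2,
    ) -> None:
        """Init params."""
        self._retrievers = retrievers
        self._similarity_top_k = similarity_top_k
        super().__init__()

    def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
        """Run the query against all retrievers and fuse the ranked results."""
        results_dict = asyncio.run(
            run_queries(query_bundle.query_str, self._retrievers)
        )
        final_results = fuse_results(
            results_dict, similarity_top_k=self._similarity_top_k
        )
        return final_results
```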
fusion_retriever = FusionRetriever(
    [text_retriever, vector_retriever], similarity_top_k=2
)
question = "Which two members of the Avengers created Ultron?"
nodes = fusion_retriever.retrieve(question)
for node in nodes:
    print("-" * 50)
    print(f"node content: {node.text[:100]}...")
    print(f"node score: {node.score}\n")
# Output
--------------------------------------------------
node content: In the Eastern European country of Sokovia, the Avengers—Tony Stark, Thor, Bruce Banner, Steve Roger...
node score: 0.03306010928961749

--------------------------------------------------
node content: Thor departs to consult with Dr. Erik Selvig on the apocalyptic future he saw in his hallucination, ...
node score: 0.016666666666666666
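The answer in the next output block ("Tony Stark and Bruce Banner.") implies the retriever was also plugged into a query engine so the LLM can synthesize a response from the retrieved nodes. That step is not shown above, and the node scores in that output (≈0.83 and ≈0.25) look like raw vector-similarity scores rather than the fused RRF scores, so the engine there may have been built over the plain vector_retriever. Either way, the general pattern is a minimal sketch like the following, assuming the llm and fusion_retriever objects defined earlier:

```python
from llama_index.core.query_engine import RetrieverQueryEngine

# Wrap the retriever in a query engine so the LLM answers from the retrieved context
query_engine = RetrieverQueryEngine.from_args(fusion_retriever, llm=llm)
response = query_engine.query(question)
print(f"response: {response}")
for node in response.source_nodes:
    print("-" * 50)
    print(f"node content: {node.text[:100]}...")
    print(f"node score: {node.score}\n")
```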
# Output
response: Tony Stark and Bruce Banner.
--------------------------------------------------
node content: In the Eastern European country of Sokovia, the Avengers—Tony Stark, Thor, Bruce Banner, Steve Roger...
node score: 0.8329173

--------------------------------------------------
node content: Thor departs to consult with Dr. Erik Selvig on the apocalyptic future he saw in his hallucination, ...
node score: 0.24689633