from llmware.agents import LLMfx
text = ("Tesla stock fell 8% in premarket trading after reporting fourth-quarter revenue and profit that ""missed analysts’ estimates. The electric vehicle company also warned that vehicle volume growth in ""2024 'may be notably lower' than last year’s growth rate. Automotive revenue, meanwhile, increased ""just 1% from a year earlier, partly because the EVs were selling for less than they had in the past. ""Tesla implemented steep price cuts in the second half of the year around the world. In a Wednesday ""presentation, the company warned investors that it’s 'currently between two major growth waves.'")
# create an agent using the LLMfx class
agent = LLMfx()

# load the text to process
agent.load_work(text)

# load 'models' as 'tools' to be used in the analysis process
agent.load_tool("sentiment")
agent.load_tool("extract")
agent.load_tool("topics")
agent.load_tool("boolean")

# run function calls using the different tools
agent.sentiment()
agent.topics()
agent.extract(params=["company"])
agent.extract(params=["automotive revenue growth"])
agent.xsum()
agent.boolean(params=["is 2024 growth expected to be strong? (explain)"])
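# note: each tool call above executes a function call against the loaded work text;
# the results accumulate in agent.response_list and the running log in agent.journal,
# both of which are used below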
# at the end of processing, show the report that was automatically aggregated by key
report = agent.show_report()

# display a summary of the activity in the process
activity_summary = agent.activity_summary()

# list the responses gathered
for i, entries in enumerate(agent.response_list):
    print("update: response analysis: ", i, entries)
output = {"report": report, "activity_summary": activity_summary, "journal": agent.journal}
# This example illustrates a simple contract analysis
# using a RAG-optimized LLM running locally

import os
import re

from llmware.prompts import Prompt, HumanInTheLoop
from llmware.setup import Setup
from llmware.configs import LLMWareConfig
def contract_analysis_on_laptop(model_name):
    # In this scenario, we will:
    # -- download a set of sample contract files
    # -- create a Prompt and load a BLING LLM model
    # -- parse each contract, extract the relevant passages, and pass questions to a local LLM

    # Main loop - iterate through each contract:
    #
    # 1. parse the document in memory (convert the PDF file into text chunks with metadata)
    # 2. filter the parsed text chunks with a "topic" (e.g., "governing law") to extract relevant passages
    # 3. package and assemble the text chunks into a model-ready context
    # 4. ask three key questions for each contract to the LLM
    # 5. print the results to the screen
    # 6. save the results in both json and csv for further processing and review
    # Load the llmware sample files
    print("\n > Loading the llmware sample files...")

    sample_files_path = Setup().load_sample_files()
    contracts_path = os.path.join(sample_files_path, "Agreements")
    # Query list - the three main topics and questions that we would like the LLM to analyze for each contract
    query_list = {"executive employment agreement": "What are the names of the two parties?",
                  "base salary": "What is the executive's base salary?",
                  "vacation": "How many vacation days will the executive receive?"}
    # Load the selected model by the name passed into the function
    print(f"\n > Loading model {model_name}...")
    prompter = Prompt().load_model(model_name, temperature=0.0, sample=False)
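    # note: temperature=0.0 with sample=False makes generation deterministic (greedy
    # decoding), which keeps fact-extraction answers consistent across runs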
    # Main loop
    for i, contract in enumerate(os.listdir(contracts_path)):

        # exclude the Mac file artifact (annoying, but a fact of life in demos)
        if contract != ".DS_Store":
print("\nAnalyzing contract: ", str(i+1), contract)
print("LLM Responses:")
for key, value in query_list.items():
                # steps 1 + 2 + 3 above - the contract is parsed, text-chunked, filtered by the topic key,
                # ... and then packaged into the prompt
                source = prompter.add_source_document(contracts_path, contract, query=key)

                # step 4 above - call the LLM with the 'source' information already packaged into the prompt
                responses = prompter.prompt_with_source(value, prompt_name="default_with_context")

                # step 5 above - print the results to the screen
                for r, response in enumerate(responses):
                    print(key, ":", re.sub("[\n]", " ", response["llm_response"]).strip())
            # we're done with this contract, so clear the source from the prompt
            prompter.clear_source_materials()
    # step 6 above - save the analysis to jsonl and csv

    # save the jsonl report to the /prompt_history folder
    print("\nPrompt state saved at: ", os.path.join(LLMWareConfig.get_prompt_path(), prompter.prompt_id))
    prompter.save_state()

    # save a csv report that includes the model, response, prompt, and evidence for human-in-the-loop review
    csv_output = HumanInTheLoop(prompter).export_current_interaction_to_csv()
    print("csv output saved at:", csv_output)
if __name__ == "__main__":
    # use a local cpu model - try the newest: a RAG finetune of Phi-3, quantized and packaged in GGUF
    model = "bling-phi-3-gguf"

    contract_analysis_on_laptop(model)
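    # a hedged aside, assuming the standard llmware import path: other BLING-family
    # models can be swapped in by name above - the registered models can be listed
    # with ModelCatalog, e.g.:
    #
    #   from llmware.models import ModelCatalog
    #   for m in ModelCatalog().list_all_models():
    #       print(m["model_name"])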