Best Development Practices


Calling a Local LLM

from langchain_community.llms import Ollama

llm = Ollama(model="llama2-chinese:7b-chat-q4_0")
llm.invoke(input="你好啊")

# Streaming output via a callback handler
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler

llm = Ollama(
    model="llama2-chinese:7b-chat-q4_0",
    callback_manager=CallbackManager([StreamingStdOutCallbackHandler()]),
)
llm.invoke(input="第一个登上月球的人是谁?")
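
If you do not need a callback handler, LLM objects also expose a stream() method that yields chunks directly; a minimal sketch, assuming the same local llama2-chinese model served by Ollama:

# Stream tokens directly without a callback handler
from langchain_community.llms import Ollama

llm = Ollama(model="llama2-chinese:7b-chat-q4_0")
for chunk in llm.stream("第一个登上月球的人是谁?"):
    print(chunk, end="", flush=True)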

Model Evaluation

from langchain_openai import OpenAI
from langchain.evaluation import load_evaluator
llm = OpenAI()
evaluator = load_evaluator("criteria", llm=llm, criteria="conciseness")
eval_result = evaluator.evaluate_strings(
    prediction="four.",
    input="What's 2+2?",
)
print(eval_result)
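
Besides built-in criteria such as conciseness, the criteria evaluator also accepts a custom criterion passed as a dict; a minimal sketch (the "numeric" criterion name and description below are only an illustration):

# Evaluate against a custom, user-defined criterion
custom_criterion = {"numeric": "Does the output contain numeric information?"}
evaluator = load_evaluator("criteria", llm=llm, criteria=custom_criterion)
eval_result = evaluator.evaluate_strings(
    prediction="The answer is 4.",
    input="What's 2+2?",
)
print(eval_result)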

# Configure the model to be evaluated (a ChatGLM endpoint)
from langchain.evaluation import load_evaluator
from langchain_community.llms.chatglm import ChatGLM

llm = ChatGLM(
    endpoint_url="https://u41510-8a66-bea62533.westc.gpuhub.com:8443/",
    #top_p=0.9,
    history=[],
    max_token=80000,
    model_kwargs={"sample_model_args": False},
)
# Built-in criterion: is the answer concise? ("criteria" evaluator with the "conciseness" criterion)
evaluator = load_evaluator("criteria", llm=llm, criteria="conciseness")
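
The ChatGLM-backed evaluator is then used in the same way as the OpenAI-backed one; a minimal usage sketch (the question/answer pair below is only a placeholder):

# Score an answer for conciseness with the ChatGLM-backed evaluator
eval_result = evaluator.evaluate_strings(
    prediction="法国的首都是巴黎。",
    input="法国的首都是哪里?",
)
print(eval_result)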


Model Comparison


# Compare the outputs of three models: OpenAI, ChatGLM, and ChatOllama
from langchain_openai import OpenAI
from langchain_community.llms.chatglm import ChatGLM
from langchain_community.chat_models import ChatOllama
from langchain.model_laboratory import ModelLaboratory

llms = [
    OpenAI(temperature=0),
    ChatGLM(endpoint_url="https://u41510-8a66-bea62533.westc.gpuhub.com:8443/", history=[]),
    ChatOllama(model="llama2-chinese"),
]
model_lab = ModelLaboratory.from_llms(llms)
model_lab.compare("齐天大圣的师傅是谁?")
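
ModelLaboratory.from_llms also accepts an optional PromptTemplate (with exactly one input variable), so every model is compared on the same formatted prompt; a minimal sketch reusing the llms list above:

# Compare the models on a shared prompt template
from langchain_core.prompts import PromptTemplate

prompt = PromptTemplate.from_template("用一句话回答:{question}")
model_lab = ModelLaboratory.from_llms(llms, prompt=prompt)
model_lab.compare("齐天大圣的师傅是谁?")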

Debugging

# Create the LLM
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(model_name="gpt-4", temperature=0)

# Define the agent's prompt (pulled from LangChain Hub)
#https://smith.langchain.com/hub/hwchase17/openai-functions-agent
from langchain import hub
prompt = hub.pull("hwchase17/openai-functions-agent")
prompt.messages

# Define tools: load prebuilt tools (note that some tools require an LLM)
from langchain.agents import load_tools

tools = load_tools(["llm-math"], llm=llm)
tools

# Create the agent
from langchain.agents import create_openai_functions_agent
agent = create_openai_functions_agent(llm, tools, prompt)

# Define the agent executor (note this differs from older versions)
from langchain.agents import AgentExecutor
agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)

agent_executor.invoke({"input": "hi!"})
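
Since the only tool loaded here is llm-math, a question that needs calculation shows the agent actually invoking the tool (the question itself is just an example):

# Ask a math question so the agent routes through the llm-math tool
agent_executor.invoke({"input": "What is 3.5 raised to the power of 2.5?"})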

set_debug

from langchain.globals import set_debug

set_debug(True)
agent_executor.invoke({"input": "你好啊!"})
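
For lighter output than full debug traces, langchain.globals also provides set_verbose; a minimal sketch (and remember to switch set_debug back off when finished):

# Less noisy alternative: print prompts and responses without full debug traces
from langchain.globals import set_debug, set_verbose

set_debug(False)
set_verbose(True)
agent_executor.invoke({"input": "hi!"})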