Prompt+LLM
基本构成: PromptTemplate / ChatPromptTemplate -> LLM / ChatModel -> OutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

# Minimal LCEL pipeline: prompt template piped straight into a chat model.
prompt = ChatPromptTemplate.from_template("给我讲一个关于{foo}的笑话")
model = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)

chain = prompt | model
chain.invoke({"foo": "狗熊"})
自定义停止输出符
chain = prompt | model.bind(stop=["\n"]) # 大模型输出的内容遇到换行就停止了
兼容openai函数调用的方式
# OpenAI function-calling schema: forces the model to answer as a structured
# joke object with a "setup" and a "punchline".
joke_schema = {
    "name": "joke",
    "description": "讲笑话",
    "parameters": {
        "type": "object",
        "properties": {
            "setup": {"type": "string", "description": "笑话的开头"},
            "punchline": {"type": "string", "description": "爆梗的结尾"},
        },
        "required": ["setup", "punchline"],
    },
}
functions = [joke_schema]

# function_call pins which function the model must call.
chain = prompt | model.bind(function_call={"name": "joke"}, functions=functions)
chain.invoke({"foo": "男人"}, config={})
输出解析器
from langchain_core.output_parsers import StrOutputParser

# Append a parser so the chain returns a plain string instead of a message object.
chain = prompt | model | StrOutputParser()
与函数调用混合使用
# Consistency fix: import from langchain_core (used everywhere else in this
# file) instead of the deprecated langchain.output_parsers path.
from langchain_core.output_parsers.openai_functions import JsonOutputFunctionsParser

# Parse the forced function call's arguments into a dict ({"setup": ..., "punchline": ...}).
chain = (
    prompt
    | model.bind(function_call={"name": "joke"}, functions=functions)
    | JsonOutputFunctionsParser()  # returns the arguments as JSON/dict
)
# Return only the "setup" field of the generated joke.
# Fix: JsonKeyOutputFunctionsParser was used without ever being imported, and
# RunnablePassthrough is only imported much further down the file — both
# imports are added here so this snippet runs on its own.
from langchain_core.output_parsers.openai_functions import JsonKeyOutputFunctionsParser
from langchain_core.runnables import RunnablePassthrough

chain = (
    {"foo": RunnablePassthrough()}  # forward the raw invoke() input as the {foo} variable
    | prompt
    | model.bind(function_call={"name": "joke"}, functions=functions)
    | JsonKeyOutputFunctionsParser(key_name="setup")  # extract a single key
)
使用Runnables来连接多链结构
from operator import itemgetter  # pull a named key out of the invoke() payload

# Consistency fix: StrOutputParser was imported from the deprecated
# langchain.schema path; use langchain_core like the rest of the file.
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

prompt1 = ChatPromptTemplate.from_template("{person}来自于哪个城市?")
prompt2 = ChatPromptTemplate.from_template(
    "{city}属于哪个省? 用{language}来回答"
)
model = ChatOpenAI()

# Chain 1: person -> city, as a plain string.
chain1 = prompt1 | model | StrOutputParser()

# Chain 2: chain1's answer becomes {city}; {language} is forwarded unchanged
# from the original invoke() dict via itemgetter.
chain2 = (
    {"city": chain1, "language": itemgetter("language")}
    | prompt2
    | model
    | StrOutputParser()
)
chain1.invoke({"person": "马化腾"})
# chain2.invoke({"person": "马化腾", "language": "中文"})
from langchain_core.runnables import RunnablePassthrough

prompt1 = ChatPromptTemplate.from_template(
    "生成一个{attribute}属性的颜色。除了返回这个颜色的名字不要做其他事:"
)
prompt2 = ChatPromptTemplate.from_template(
    "什么水果是这个颜色:{color},只返回这个水果的名字不要做其他事情:"
)
prompt3 = ChatPromptTemplate.from_template(
    "哪个国家的国旗有这个颜色:{color},只返回这个国家的名字不要做其他事情:"
)
prompt4 = ChatPromptTemplate.from_template(
    "有这个颜色的水果是{fruit},有这个颜色的国旗是{country}?"
)

# Reusable tail: model followed by a string parser.
model_parser = model | StrOutputParser()

# Stage 1: attribute -> one color name, exposed under the "color" key.
color_generator = (
    {"attribute": RunnablePassthrough()} | prompt1 | {"color": model_parser}
)

# Stage 2: two lookups driven by the same color.
color_to_fruit = prompt2 | model_parser
color_to_country = prompt3 | model_parser

# Stage 3: fan out to fruit + country in parallel, then merge both into the
# final question prompt.
question_generator = (
    color_generator | {"fruit": color_to_fruit, "country": color_to_country} | prompt4
)
question_generator.invoke("强烈的")
多链执行与结果合并
        输入
       /    \
  分支1      分支2
       \    /
      合并结果
# Step 1: generate a thesis and expose it under the "base_response" key so the
# two branches below can template on it.
planner = (
    ChatPromptTemplate.from_template("生成一个关于{input}的论点")
    | ChatOpenAI()
    | StrOutputParser()
    | {"base_response": RunnablePassthrough()}
)

# Branch A: positives of the thesis.
arguments_for = (
    ChatPromptTemplate.from_template("列出以下内容的优点或积极方面:{base_response}")
    | ChatOpenAI()
    | StrOutputParser()
)

# Branch B: negatives of the thesis.
arguments_against = (
    ChatPromptTemplate.from_template("列出以下内容的缺点或消极方面:{base_response}")
    | ChatOpenAI()
    | StrOutputParser()
)

# Merge step: original thesis plus both critiques -> final reply.
final_responder = (
    ChatPromptTemplate.from_messages(
        [
            ("ai", "{original_response}"),
            ("human", "积极:\n{results_1}\n\n消极:\n{results_2}"),
            ("system", "根据评论生成最终的回复"),
        ]
    )
    | ChatOpenAI()
    | StrOutputParser()
)

# Full graph: planner fans out to both branches in parallel while the thesis
# itself is carried through via itemgetter, then everything merges.
chain = (
    planner
    | {
        "results_1": arguments_for,
        "results_2": arguments_against,
        "original_response": itemgetter("base_response"),
    }
    | final_responder
)
chain.invoke({"input": "生孩子"})