Normal invocation

# Calling chat models, using OpenAI as an example

from langchain.chat_models import ChatOpenAI
from langchain.schema.messages import HumanMessage,AIMessage
import os
api_base = os.getenv("OPENAI_PROXY")
api_key = os.getenv("OPENAI_API_KEY")

chat = ChatOpenAI(
    model="gpt-4",
    temperature=0,
    openai_api_key=api_key,
    openai_api_base=api_base,
)

# The role is implied by the message class itself, so no role kwarg is needed
messages = [
    AIMessage(content="Hello, I'm tomie!"),
    HumanMessage(content="Hello tomie, I'm 狗剩!"),
    AIMessage(content="Nice to meet you!"),
    HumanMessage(content="Do you know my name?")
]

response = chat.invoke(messages)
print(response)

#print(chat.predict("Hello"))
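These message classes carry their roles implicitly; if a true system prompt is wanted, LangChain also provides a SystemMessage class. A minimal sketch (the prompt wording here is illustrative):

from langchain.schema.messages import SystemMessage

messages = [
    SystemMessage(content="You are a friendly assistant named tomie."),
    HumanMessage(content="Do you know who you are?"),
]
print(chat.invoke(messages).content)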

Streaming invocation

# Streaming output for LLM-class models

from langchain.llms import OpenAI
import os
api_base = os.getenv("OPENAI_PROXY")
api_key = os.getenv("OPENAI_API_KEY")

# Construct an LLM
llm = OpenAI(
    model="gpt-3.5-turbo-instruct",
    temperature=0,
    openai_api_key=api_key,
    openai_api_base=api_base,
    max_tokens=512,
)

# flush=True so each chunk is printed the moment it arrives
for chunk in llm.stream("Write a poem about autumn"):
    print(chunk, end="", flush=True)
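Chat models stream the same way; each chunk there is a message chunk whose text sits on .content. A minimal sketch, assuming a ChatOpenAI instance like the chat object from the first section:

for chunk in chat.stream("Write a poem about autumn"):
    print(chunk.content, end="", flush=True)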

Token tracking

# Token tracking for LLMs
from langchain.llms import OpenAI
from langchain.callbacks import get_openai_callback
import os
api_base = os.getenv("OPENAI_PROXY")
api_key = os.getenv("OPENAI_API_KEY")

# Construct an LLM
llm = OpenAI(
    model="gpt-3.5-turbo-instruct",
    temperature=0,
    openai_api_key=api_key,
    openai_api_base=api_base,
    max_tokens=512,
)

with get_openai_callback() as cb:
    result = llm.invoke("Tell me a joke")
    print(result)
    print(cb)
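Printing cb dumps a one-shot summary; the callback handler also exposes the counters as individual attributes, which remain readable after the with block exits:

print(cb.total_tokens)       # prompt + completion tokens
print(cb.prompt_tokens)
print(cb.completion_tokens)
print(cb.total_cost)         # estimated cost in USD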

Custom output

  • Output function arguments
  • Output JSON
  • Output a list
  • Output dates (see the DatetimeOutputParser sketch at the end of this section)
# Joke-telling bot: each instruction should produce a joke of this shape (How did Xiao Ming die? He died of stupidity.)

from langchain.llms import OpenAI
from langchain.output_parsers import PydanticOutputParser
from langchain.prompts import PromptTemplate
from langchain.pydantic_v1 import BaseModel, Field, validator
import os
api_base = os.getenv("OPENAI_PROXY")
api_key = os.getenv("OPENAI_API_KEY")

# Construct the LLM
model = OpenAI(
    model="gpt-3.5-turbo-instruct",
    temperature=0,
    openai_api_key=api_key,
    openai_api_base=api_base,
)

# Define a data model that describes the structure of the final instance
class Joke(BaseModel):
    setup: str = Field(description="the question that sets up the joke")
    punchline: str = Field(description="the answer that delivers the punchline")

    # Validate that the setup is actually phrased as a question
    @validator("setup")
    def question_mark(cls, field):
        if field[-1] not in ("?", "？"):
            raise ValueError("The setup does not end with a question mark!")
        return field

# Hand the Joke data model to the parser
parser = PydanticOutputParser(pydantic_object=Joke)


prompt = PromptTemplate(
    template="Answer the user's input.\n{format_instructions}\n{query}\n",
    input_variables=["query"],
    partial_variables={"format_instructions": parser.get_format_instructions()},
)

prompt_and_model = prompt | model
output = prompt_and_model.invoke({"query": "Tell me a joke"})
print("output:", output)
print(parser.invoke(output))  # parse the raw text into a Joke instance
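Since the parser is itself a runnable, it can also be composed straight into the chain, so a single invoke returns a Joke instance; a minimal sketch:

chain = prompt | model | parser
joke = chain.invoke({"query": "Tell me a joke"})
print(joke.setup, joke.punchline)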

Output a list

# Format the LLM's output as a Python list, e.g. ['a', 'b', 'c']

from langchain.output_parsers import CommaSeparatedListOutputParser
from langchain.prompts import PromptTemplate
from langchain.llms import OpenAI
import os
api_base = os.getenv("OPENAI_PROXY")
api_key = os.getenv("OPENAI_API_KEY")

# Construct the LLM
model = OpenAI(
    model="gpt-3.5-turbo-instruct",
    temperature=0,
    openai_api_key=api_key,
    openai_api_base=api_base,
)

parser = CommaSeparatedListOutputParser()

prompt = PromptTemplate(
    template="List 5 {subject}.\n{format_instructions}",
    input_variables=["subject"],
    partial_variables={"format_instructions": parser.get_format_instructions()},
)

_input = prompt.format(subject="common dog names")
output = model.invoke(_input)
print(output)
# Parse the comma-separated text into a Python list
print(parser.parse(output))
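The "output dates" item from the bullet list above follows the same pattern; LangChain ships a DatetimeOutputParser whose parse() returns a datetime object. A minimal sketch reusing the model constructed above (the question text is illustrative):

from langchain.output_parsers import DatetimeOutputParser

date_parser = DatetimeOutputParser()

date_prompt = PromptTemplate(
    template="Answer the question.\n{format_instructions}\n{question}",
    input_variables=["question"],
    partial_variables={"format_instructions": date_parser.get_format_instructions()},
)

_input = date_prompt.format(question="When did World War II end?")
output = model.invoke(_input)
print(date_parser.parse(output))  # a datetime.datetime instance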
