#!/usr/bin/env python3
# vim: set expandtab sw=4 ts=4 sts=4 foldmethod=indent filetype=python:

import asyncio
import os

from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import ChatOpenAI

os.environ['OPENAI_API_KEY'] = '2708b7c21129e408899d5a38e6d1af8d'
os.environ['OPENAI_API_BASE'] = 'http://localai.srvlan:8080'

#|%%--%%| <9871Wi18GN|qMj6mA5jLr>
r"""°°°
# Basic invocation
°°°"""
#|%%--%%|

llm = ChatOpenAI(model="dolphin-mixtral", temperature=0.2, max_tokens=100)

#|%%--%%|

llm.invoke("how can you help me?")

#|%%--%%|
r"""°°°
# Using prompt templates
°°°"""
#|%%--%%|

from langchain_core.prompts import ChatPromptTemplate

prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a genius DIY maker assistant."),
    ("user", "{input}")
])

# Combine the prompt and the llm to create a chain.
chain = prompt | llm

chain.invoke(dict(input="what are the best adhesives to bind metal to wood?"))

#|%%--%%|
r"""°°°
# Creating a chain

Every element in the chain implements the Runnable interface
(`invoke`, `stream`, `ainvoke`, `batch`, ...); a couple of these
methods are demonstrated at the end of this notebook.
°°°"""
#|%%--%%|

prompt = ChatPromptTemplate.from_template("Tell me a short joke about {topic}")
output_parser = StrOutputParser()
llm = ChatOpenAI(model="dolphin-mixtral", temperature=0.6, max_tokens=100)

# RunnablePassthrough forwards the raw input ("ice cream") into the
# prompt's {topic} variable; StrOutputParser turns the model's message
# into a plain string.
chain = (
    {"topic": RunnablePassthrough()}
    | prompt
    | llm
    | output_parser
)

chain.invoke("ice cream")

#|%%--%%|
r"""°°°
# Streaming the response

https://python.langchain.com/docs/expression_language/streaming
°°°"""
#|%%--%%|

# The chain ends with StrOutputParser, so astream() yields plain
# strings rather than message chunks.
async for chunk in chain.astream("ice cream"):
    print(chunk, end="", flush=True)
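
#|%%--%%|
r"""°°°
Without a final output parser, `astream` yields `AIMessageChunk` objects
instead of strings, so the text lives in `chunk.content`. A minimal
sketch reusing the DIY assistant prompt from earlier (the `diy_*` names
are local to this sketch):
°°°"""
#|%%--%%|

diy_prompt = ChatPromptTemplate.from_messages([
    ("system", "You are a genius DIY maker assistant."),
    ("user", "{input}")
])
diy_chain = diy_prompt | llm

# Each chunk is an AIMessageChunk; print its .content as it arrives.
async for chunk in diy_chain.astream(dict(input="how can I bind metal to plastic?")):
    print(chunk.content, end="", flush=True)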
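
#|%%--%%|
r"""°°°
# Batching

`batch` runs a chain over a list of inputs and returns one output per
input. A minimal sketch using the joke chain from above; the topics are
arbitrary examples.
°°°"""
#|%%--%%|

# batch() takes a list of inputs and returns a list of parsed strings,
# running the calls concurrently where possible.
chain.batch(["ice cream", "soldering irons"])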
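
#|%%--%%|
r"""°°°
# Async invocation

`ainvoke` is the async counterpart of `invoke`. This sketch assumes the
notebook kernel supports top-level `await` (IPython does); a plain
script would need `asyncio.run` instead.
°°°"""
#|%%--%%|

# Await the full response asynchronously; returns the parsed string.
await chain.ainvoke("ice cream")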