Merge branch 'master' into together_parser

pull/21627/head
Mohammad Mohtashim 2 weeks ago committed by GitHub
commit 5a4862b0fc

@ -177,7 +177,7 @@ jobs:
env:
MIN_VERSIONS: ${{ steps.min-version.outputs.min-versions }}
run: |
poetry run pip install --force-reinstall $MIN_VERSIONS
poetry run pip install --force-reinstall $MIN_VERSIONS --editable .
make tests
working-directory: ${{ inputs.working-directory }}
@ -222,6 +222,7 @@ jobs:
MONGODB_ATLAS_URI: ${{ secrets.MONGODB_ATLAS_URI }}
VOYAGE_API_KEY: ${{ secrets.VOYAGE_API_KEY }}
UPSTAGE_API_KEY: ${{ secrets.UPSTAGE_API_KEY }}
FIREWORKS_API_KEY: ${{ secrets.FIREWORKS_API_KEY }}
run: make integration_tests
working-directory: ${{ inputs.working-directory }}

@ -3,7 +3,7 @@
## help: Show this help info.
help: Makefile
@printf "\n\033[1mUsage: make <TARGETS> ...\033[0m\n\n\033[1mTargets:\033[0m\n\n"
@sed -n 's/^##//p' $< | awk -F':' '{printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' | sort | sed -e 's/^/ /'
@sed -n 's/^## //p' $< | awk -F':' '{printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' | sort | sed -e 's/^/ /'
## all: Default target, shows help.
all: help

File diff suppressed because one or more lines are too long

@ -13,7 +13,7 @@ OUTPUT_NEW_DOCS_DIR = $(OUTPUT_NEW_DIR)/docs
PYTHON = .venv/bin/python
PARTNER_DEPS_LIST := $(shell find ../libs/partners -mindepth 1 -maxdepth 1 -type d -exec test -e "{}/pyproject.toml" \; -print | grep -vE "airbyte|ibm|ai21" | tr '\n' ' ')
PARTNER_DEPS_LIST := $(shell find ../libs/partners -mindepth 1 -maxdepth 1 -type d -exec test -e "{}/pyproject.toml" \; -print | grep -vE "airbyte|ibm" | tr '\n' ' ')
PORT ?= 3001
@ -69,9 +69,9 @@ md-sync:
generate-references:
$(PYTHON) scripts/generate_api_reference_links.py --docs_dir $(OUTPUT_NEW_DOCS_DIR)
build: install-py-deps generate-files copy-infra render md-sync generate-references
build: install-py-deps generate-files copy-infra render md-sync
vercel-build: install-vercel-deps build
vercel-build: install-vercel-deps build generate-references
rm -rf docs
mv $(OUTPUT_NEW_DOCS_DIR) docs
rm -rf build

@ -0,0 +1,519 @@
# arXiv
LangChain implements the latest research in the field of Natural Language Processing.
This page contains `arXiv` papers referenced in the LangChain Documentation and API Reference.
## Summary
| arXiv id / Title | Authors | Published date 🔻 | LangChain Documentation and API Reference |
|------------------|---------|-------------------|-------------------------|
| `2307.03172v3` [Lost in the Middle: How Language Models Use Long Contexts](http://arxiv.org/abs/2307.03172v3) | Nelson F. Liu, Kevin Lin, John Hewitt, et al. | 2023-07-06 | `Docs:` [docs/modules/data_connection/retrievers/long_context_reorder](https://python.langchain.com/docs/modules/data_connection/retrievers/long_context_reorder)
| `2305.08291v1` [Large Language Model Guided Tree-of-Thought](http://arxiv.org/abs/2305.08291v1) | Jieyi Long | 2023-05-15 | `API:` [langchain_experimental.tot](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.tot)
| `2305.06983v2` [Active Retrieval Augmented Generation](http://arxiv.org/abs/2305.06983v2) | Zhengbao Jiang, Frank F. Xu, Luyu Gao, et al. | 2023-05-11 | `Docs:` [docs/modules/chains](https://python.langchain.com/docs/modules/chains)
| `2303.17580v4` [HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face](http://arxiv.org/abs/2303.17580v4) | Yongliang Shen, Kaitao Song, Xu Tan, et al. | 2023-03-30 | `API:` [langchain_experimental.autonomous_agents](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.autonomous_agents)
| `2303.08774v6` [GPT-4 Technical Report](http://arxiv.org/abs/2303.08774v6) | OpenAI, Josh Achiam, Steven Adler, et al. | 2023-03-15 | `Docs:` [docs/integrations/vectorstores/mongodb_atlas](https://python.langchain.com/docs/integrations/vectorstores/mongodb_atlas)
| `2301.10226v4` [A Watermark for Large Language Models](http://arxiv.org/abs/2301.10226v4) | John Kirchenbauer, Jonas Geiping, Yuxin Wen, et al. | 2023-01-24 | `API:` [langchain_community.llms...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community.llms...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community.llms...OCIModelDeploymentTGI](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI.html#langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI)
| `2212.10496v1` [Precise Zero-Shot Dense Retrieval without Relevance Labels](http://arxiv.org/abs/2212.10496v1) | Luyu Gao, Xueguang Ma, Jimmy Lin, et al. | 2022-12-20 | `Docs:` [docs/use_cases/query_analysis/techniques/hyde](https://python.langchain.com/docs/use_cases/query_analysis/techniques/hyde), `API:` [langchain.chains...HypotheticalDocumentEmbedder](https://api.python.langchain.com/en/latest/chains/langchain.chains.hyde.base.HypotheticalDocumentEmbedder.html#langchain.chains.hyde.base.HypotheticalDocumentEmbedder)
| `2212.08073v1` [Constitutional AI: Harmlessness from AI Feedback](http://arxiv.org/abs/2212.08073v1) | Yuntao Bai, Saurav Kadavath, Sandipan Kundu, et al. | 2022-12-15 | `Docs:` [docs/guides/productionization/evaluation/string/criteria_eval_chain](https://python.langchain.com/docs/guides/productionization/evaluation/string/criteria_eval_chain)
| `2212.07425v3` [Robust and Explainable Identification of Logical Fallacies in Natural Language Arguments](http://arxiv.org/abs/2212.07425v3) | Zhivar Sourati, Vishnu Priya Prasanna Venkatesh, Darshan Deshpande, et al. | 2022-12-12 | `API:` [langchain_experimental.fallacy_removal](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.fallacy_removal)
| `2211.13892v2` [Complementary Explanations for Effective In-Context Learning](http://arxiv.org/abs/2211.13892v2) | Xi Ye, Srinivasan Iyer, Asli Celikyilmaz, et al. | 2022-11-25 | `API:` [langchain_core.example_selectors...MaxMarginalRelevanceExampleSelector](https://api.python.langchain.com/en/latest/example_selectors/langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector.html#langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector)
| `2211.10435v2` [PAL: Program-aided Language Models](http://arxiv.org/abs/2211.10435v2) | Luyu Gao, Aman Madaan, Shuyan Zhou, et al. | 2022-11-18 | `API:` [langchain_experimental.pal_chain...PALChain](https://api.python.langchain.com/en/latest/pal_chain/langchain_experimental.pal_chain.base.PALChain.html#langchain_experimental.pal_chain.base.PALChain), [langchain_experimental.pal_chain](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.pal_chain)
| `2209.10785v2` [Deep Lake: a Lakehouse for Deep Learning](http://arxiv.org/abs/2209.10785v2) | Sasun Hambardzumyan, Abhinav Tuli, Levon Ghukasyan, et al. | 2022-09-22 | `Docs:` [docs/integrations/providers/activeloop_deeplake](https://python.langchain.com/docs/integrations/providers/activeloop_deeplake)
| `2205.12654v1` [Bitext Mining Using Distilled Sentence Representations for Low-Resource Languages](http://arxiv.org/abs/2205.12654v1) | Kevin Heffernan, Onur Çelebi, Holger Schwenk | 2022-05-25 | `API:` [langchain_community.embeddings...LaserEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_community.embeddings.laser.LaserEmbeddings.html#langchain_community.embeddings.laser.LaserEmbeddings)
| `2204.00498v1` [Evaluating the Text-to-SQL Capabilities of Large Language Models](http://arxiv.org/abs/2204.00498v1) | Nitarshan Rajkumar, Raymond Li, Dzmitry Bahdanau | 2022-03-15 | `Docs:` [docs/use_cases/sql/quickstart](https://python.langchain.com/docs/use_cases/sql/quickstart), `API:` [langchain_community.utilities...SQLDatabase](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.sql_database.SQLDatabase.html#langchain_community.utilities.sql_database.SQLDatabase), [langchain_community.utilities...SparkSQL](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.spark_sql.SparkSQL.html#langchain_community.utilities.spark_sql.SparkSQL)
| `2202.00666v5` [Locally Typical Sampling](http://arxiv.org/abs/2202.00666v5) | Clara Meister, Tiago Pimentel, Gian Wiher, et al. | 2022-02-01 | `API:` [langchain_community.llms...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community.llms...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint)
| `2103.00020v1` [Learning Transferable Visual Models From Natural Language Supervision](http://arxiv.org/abs/2103.00020v1) | Alec Radford, Jong Wook Kim, Chris Hallacy, et al. | 2021-02-26 | `API:` [langchain_experimental.open_clip](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.open_clip)
| `1909.05858v2` [CTRL: A Conditional Transformer Language Model for Controllable Generation](http://arxiv.org/abs/1909.05858v2) | Nitish Shirish Keskar, Bryan McCann, Lav R. Varshney, et al. | 2019-09-11 | `API:` [langchain_community.llms...HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community.llms...HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint)
| `1908.10084v1` [Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks](http://arxiv.org/abs/1908.10084v1) | Nils Reimers, Iryna Gurevych | 2019-08-27 | `Docs:` [docs/integrations/text_embedding/sentence_transformers](https://python.langchain.com/docs/integrations/text_embedding/sentence_transformers)
## Lost in the Middle: How Language Models Use Long Contexts
- **arXiv id:** 2307.03172v3
- **Title:** Lost in the Middle: How Language Models Use Long Contexts
- **Authors:** Nelson F. Liu, Kevin Lin, John Hewitt, et al.
- **Published Date:** 2023-07-06
- **URL:** http://arxiv.org/abs/2307.03172v3
- **LangChain Documentation:** [docs/modules/data_connection/retrievers/long_context_reorder](https://python.langchain.com/docs/modules/data_connection/retrievers/long_context_reorder)
**Abstract:** While recent language models have the ability to take long contexts as input,
relatively little is known about how well they use longer context. We analyze
the performance of language models on two tasks that require identifying
relevant information in their input contexts: multi-document question answering
and key-value retrieval. We find that performance can degrade significantly
when changing the position of relevant information, indicating that current
language models do not robustly make use of information in long input contexts.
In particular, we observe that performance is often highest when relevant
information occurs at the beginning or end of the input context, and
significantly degrades when models must access relevant information in the
middle of long contexts, even for explicitly long-context models. Our analysis
provides a better understanding of how language models use their input context
and provides new evaluation protocols for future long-context language models.
## Large Language Model Guided Tree-of-Thought
- **arXiv id:** 2305.08291v1
- **Title:** Large Language Model Guided Tree-of-Thought
- **Authors:** Jieyi Long
- **Published Date:** 2023-05-15
- **URL:** http://arxiv.org/abs/2305.08291v1
- **LangChain API Reference:** [langchain_experimental.tot](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.tot)
**Abstract:** In this paper, we introduce the Tree-of-Thought (ToT) framework, a novel
approach aimed at improving the problem-solving capabilities of auto-regressive
large language models (LLMs). The ToT technique is inspired by the human mind's
approach for solving complex reasoning tasks through trial and error. In this
process, the human mind explores the solution space through a tree-like thought
process, allowing for backtracking when necessary. To implement ToT as a
software system, we augment an LLM with additional modules including a prompter
agent, a checker module, a memory module, and a ToT controller. In order to
solve a given problem, these modules engage in a multi-round conversation with
the LLM. The memory module records the conversation and state history of the
problem solving process, which allows the system to backtrack to the previous
steps of the thought-process and explore other directions from there. To verify
the effectiveness of the proposed technique, we implemented a ToT-based solver
for the Sudoku Puzzle. Experimental results show that the ToT framework can
significantly increase the success rate of Sudoku puzzle solving. Our
implementation of the ToT-based Sudoku solver is available on GitHub:
\url{https://github.com/jieyilong/tree-of-thought-puzzle-solver}.
## Active Retrieval Augmented Generation
- **arXiv id:** 2305.06983v2
- **Title:** Active Retrieval Augmented Generation
- **Authors:** Zhengbao Jiang, Frank F. Xu, Luyu Gao, et al.
- **Published Date:** 2023-05-11
- **URL:** http://arxiv.org/abs/2305.06983v2
- **LangChain Documentation:** [docs/modules/chains](https://python.langchain.com/docs/modules/chains)
**Abstract:** Despite the remarkable ability of large language models (LMs) to comprehend
and generate language, they have a tendency to hallucinate and create factually
inaccurate output. Augmenting LMs by retrieving information from external
knowledge resources is one promising solution. Most existing retrieval
augmented LMs employ a retrieve-and-generate setup that only retrieves
information once based on the input. This is limiting, however, in more general
scenarios involving generation of long texts, where continually gathering
information throughout generation is essential. In this work, we provide a
generalized view of active retrieval augmented generation, methods that
actively decide when and what to retrieve across the course of the generation.
We propose Forward-Looking Active REtrieval augmented generation (FLARE), a
generic method which iteratively uses a prediction of the upcoming sentence to
anticipate future content, which is then utilized as a query to retrieve
relevant documents to regenerate the sentence if it contains low-confidence
tokens. We test FLARE along with baselines comprehensively over 4 long-form
knowledge-intensive generation tasks/datasets. FLARE achieves superior or
competitive performance on all tasks, demonstrating the effectiveness of our
method. Code and datasets are available at https://github.com/jzbjyb/FLARE.
## HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face
- **arXiv id:** 2303.17580v4
- **Title:** HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face
- **Authors:** Yongliang Shen, Kaitao Song, Xu Tan, et al.
- **Published Date:** 2023-03-30
- **URL:** http://arxiv.org/abs/2303.17580v4
- **LangChain API Reference:** [langchain_experimental.autonomous_agents](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.autonomous_agents)
**Abstract:** Solving complicated AI tasks with different domains and modalities is a key
step toward artificial general intelligence. While there are numerous AI models
available for various domains and modalities, they cannot handle complicated AI
tasks autonomously. Considering large language models (LLMs) have exhibited
exceptional abilities in language understanding, generation, interaction, and
reasoning, we advocate that LLMs could act as a controller to manage existing
AI models to solve complicated AI tasks, with language serving as a generic
interface to empower this. Based on this philosophy, we present HuggingGPT, an
LLM-powered agent that leverages LLMs (e.g., ChatGPT) to connect various AI
models in machine learning communities (e.g., Hugging Face) to solve AI tasks.
Specifically, we use ChatGPT to conduct task planning when receiving a user
request, select models according to their function descriptions available in
Hugging Face, execute each subtask with the selected AI model, and summarize
the response according to the execution results. By leveraging the strong
language capability of ChatGPT and abundant AI models in Hugging Face,
HuggingGPT can tackle a wide range of sophisticated AI tasks spanning different
modalities and domains and achieve impressive results in language, vision,
speech, and other challenging tasks, which paves a new way towards the
realization of artificial general intelligence.
## GPT-4 Technical Report
- **arXiv id:** 2303.08774v6
- **Title:** GPT-4 Technical Report
- **Authors:** OpenAI, Josh Achiam, Steven Adler, et al.
- **Published Date:** 2023-03-15
- **URL:** http://arxiv.org/abs/2303.08774v6
- **LangChain Documentation:** [docs/integrations/vectorstores/mongodb_atlas](https://python.langchain.com/docs/integrations/vectorstores/mongodb_atlas)
**Abstract:** We report the development of GPT-4, a large-scale, multimodal model which can
accept image and text inputs and produce text outputs. While less capable than
humans in many real-world scenarios, GPT-4 exhibits human-level performance on
various professional and academic benchmarks, including passing a simulated bar
exam with a score around the top 10% of test takers. GPT-4 is a
Transformer-based model pre-trained to predict the next token in a document.
The post-training alignment process results in improved performance on measures
of factuality and adherence to desired behavior. A core component of this
project was developing infrastructure and optimization methods that behave
predictably across a wide range of scales. This allowed us to accurately
predict some aspects of GPT-4's performance based on models trained with no
more than 1/1,000th the compute of GPT-4.
## A Watermark for Large Language Models
- **arXiv id:** 2301.10226v4
- **Title:** A Watermark for Large Language Models
- **Authors:** John Kirchenbauer, Jonas Geiping, Yuxin Wen, et al.
- **Published Date:** 2023-01-24
- **URL:** http://arxiv.org/abs/2301.10226v4
- **LangChain API Reference:** [langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint), [langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI.html#langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI)
**Abstract:** Potential harms of large language models can be mitigated by watermarking
model output, i.e., embedding signals into generated text that are invisible to
humans but algorithmically detectable from a short span of tokens. We propose a
watermarking framework for proprietary language models. The watermark can be
embedded with negligible impact on text quality, and can be detected using an
efficient open-source algorithm without access to the language model API or
parameters. The watermark works by selecting a randomized set of "green" tokens
before a word is generated, and then softly promoting use of green tokens
during sampling. We propose a statistical test for detecting the watermark with
interpretable p-values, and derive an information-theoretic framework for
analyzing the sensitivity of the watermark. We test the watermark using a
multi-billion parameter model from the Open Pretrained Transformer (OPT)
family, and discuss robustness and security.
## Precise Zero-Shot Dense Retrieval without Relevance Labels
- **arXiv id:** 2212.10496v1
- **Title:** Precise Zero-Shot Dense Retrieval without Relevance Labels
- **Authors:** Luyu Gao, Xueguang Ma, Jimmy Lin, et al.
- **Published Date:** 2022-12-20
- **URL:** http://arxiv.org/abs/2212.10496v1
- **LangChain Documentation:** [docs/use_cases/query_analysis/techniques/hyde](https://python.langchain.com/docs/use_cases/query_analysis/techniques/hyde)
- **LangChain API Reference:** [langchain.chains.hyde.base.HypotheticalDocumentEmbedder](https://api.python.langchain.com/en/latest/chains/langchain.chains.hyde.base.HypotheticalDocumentEmbedder.html#langchain.chains.hyde.base.HypotheticalDocumentEmbedder)
**Abstract:** While dense retrieval has been shown effective and efficient across tasks and
languages, it remains difficult to create effective fully zero-shot dense
retrieval systems when no relevance label is available. In this paper, we
recognize the difficulty of zero-shot learning and encoding relevance. Instead,
we propose to pivot through Hypothetical Document Embeddings~(HyDE). Given a
query, HyDE first zero-shot instructs an instruction-following language model
(e.g. InstructGPT) to generate a hypothetical document. The document captures
relevance patterns but is unreal and may contain false details. Then, an
unsupervised contrastively learned encoder~(e.g. Contriever) encodes the
document into an embedding vector. This vector identifies a neighborhood in the
corpus embedding space, where similar real documents are retrieved based on
vector similarity. This second step grounds the generated document to the actual
corpus, with the encoder's dense bottleneck filtering out the incorrect
details. Our experiments show that HyDE significantly outperforms the
state-of-the-art unsupervised dense retriever Contriever and shows strong
performance comparable to fine-tuned retrievers, across various tasks (e.g. web
search, QA, fact verification) and languages~(e.g. sw, ko, ja).
## Constitutional AI: Harmlessness from AI Feedback
- **arXiv id:** 2212.08073v1
- **Title:** Constitutional AI: Harmlessness from AI Feedback
- **Authors:** Yuntao Bai, Saurav Kadavath, Sandipan Kundu, et al.
- **Published Date:** 2022-12-15
- **URL:** http://arxiv.org/abs/2212.08073v1
- **LangChain Documentation:** [docs/guides/productionization/evaluation/string/criteria_eval_chain](https://python.langchain.com/docs/guides/productionization/evaluation/string/criteria_eval_chain)
**Abstract:** As AI systems become more capable, we would like to enlist their help to
supervise other AIs. We experiment with methods for training a harmless AI
assistant through self-improvement, without any human labels identifying
harmful outputs. The only human oversight is provided through a list of rules
or principles, and so we refer to the method as 'Constitutional AI'. The
process involves both a supervised learning and a reinforcement learning phase.
In the supervised phase we sample from an initial model, then generate
self-critiques and revisions, and then finetune the original model on revised
responses. In the RL phase, we sample from the finetuned model, use a model to
evaluate which of the two samples is better, and then train a preference model
from this dataset of AI preferences. We then train with RL using the preference
model as the reward signal, i.e. we use 'RL from AI Feedback' (RLAIF). As a
result we are able to train a harmless but non-evasive AI assistant that
engages with harmful queries by explaining its objections to them. Both the SL
and RL methods can leverage chain-of-thought style reasoning to improve the
human-judged performance and transparency of AI decision making. These methods
make it possible to control AI behavior more precisely and with far fewer human
labels.
## Robust and Explainable Identification of Logical Fallacies in Natural Language Arguments
- **arXiv id:** 2212.07425v3
- **Title:** Robust and Explainable Identification of Logical Fallacies in Natural Language Arguments
- **Authors:** Zhivar Sourati, Vishnu Priya Prasanna Venkatesh, Darshan Deshpande, et al.
- **Published Date:** 2022-12-12
- **URL:** http://arxiv.org/abs/2212.07425v3
- **LangChain API Reference:** [langchain_experimental.fallacy_removal](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.fallacy_removal)
**Abstract:** The spread of misinformation, propaganda, and flawed argumentation has been
amplified in the Internet era. Given the volume of data and the subtlety of
identifying violations of argumentation norms, supporting information analytics
tasks, like content moderation, with trustworthy methods that can identify
logical fallacies is essential. In this paper, we formalize prior theoretical
work on logical fallacies into a comprehensive three-stage evaluation framework
of detection, coarse-grained, and fine-grained classification. We adapt
existing evaluation datasets for each stage of the evaluation. We employ three
families of robust and explainable methods based on prototype reasoning,
instance-based reasoning, and knowledge injection. The methods combine language
models with background knowledge and explainable mechanisms. Moreover, we
address data sparsity with strategies for data augmentation and curriculum
learning. Our three-stage framework natively consolidates prior datasets and
methods from existing tasks, like propaganda detection, serving as an
overarching evaluation testbed. We extensively evaluate these methods on our
datasets, focusing on their robustness and explainability. Our results provide
insight into the strengths and weaknesses of the methods on different
components and fallacy classes, indicating that fallacy identification is a
challenging task that may require specialized forms of reasoning to capture
various classes. We share our open-source code and data on GitHub to support
further work on logical fallacy identification.
## Complementary Explanations for Effective In-Context Learning
- **arXiv id:** 2211.13892v2
- **Title:** Complementary Explanations for Effective In-Context Learning
- **Authors:** Xi Ye, Srinivasan Iyer, Asli Celikyilmaz, et al.
- **Published Date:** 2022-11-25
- **URL:** http://arxiv.org/abs/2211.13892v2
- **LangChain API Reference:** [langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector](https://api.python.langchain.com/en/latest/example_selectors/langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector.html#langchain_core.example_selectors.semantic_similarity.MaxMarginalRelevanceExampleSelector)
**Abstract:** Large language models (LLMs) have exhibited remarkable capabilities in
learning from explanations in prompts, but there has been limited understanding
of exactly how these explanations function or why they are effective. This work
aims to better understand the mechanisms by which explanations are used for
in-context learning. We first study the impact of two different factors on the
performance of prompts with explanations: the computation trace (the way the
solution is decomposed) and the natural language used to express the prompt. By
perturbing explanations on three controlled tasks, we show that both factors
contribute to the effectiveness of explanations. We further study how to form
maximally effective sets of explanations for solving a given test query. We
find that LLMs can benefit from the complementarity of the explanation set:
diverse reasoning skills shown by different exemplars can lead to better
performance. Therefore, we propose a maximal marginal relevance-based exemplar
selection approach for constructing exemplar sets that are both relevant as
well as complementary, which successfully improves the in-context learning
performance across three real-world tasks on multiple LLMs.
## PAL: Program-aided Language Models
- **arXiv id:** 2211.10435v2
- **Title:** PAL: Program-aided Language Models
- **Authors:** Luyu Gao, Aman Madaan, Shuyan Zhou, et al.
- **Published Date:** 2022-11-18
- **URL:** http://arxiv.org/abs/2211.10435v2
- **LangChain API Reference:** [langchain_experimental.pal_chain.base.PALChain](https://api.python.langchain.com/en/latest/pal_chain/langchain_experimental.pal_chain.base.PALChain.html#langchain_experimental.pal_chain.base.PALChain), [langchain_experimental.pal_chain](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.pal_chain)
**Abstract:** Large language models (LLMs) have recently demonstrated an impressive ability
to perform arithmetic and symbolic reasoning tasks, when provided with a few
examples at test time ("few-shot prompting"). Much of this success can be
attributed to prompting methods such as "chain-of-thought", which employ LLMs
for both understanding the problem description by decomposing it into steps, as
well as solving each step of the problem. While LLMs seem to be adept at this
sort of step-by-step decomposition, LLMs often make logical and arithmetic
mistakes in the solution part, even when the problem is decomposed correctly.
In this paper, we present Program-Aided Language models (PAL): a novel approach
that uses the LLM to read natural language problems and generate programs as
the intermediate reasoning steps, but offloads the solution step to a runtime
such as a Python interpreter. With PAL, decomposing the natural language
problem into runnable steps remains the only learning task for the LLM, while
solving is delegated to the interpreter. We demonstrate this synergy between a
neural LLM and a symbolic interpreter across 13 mathematical, symbolic, and
algorithmic reasoning tasks from BIG-Bench Hard and other benchmarks. In all
these natural language reasoning tasks, generating code using an LLM and
reasoning using a Python interpreter leads to more accurate results than much
larger models. For example, PAL using Codex achieves state-of-the-art few-shot
accuracy on the GSM8K benchmark of math word problems, surpassing PaLM-540B
which uses chain-of-thought by absolute 15% top-1. Our code and data are
publicly available at http://reasonwithpal.com/ .
## Deep Lake: a Lakehouse for Deep Learning
- **arXiv id:** 2209.10785v2
- **Title:** Deep Lake: a Lakehouse for Deep Learning
- **Authors:** Sasun Hambardzumyan, Abhinav Tuli, Levon Ghukasyan, et al.
- **Published Date:** 2022-09-22
- **URL:** http://arxiv.org/abs/2209.10785v2
- **LangChain Documentation:** [docs/integrations/providers/activeloop_deeplake](https://python.langchain.com/docs/integrations/providers/activeloop_deeplake)
**Abstract:** Traditional data lakes provide critical data infrastructure for analytical
workloads by enabling time travel, running SQL queries, ingesting data with
ACID transactions, and visualizing petabyte-scale datasets on cloud storage.
They allow organizations to break down data silos, unlock data-driven
decision-making, improve operational efficiency, and reduce costs. However, as
deep learning usage increases, traditional data lakes are not well-designed for
applications such as natural language processing (NLP), audio processing,
computer vision, and applications involving non-tabular datasets. This paper
presents Deep Lake, an open-source lakehouse for deep learning applications
developed at Activeloop. Deep Lake maintains the benefits of a vanilla data
lake with one key difference: it stores complex data, such as images, videos,
annotations, as well as tabular data, in the form of tensors and rapidly
streams the data over the network to (a) Tensor Query Language, (b) in-browser
visualization engine, or (c) deep learning frameworks without sacrificing GPU
utilization. Datasets stored in Deep Lake can be accessed from PyTorch,
TensorFlow, JAX, and integrate with numerous MLOps tools.
## Bitext Mining Using Distilled Sentence Representations for Low-Resource Languages
- **arXiv id:** 2205.12654v1
- **Title:** Bitext Mining Using Distilled Sentence Representations for Low-Resource Languages
- **Authors:** Kevin Heffernan, Onur Çelebi, Holger Schwenk
- **Published Date:** 2022-05-25
- **URL:** http://arxiv.org/abs/2205.12654v1
- **LangChain API Reference:** [langchain_community.embeddings.laser.LaserEmbeddings](https://api.python.langchain.com/en/latest/embeddings/langchain_community.embeddings.laser.LaserEmbeddings.html#langchain_community.embeddings.laser.LaserEmbeddings)
**Abstract:** Scaling multilingual representation learning beyond the hundred most frequent
languages is challenging, in particular to cover the long tail of low-resource
languages. A promising approach has been to train one-for-all multilingual
models capable of cross-lingual transfer, but these models often suffer from
insufficient capacity and interference between unrelated languages. Instead, we
move away from this approach and focus on training multiple language (family)
specific representations, but most prominently enable all languages to still be
encoded in the same representational space. To achieve this, we focus on
teacher-student training, allowing all encoders to be mutually compatible for
bitext mining, and enabling fast learning of new languages. We introduce a new
teacher-student training scheme which combines supervised and self-supervised
training, allowing encoders to take advantage of monolingual training data,
which is valuable in the low-resource setting.
Our approach significantly outperforms the original LASER encoder. We study
very low-resource languages and handle 50 African languages, many of which are
not covered by any other model. For these languages, we train sentence
encoders, mine bitexts, and validate the bitexts by training NMT systems.
## Evaluating the Text-to-SQL Capabilities of Large Language Models
- **arXiv id:** 2204.00498v1
- **Title:** Evaluating the Text-to-SQL Capabilities of Large Language Models
- **Authors:** Nitarshan Rajkumar, Raymond Li, Dzmitry Bahdanau
- **Published Date:** 2022-03-15
- **URL:** http://arxiv.org/abs/2204.00498v1
- **LangChain Documentation:** [docs/use_cases/sql/quickstart](https://python.langchain.com/docs/use_cases/sql/quickstart)
- **LangChain API Reference:** [langchain_community.utilities.sql_database.SQLDatabase](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.sql_database.SQLDatabase.html#langchain_community.utilities.sql_database.SQLDatabase), [langchain_community.utilities.spark_sql.SparkSQL](https://api.python.langchain.com/en/latest/utilities/langchain_community.utilities.spark_sql.SparkSQL.html#langchain_community.utilities.spark_sql.SparkSQL)
**Abstract:** We perform an empirical evaluation of Text-to-SQL capabilities of the Codex
language model. We find that, without any finetuning, Codex is a strong
baseline on the Spider benchmark; we also analyze the failure modes of Codex in
this setting. Furthermore, we demonstrate on the GeoQuery and Scholar
benchmarks that a small number of in-domain examples provided in the prompt
enables Codex to perform better than state-of-the-art models finetuned on such
few-shot examples.
## Locally Typical Sampling
- **arXiv id:** 2202.00666v5
- **Title:** Locally Typical Sampling
- **Authors:** Clara Meister, Tiago Pimentel, Gian Wiher, et al.
- **Published Date:** 2022-02-01
- **URL:** http://arxiv.org/abs/2202.00666v5
- **LangChain API Reference:** [langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint)
**Abstract:** Today's probabilistic language generators fall short when it comes to
producing coherent and fluent text despite the fact that the underlying models
perform well under standard metrics, e.g., perplexity. This discrepancy has
puzzled the language generation community for the last few years. In this work,
we posit that the abstraction of natural language generation as a discrete
stochastic process--which allows for an information-theoretic analysis--can
provide new insights into the behavior of probabilistic language generators,
e.g., why high-probability texts can be dull or repetitive. Humans use language
as a means of communicating information, aiming to do so in a simultaneously
efficient and error-minimizing manner; in fact, psycholinguistics research
suggests humans choose each word in a string with this subconscious goal in
mind. We formally define the set of strings that meet this criterion: those for
which each word has an information content close to the expected information
content, i.e., the conditional entropy of our model. We then propose a simple
and efficient procedure for enforcing this criterion when generating from
probabilistic models, which we call locally typical sampling. Automatic and
human evaluations show that, in comparison to nucleus and top-k sampling,
locally typical sampling offers competitive performance (in both abstractive
summarization and story generation) in terms of quality while consistently
reducing degenerate repetitions.
## Learning Transferable Visual Models From Natural Language Supervision
- **arXiv id:** 2103.00020v1
- **Title:** Learning Transferable Visual Models From Natural Language Supervision
- **Authors:** Alec Radford, Jong Wook Kim, Chris Hallacy, et al.
- **Published Date:** 2021-02-26
- **URL:** http://arxiv.org/abs/2103.00020v1
- **LangChain API Reference:** [langchain_experimental.open_clip](https://api.python.langchain.com/en/latest/experimental_api_reference.html#module-langchain_experimental.open_clip)
**Abstract:** State-of-the-art computer vision systems are trained to predict a fixed set
of predetermined object categories. This restricted form of supervision limits
their generality and usability since additional labeled data is needed to
specify any other visual concept. Learning directly from raw text about images
is a promising alternative which leverages a much broader source of
supervision. We demonstrate that the simple pre-training task of predicting
which caption goes with which image is an efficient and scalable way to learn
SOTA image representations from scratch on a dataset of 400 million (image,
text) pairs collected from the internet. After pre-training, natural language
is used to reference learned visual concepts (or describe new ones) enabling
zero-shot transfer of the model to downstream tasks. We study the performance
of this approach by benchmarking on over 30 different existing computer vision
datasets, spanning tasks such as OCR, action recognition in videos,
geo-localization, and many types of fine-grained object classification. The
model transfers non-trivially to most tasks and is often competitive with a
fully supervised baseline without the need for any dataset specific training.
For instance, we match the accuracy of the original ResNet-50 on ImageNet
zero-shot without needing to use any of the 1.28 million training examples it
was trained on. We release our code and pre-trained model weights at
https://github.com/OpenAI/CLIP.
## CTRL: A Conditional Transformer Language Model for Controllable Generation
- **arXiv id:** 1909.05858v2
- **Title:** CTRL: A Conditional Transformer Language Model for Controllable Generation
- **Authors:** Nitish Shirish Keskar, Bryan McCann, Lav R. Varshney, et al.
- **Published Date:** 2019-09-11
- **URL:** http://arxiv.org/abs/1909.05858v2
- **LangChain API Reference:** [langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference.html#langchain_community.llms.huggingface_text_gen_inference.HuggingFaceTextGenInference), [langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint](https://api.python.langchain.com/en/latest/llms/langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint.html#langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint)
**Abstract:** Large-scale language models show promising text generation capabilities, but
users cannot easily control particular aspects of the generated text. We
release CTRL, a 1.63 billion-parameter conditional transformer language model,
trained to condition on control codes that govern style, content, and
task-specific behavior. Control codes were derived from structure that
naturally co-occurs with raw text, preserving the advantages of unsupervised
learning while providing more explicit control over text generation. These
codes also allow CTRL to predict which parts of the training data are most
likely given a sequence. This provides a potential method for analyzing large
amounts of data via model-based source attribution. We have released multiple
full-sized, pretrained versions of CTRL at https://github.com/salesforce/ctrl.
## Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks
- **arXiv id:** 1908.10084v1
- **Title:** Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks
- **Authors:** Nils Reimers, Iryna Gurevych
- **Published Date:** 2019-08-27
- **URL:** http://arxiv.org/abs/1908.10084v1
- **LangChain Documentation:** [docs/integrations/text_embedding/sentence_transformers](https://python.langchain.com/docs/integrations/text_embedding/sentence_transformers)
**Abstract:** BERT (Devlin et al., 2018) and RoBERTa (Liu et al., 2019) have set a new
state-of-the-art performance on sentence-pair regression tasks like semantic
textual similarity (STS). However, it requires that both sentences are fed into
the network, which causes a massive computational overhead: Finding the most
similar pair in a collection of 10,000 sentences requires about 50 million
inference computations (~65 hours) with BERT. The construction of BERT makes it
unsuitable for semantic similarity search as well as for unsupervised tasks
like clustering.
In this publication, we present Sentence-BERT (SBERT), a modification of the
pretrained BERT network that uses siamese and triplet network structures to
derive semantically meaningful sentence embeddings that can be compared using
cosine-similarity. This reduces the effort for finding the most similar pair
from 65 hours with BERT / RoBERTa to about 5 seconds with SBERT, while
maintaining the accuracy from BERT.
We evaluate SBERT and SRoBERTa on common STS tasks and transfer learning
tasks, where it outperforms other state-of-the-art sentence embeddings methods.

@ -1,18 +1,10 @@
# Tutorials
## Books and Handbooks
- [Generative AI with LangChain](https://www.amazon.com/Generative-AI-LangChain-language-ChatGPT/dp/1835083463/ref=sr_1_1?crid=1GMOMH0G7GLR&keywords=generative+ai+with+langchain&qid=1703247181&sprefix=%2Caps%2C298&sr=8-1) by [Ben Auffarth](https://www.amazon.com/stores/Ben-Auffarth/author/B08JQKSZ7D?ref=ap_rdr&store_ref=ap_rdr&isDramIntegrated=true&shoppingPortalEnabled=true), ©️ 2023 Packt Publishing
- [LangChain AI Handbook](https://www.pinecone.io/learn/langchain/) By **James Briggs** and **Francisco Ingham**
- [LangChain Cheatsheet](https://pub.towardsai.net/langchain-cheatsheet-all-secrets-on-a-single-page-8be26b721cde) by **Ivan Reznikov**
# 3rd Party Tutorials
## Tutorials
### [LangChain v 0.1 by LangChain.ai](https://www.youtube.com/playlist?list=PLfaIDFEXuae0gBSJ9T0w7cu7iJZbH3T31)
### [Build with Langchain - Advanced by LangChain.ai](https://www.youtube.com/playlist?list=PLfaIDFEXuae06tclDATrMYY0idsTdLg9v)
### [LangGraph by LangChain.ai](https://www.youtube.com/playlist?list=PLfaIDFEXuae16n2TWUkKq5PgJ0w6Pkwtg)
### [by Greg Kamradt](https://www.youtube.com/playlist?list=PLqZXAkvF1bPNQER9mLmDbntNfSpzdDIU5)
### [by Sam Witteveen](https://www.youtube.com/playlist?list=PL8motc6AQftk1Bs42EW45kwYbyJ4jOdiZ)
### [by James Briggs](https://www.youtube.com/playlist?list=PLIUOU7oqGTLieV9uTIFMm6_4PXg-hlN6F)
@ -20,7 +12,6 @@
### [by Mayo Oshin](https://www.youtube.com/@chatwithdata/search?query=langchain)
### [by 1 little Coder](https://www.youtube.com/playlist?list=PLpdmBGJ6ELUK-v0MK-t4wZmVEbxM5xk6L)
## Courses
### Featured courses on Deeplearning.AI
@ -33,6 +24,7 @@
### Online courses
- [Udemy](https://www.udemy.com/courses/search/?q=langchain)
- [DataCamp](https://www.datacamp.com/courses/developing-llm-applications-with-langchain)
- [Pluralsight](https://www.pluralsight.com/search?q=langchain)
- [Coursera](https://www.coursera.org/search?query=langchain)
- [Maven](https://maven.com/courses?query=langchain)
@ -48,7 +40,11 @@
- [by Rabbitmetrics](https://youtu.be/aywZrzNaKjs)
- [by Ivan Reznikov](https://medium.com/@ivanreznikov/langchain-101-course-updated-668f7b41d6cb)
## [Documentation: Use cases](/docs/how_to#use-cases)
## Books and Handbooks
- [Generative AI with LangChain](https://www.amazon.com/Generative-AI-LangChain-language-ChatGPT/dp/1835083463/ref=sr_1_1?crid=1GMOMH0G7GLR&keywords=generative+ai+with+langchain&qid=1703247181&sprefix=%2Caps%2C298&sr=8-1) by [Ben Auffarth](https://www.amazon.com/stores/Ben-Auffarth/author/B08JQKSZ7D?ref=ap_rdr&store_ref=ap_rdr&isDramIntegrated=true&shoppingPortalEnabled=true), ©️ 2023 Packt Publishing
- [LangChain AI Handbook](https://www.pinecone.io/learn/langchain/) By **James Briggs** and **Francisco Ingham**
- [LangChain Cheatsheet](https://pub.towardsai.net/langchain-cheatsheet-all-secrets-on-a-single-page-8be26b721cde) by **Ivan Reznikov**
---------------------

@ -128,13 +128,14 @@ LangChain provides standard, extendable interfaces and external integrations for
Some components LangChain implements, some components we rely on third-party integrations for, and others are a mix.
### Chat models
Language models that use a sequence of messages as inputs and return chat messages as outputs (as opposed to using plain text).
These are traditionally newer models (older models are generally `LLMs`, see below).
Chat models support the assignment of distinct roles to conversation messages, helping to distinguish messages from the AI, users, and instructions such as system messages.
Although the underlying models are messages in, message out, the LangChain wrappers also allow these models to take a string as input.
This makes them interchangeable with LLMs (and simpler to use).
When a string is passed in as input, it will be converted to a HumanMessage under the hood before being passed to the underlying model.
Although the underlying models are messages in, message out, the LangChain wrappers also allow these models to take a string as input. This means you can easily use chat models in place of LLMs.
When a string is passed in as input, it is converted to a HumanMessage and then passed to the underlying model.
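As a minimal sketch of this equivalence (assuming a chat model integration such as `langchain_anthropic` is installed and configured), passing a string and passing an explicit `HumanMessage` result in the same call to the underlying model:

```python
from langchain_anthropic import ChatAnthropic  # assumed integration; any chat model works
from langchain_core.messages import HumanMessage

model = ChatAnthropic(model="claude-3-sonnet-20240229")

# A plain string is wrapped in a HumanMessage before it reaches the model...
from_string = model.invoke("Tell me a joke about cats")

# ...so it is equivalent to passing the message explicitly.
from_message = model.invoke([HumanMessage(content="Tell me a joke about cats")])
```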
LangChain does not provide any ChatModels; rather, we rely on third-party integrations.
@ -143,7 +144,14 @@ We have some standardized parameters when constructing ChatModels:
ChatModels also accept other parameters that are specific to that integration.
:::important
**Tool Calling** Some chat models have been fine-tuned for tool calling and provide a dedicated API for tool calling.
Generally, such models are better at tool calling than non-fine-tuned models, and are recommended for use cases that require tool calling.
Please see the [tool calling section](/docs/concepts/#functiontool-calling) for more information.
:::
### LLMs
Language models that take a string as input and return a string.
These are traditionally older models (newer models generally are `ChatModels`, see above).
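As a rough sketch of the string-in / string-out interface (assuming the `langchain_openai` package and a completion-style model; details vary by provider):

```python
from langchain_openai import OpenAI  # assumed completion-style (non-chat) model wrapper

llm = OpenAI(model="gpt-3.5-turbo-instruct")

# Plain string in, plain string out.
text = llm.invoke("Write one sentence explaining what an LLM is.")
print(type(text))  # <class 'str'>
```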
@ -239,7 +247,7 @@ from langchain_core.prompts import ChatPromptTemplate
prompt_template = ChatPromptTemplate.from_messages([
("system", "You are a helpful assistant"),
("user", "Tell me a joke about {topic}"
("user", "Tell me a joke about {topic}")
])
prompt_template.invoke({"topic": "cats"})
@ -409,22 +417,30 @@ Retrievers can be created from vectorstores, but are also broad enough to includ
Retrievers accept a string query as input and return a list of `Document` objects as output.
### Tools
Tools are interfaces that an agent, chain, or LLM can use to interact with the world.
They combine a few things:
Tools are interfaces that an agent, a chain, or a chat model / LLM can use to interact with the world.
A tool consists of the following components:
1. The name of the tool
2. A description of what the tool is
2. A description of what the tool does
3. JSON schema of what the inputs to the tool are
4. The function to call
5. Whether the result of a tool should be returned directly to the user
5. Whether the result of a tool should be returned directly to the user (only relevant for agents)
It is useful to have all this information because this information can be used to build action-taking systems! The name, description, and JSON schema can be used to prompt the LLM so it knows how to specify what action to take, and then the function to call is equivalent to taking that action.
The name, description and JSON schema are provided as context
to the LLM, allowing the LLM to determine how to use the tool
appropriately.
The simpler the input to a tool is, the easier it is for an LLM to be able to use it.
Many agents will only work with tools that have a single string input.
Given a list of available tools and a prompt, an LLM can request
that one or more tools be invoked with appropriate arguments.
Importantly, the name, description, and JSON schema (if used) are all used in the prompt. Therefore, it is really important that they are clear and describe exactly how the tool should be used. You may need to change the default name, description, or JSON schema if the LLM does not understand how to use the tool.
Generally, when designing tools to be used by a chat model or LLM, it is important to keep in mind the following:
- Chat models that have been fine-tuned for tool calling will be better at tool calling than non-fine-tuned models.
- Non fine-tuned models may not be able to use tools at all, especially if the tools are complex or require multiple tool calls.
- Models will perform better if the tools have well-chosen names, descriptions, and JSON schemas.
- Simpler tools are generally easier for models to use than more complex tools.
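To make the components above concrete, here is a minimal sketch of a custom tool built with the `@tool` decorator; the `multiply` function itself is a hypothetical example, and the name, description, and argument schema are inferred from the signature and docstring:

```python
from langchain_core.tools import tool


@tool
def multiply(a: int, b: int) -> int:
    """Multiply two integers and return the product."""
    return a * b


# The components listed above are available on the resulting tool object.
print(multiply.name)         # "multiply"
print(multiply.description)  # derived from the docstring
print(multiply.args)         # JSON-schema-like description of the inputs
print(multiply.invoke({"a": 6, "b": 7}))  # 42
```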
### Toolkits
@ -460,6 +476,87 @@ If you are still using AgentExecutor, do not fear: we still have a guide on [how
It is recommended, however, that you start to transition to LangGraph.
To assist with this, we have put together a [transition guide on how to do so](/docs/how_to/migrate_agent).
### Callbacks
LangChain provides a callbacks system that allows you to hook into the various stages of your LLM application. This is useful for logging, monitoring, streaming, and other tasks.
You can subscribe to these events by using the `callbacks` argument available throughout the API. This argument is a list of handler objects, which are expected to implement one or more of the methods described below in more detail.
#### Callback handlers
`CallbackHandlers` are objects that implement the [`CallbackHandler`](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.base.BaseCallbackHandler.html#langchain-core-callbacks-base-basecallbackhandler) interface, which has a method for each event that can be subscribed to.
The `CallbackManager` will call the appropriate method on each handler when the event is triggered.
```python
class BaseCallbackHandler:
    """Base callback handler that can be used to handle callbacks from langchain."""

    def on_llm_start(
        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
    ) -> Any:
        """Run when LLM starts running."""

    def on_chat_model_start(
        self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], **kwargs: Any
    ) -> Any:
        """Run when Chat Model starts running."""

    def on_llm_new_token(self, token: str, **kwargs: Any) -> Any:
        """Run on new LLM token. Only available when streaming is enabled."""

    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> Any:
        """Run when LLM ends running."""

    def on_llm_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> Any:
        """Run when LLM errors."""

    def on_chain_start(
        self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any
    ) -> Any:
        """Run when chain starts running."""

    def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> Any:
        """Run when chain ends running."""

    def on_chain_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> Any:
        """Run when chain errors."""

    def on_tool_start(
        self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
    ) -> Any:
        """Run when tool starts running."""

    def on_tool_end(self, output: Any, **kwargs: Any) -> Any:
        """Run when tool ends running."""

    def on_tool_error(
        self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
    ) -> Any:
        """Run when tool errors."""

    def on_text(self, text: str, **kwargs: Any) -> Any:
        """Run on arbitrary text."""

    def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
        """Run on agent action."""

    def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any:
        """Run on agent end."""
```
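As a minimal sketch, a concrete handler only needs to override the events it cares about (the `LoggingHandler` name and printed messages below are illustrative):

```python
from typing import Any, Dict, List

from langchain_core.callbacks import BaseCallbackHandler


class LoggingHandler(BaseCallbackHandler):
    """Illustrative handler that prints LLM start events and streamed tokens."""

    def on_llm_start(
        self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
    ) -> None:
        print(f"LLM started with {len(prompts)} prompt(s)")

    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        print(f"new token: {token!r}")
```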
#### Passing callbacks
The `callbacks` property is available on most objects throughout the API (Models, Tools, Agents, etc.) in two different places:
- **Constructor callbacks**: defined in the constructor, e.g. `ChatAnthropic(callbacks=[handler], tags=['a-tag'])`. In this case, the callbacks will be used for all calls made on that object, and will be scoped to that object only.
For example, if you initialize a chat model with constructor callbacks, then use it within a chain, the callbacks will only be invoked for calls to that model.
- **Request callbacks**: passed into the `invoke` method used for issuing a request. In this case, the callbacks will be used for that specific request only, and all sub-requests that it contains (e.g. a call to a sequence that triggers a call to a model, which uses the same handler passed in the `invoke()` method).
In the `invoke()` method, callbacks are passed through the `config` parameter.
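A hedged sketch of both styles, reusing the illustrative `LoggingHandler` sketched above and assuming the `langchain_anthropic` integration:

```python
from langchain_anthropic import ChatAnthropic  # assumed integration
from langchain_core.prompts import ChatPromptTemplate

handler = LoggingHandler()  # the illustrative handler sketched above

# Constructor callbacks: scoped to this model object only.
model = ChatAnthropic(model="claude-3-sonnet-20240229", callbacks=[handler])

# Request callbacks: passed via `config`, applied to this call and its sub-calls.
prompt = ChatPromptTemplate.from_messages([("user", "Tell me a joke about {topic}")])
chain = prompt | model
chain.invoke({"topic": "bears"}, config={"callbacks": [handler]})
```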
## Techniques
### Function/tool calling
@ -494,12 +591,18 @@ receive the tool call, execute it, and return the output to the LLM to inform it
response. LangChain includes a suite of [built-in tools](/docs/integrations/tools/)
and supports several methods for defining your own [custom tools](/docs/how_to/custom_tools).
LangChain provides a standardized interface for tool calling that is consistent across different models.
The standard interface consists of:
* `ChatModel.bind_tools()`: a method for specifying which tools are available for a model to call.
* `AIMessage.tool_calls`: an attribute on the `AIMessage` returned from the model for accessing the tool calls requested by the model.
There are two main use cases for function/tool calling:
- [How to return structured data from an LLM](/docs/how_to/structured_output/)
- [How to use a model to call tools](/docs/how_to/tool_calling/)
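A hedged end-to-end sketch of this interface, assuming a tool-calling-capable chat model from `langchain_anthropic` and an illustrative `multiply` tool:

```python
from langchain_anthropic import ChatAnthropic  # assumed tool-calling-capable model
from langchain_core.tools import tool


@tool
def multiply(a: int, b: int) -> int:
    """Multiply two integers and return the product."""
    return a * b


model = ChatAnthropic(model="claude-3-sonnet-20240229")

# Advertise the tool to the model via the standard bind_tools() method.
model_with_tools = model.bind_tools([multiply])

ai_msg = model_with_tools.invoke("What is 6 multiplied by 7?")

# Any tool calls requested by the model are exposed on AIMessage.tool_calls,
# e.g. [{"name": "multiply", "args": {"a": 6, "b": 7}, "id": "..."}]
for call in ai_msg.tool_calls:
    print(call["name"], call["args"])
```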
### Retrieval
LangChain provides several advanced retrieval types. A full list is below, along with the following information:

@ -6,7 +6,7 @@ sidebar_position: 0.5
If you plan on contributing to LangChain code or documentation, it can be useful
to understand the high level structure of the repository.
LangChain is organized as a [monorep](https://en.wikipedia.org/wiki/Monorepo) that contains multiple packages.
LangChain is organized as a [monorepo](https://en.wikipedia.org/wiki/Monorepo) that contains multiple packages.
Here's the structure visualized as a tree:

@ -66,7 +66,7 @@
"```\n",
"\n",
"\n",
"For more details, see our [Installation guide](/docs/installation).\n",
"For more details, see our [Installation guide](/docs/how_to/installation).\n",
"\n",
"### LangSmith\n",
"\n",

@ -16,7 +16,7 @@
"id": "711752cb-4f15-42a3-9838-a0c67f397771",
"metadata": {},
"source": [
"# How to attach runtime arguments to a Runnable\n",
"# How to add default invocation args to a Runnable\n",
"\n",
":::info Prerequisites\n",
"\n",

@ -0,0 +1,171 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# How to use callbacks in async environments\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"\n",
"- [Callbacks](/docs/concepts/#callbacks)\n",
"- [Custom callback handlers](/docs/how_to/custom_callbacks)\n",
"\n",
":::\n",
"\n",
"If you are planning to use the async APIs, it is recommended to use and extend [`AsyncCallbackHandler`](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.base.AsyncCallbackHandler.html) to avoid blocking the runloop.\n",
"\n",
"**Note**: if you use a sync `CallbackHandler` while using an async method to run your LLM / Chain / Tool / Agent, it will still work. However, under the hood, it will be called with [`run_in_executor`](https://docs.python.org/3/library/asyncio-eventloop.html#asyncio.loop.run_in_executor) which can cause issues if your `CallbackHandler` is not thread-safe."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# | output: false\n",
"# | echo: false\n",
"\n",
"%pip install -qU langchain langchain_anthropic\n",
"\n",
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"ANTHROPIC_API_KEY\"] = getpass.getpass()"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"zzzz....\n",
"Hi! I just woke up. Your llm is starting\n",
"Sync handler being called in a `thread_pool_executor`: token: Here\n",
"Sync handler being called in a `thread_pool_executor`: token: 's\n",
"Sync handler being called in a `thread_pool_executor`: token: a\n",
"Sync handler being called in a `thread_pool_executor`: token: little\n",
"Sync handler being called in a `thread_pool_executor`: token: joke\n",
"Sync handler being called in a `thread_pool_executor`: token: for\n",
"Sync handler being called in a `thread_pool_executor`: token: you\n",
"Sync handler being called in a `thread_pool_executor`: token: :\n",
"Sync handler being called in a `thread_pool_executor`: token: \n",
"\n",
"Why\n",
"Sync handler being called in a `thread_pool_executor`: token: can\n",
"Sync handler being called in a `thread_pool_executor`: token: 't\n",
"Sync handler being called in a `thread_pool_executor`: token: a\n",
"Sync handler being called in a `thread_pool_executor`: token: bicycle\n",
"Sync handler being called in a `thread_pool_executor`: token: stan\n",
"Sync handler being called in a `thread_pool_executor`: token: d up\n",
"Sync handler being called in a `thread_pool_executor`: token: by\n",
"Sync handler being called in a `thread_pool_executor`: token: itself\n",
"Sync handler being called in a `thread_pool_executor`: token: ?\n",
"Sync handler being called in a `thread_pool_executor`: token: Because\n",
"Sync handler being called in a `thread_pool_executor`: token: it\n",
"Sync handler being called in a `thread_pool_executor`: token: 's\n",
"Sync handler being called in a `thread_pool_executor`: token: two\n",
"Sync handler being called in a `thread_pool_executor`: token: -\n",
"Sync handler being called in a `thread_pool_executor`: token: tire\n",
"zzzz....\n",
"Hi! I just woke up. Your llm is ending\n"
]
},
{
"data": {
"text/plain": [
"LLMResult(generations=[[ChatGeneration(text=\"Here's a little joke for you:\\n\\nWhy can't a bicycle stand up by itself? Because it's two-tire\", message=AIMessage(content=\"Here's a little joke for you:\\n\\nWhy can't a bicycle stand up by itself? Because it's two-tire\", id='run-8afc89e8-02c0-4522-8480-d96977240bd4-0'))]], llm_output={}, run=[RunInfo(run_id=UUID('8afc89e8-02c0-4522-8480-d96977240bd4'))])"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import asyncio\n",
"from typing import Any, Dict, List\n",
"\n",
"from langchain.callbacks.base import AsyncCallbackHandler, BaseCallbackHandler\n",
"from langchain_anthropic import ChatAnthropic\n",
"from langchain_core.messages import HumanMessage\n",
"from langchain_core.outputs import LLMResult\n",
"\n",
"\n",
"class MyCustomSyncHandler(BaseCallbackHandler):\n",
" def on_llm_new_token(self, token: str, **kwargs) -> None:\n",
" print(f\"Sync handler being called in a `thread_pool_executor`: token: {token}\")\n",
"\n",
"\n",
"class MyCustomAsyncHandler(AsyncCallbackHandler):\n",
" \"\"\"Async callback handler that can be used to handle callbacks from langchain.\"\"\"\n",
"\n",
" async def on_llm_start(\n",
" self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any\n",
" ) -> None:\n",
" \"\"\"Run when chain starts running.\"\"\"\n",
" print(\"zzzz....\")\n",
" await asyncio.sleep(0.3)\n",
" class_name = serialized[\"name\"]\n",
" print(\"Hi! I just woke up. Your llm is starting\")\n",
"\n",
" async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:\n",
" \"\"\"Run when chain ends running.\"\"\"\n",
" print(\"zzzz....\")\n",
" await asyncio.sleep(0.3)\n",
" print(\"Hi! I just woke up. Your llm is ending\")\n",
"\n",
"\n",
"# To enable streaming, we pass in `streaming=True` to the ChatModel constructor\n",
"# Additionally, we pass in a list with our custom handler\n",
"chat = ChatAnthropic(\n",
" model=\"claude-3-sonnet-20240229\",\n",
" max_tokens=25,\n",
" streaming=True,\n",
" callbacks=[MyCustomSyncHandler(), MyCustomAsyncHandler()],\n",
")\n",
"\n",
"await chat.agenerate([[HumanMessage(content=\"Tell me a joke\")]])"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Next steps\n",
"\n",
"You've now learned how to create your own custom callback handlers.\n",
"\n",
"Next, check out the other how-to guides in this section, such as [how to attach callbacks to a runnable](/docs/how_to/callbacks_attach)."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

@ -0,0 +1,144 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# How to attach callbacks to a module\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"\n",
"- [Callbacks](/docs/concepts/#callbacks)\n",
"- [Custom callback handlers](/docs/how_to/custom_callbacks)\n",
"- [Chaining runnables](/docs/how_to/sequence)\n",
"- [Attach runtime arguments to a Runnable](/docs/how_to/binding)\n",
"\n",
":::\n",
"\n",
"If you are composing a chain of runnables and want to reuse callbacks across multiple executions, you can attach callbacks with the [`.with_config()`](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.with_config) method. This saves you the need to pass callbacks in each time you invoke the chain.\n",
"\n",
"Here's an example:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# | output: false\n",
"# | echo: false\n",
"\n",
"%pip install -qU langchain langchain_anthropic\n",
"\n",
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"ANTHROPIC_API_KEY\"] = getpass.getpass()"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Chain RunnableSequence started\n",
"Chain ChatPromptTemplate started\n",
"Chain ended, outputs: messages=[HumanMessage(content='What is 1 + 2?')]\n",
"Chat model started\n",
"Chat model ended, response: generations=[[ChatGeneration(text='1 + 2 = 3', message=AIMessage(content='1 + 2 = 3', response_metadata={'id': 'msg_01LjC57hgrmzVhEma4yXdLKF', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}}, id='run-393950f9-79b9-4fd6-ac6e-50d93d75b906-0'))]] llm_output={'id': 'msg_01LjC57hgrmzVhEma4yXdLKF', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}} run=None\n",
"Chain ended, outputs: content='1 + 2 = 3' response_metadata={'id': 'msg_01LjC57hgrmzVhEma4yXdLKF', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}} id='run-393950f9-79b9-4fd6-ac6e-50d93d75b906-0'\n"
]
},
{
"data": {
"text/plain": [
"AIMessage(content='1 + 2 = 3', response_metadata={'id': 'msg_01LjC57hgrmzVhEma4yXdLKF', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}}, id='run-393950f9-79b9-4fd6-ac6e-50d93d75b906-0')"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from typing import Any, Dict, List\n",
"\n",
"from langchain_anthropic import ChatAnthropic\n",
"from langchain_core.callbacks import BaseCallbackHandler\n",
"from langchain_core.messages import BaseMessage\n",
"from langchain_core.outputs import LLMResult\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"\n",
"class LoggingHandler(BaseCallbackHandler):\n",
" def on_chat_model_start(\n",
" self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], **kwargs\n",
" ) -> None:\n",
" print(\"Chat model started\")\n",
"\n",
" def on_llm_end(self, response: LLMResult, **kwargs) -> None:\n",
" print(f\"Chat model ended, response: {response}\")\n",
"\n",
" def on_chain_start(\n",
" self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs\n",
" ) -> None:\n",
" print(f\"Chain {serialized.get('name')} started\")\n",
"\n",
" def on_chain_end(self, outputs: Dict[str, Any], **kwargs) -> None:\n",
" print(f\"Chain ended, outputs: {outputs}\")\n",
"\n",
"\n",
"callbacks = [LoggingHandler()]\n",
"llm = ChatAnthropic(model=\"claude-3-sonnet-20240229\")\n",
"prompt = ChatPromptTemplate.from_template(\"What is 1 + {number}?\")\n",
"\n",
"chain = prompt | llm\n",
"\n",
"chain_with_callbacks = chain.with_config(callbacks=callbacks)\n",
"\n",
"chain_with_callbacks.invoke({\"number\": \"2\"})"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"The bound callbacks will run for all nested module runs.\n",
"\n",
"## Next steps\n",
"\n",
"You've now learned how to attach callbacks to a chain.\n",
"\n",
"Next, check out the other how-to guides in this section, such as how to [pass callbacks in at runtime](/docs/how_to/callbacks_runtime)."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

@ -0,0 +1,136 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# How to pass callbacks into a module constructor\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"\n",
"- [Callbacks](/docs/concepts/#callbacks)\n",
"- [Custom callback handlers](/docs/how_to/custom_callbacks)\n",
"\n",
":::\n",
"\n",
"Most LangChain modules allow you to pass `callbacks` directly into the constructor. In this case, the callbacks will only be called for that instance (and any nested runs).\n",
"\n",
"Here's an example:"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"# | output: false\n",
"# | echo: false\n",
"\n",
"%pip install -qU langchain langchain_anthropic\n",
"\n",
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"ANTHROPIC_API_KEY\"] = getpass.getpass()"
]
},
{
"cell_type": "code",
"execution_count": 18,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Chat model started\n",
"Chat model ended, response: generations=[[ChatGeneration(text='1 + 2 = 3', message=AIMessage(content='1 + 2 = 3', response_metadata={'id': 'msg_01CdKsRmeS9WRb8BWnHDEHm7', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}}, id='run-2d7fdf2a-7405-4e17-97c0-67e6b2a65305-0'))]] llm_output={'id': 'msg_01CdKsRmeS9WRb8BWnHDEHm7', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}} run=None\n"
]
},
{
"data": {
"text/plain": [
"AIMessage(content='1 + 2 = 3', response_metadata={'id': 'msg_01CdKsRmeS9WRb8BWnHDEHm7', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}}, id='run-2d7fdf2a-7405-4e17-97c0-67e6b2a65305-0')"
]
},
"execution_count": 18,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from typing import Any, Dict, List\n",
"\n",
"from langchain_anthropic import ChatAnthropic\n",
"from langchain_core.callbacks import BaseCallbackHandler\n",
"from langchain_core.messages import BaseMessage\n",
"from langchain_core.outputs import LLMResult\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"\n",
"class LoggingHandler(BaseCallbackHandler):\n",
" def on_chat_model_start(\n",
" self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], **kwargs\n",
" ) -> None:\n",
" print(\"Chat model started\")\n",
"\n",
" def on_llm_end(self, response: LLMResult, **kwargs) -> None:\n",
" print(f\"Chat model ended, response: {response}\")\n",
"\n",
" def on_chain_start(\n",
" self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs\n",
" ) -> None:\n",
" print(f\"Chain {serialized.get('name')} started\")\n",
"\n",
" def on_chain_end(self, outputs: Dict[str, Any], **kwargs) -> None:\n",
" print(f\"Chain ended, outputs: {outputs}\")\n",
"\n",
"\n",
"callbacks = [LoggingHandler()]\n",
"llm = ChatAnthropic(model=\"claude-3-sonnet-20240229\", callbacks=callbacks)\n",
"prompt = ChatPromptTemplate.from_template(\"What is 1 + {number}?\")\n",
"\n",
"chain = prompt | llm\n",
"\n",
"chain.invoke({\"number\": \"2\"})"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"You can see that we only see events from the chat model run - no chain events from the prompt or broader chain.\n",
"\n",
"## Next steps\n",
"\n",
"You've now learned how to pass callbacks into a constructor.\n",
"\n",
"Next, check out the other how-to guides in this section, such as how to [pass callbacks at runtime](/docs/how_to/callbacks_runtime)."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

@ -0,0 +1,140 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# How to pass callbacks in at runtime\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"\n",
"- [Callbacks](/docs/concepts/#callbacks)\n",
"- [Custom callback handlers](/docs/how_to/custom_callbacks)\n",
"\n",
":::\n",
"\n",
"In many cases, it is advantageous to pass in handlers instead when running the object. When we pass through [`CallbackHandlers`](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.base.BaseCallbackHandler.html#langchain-core-callbacks-base-basecallbackhandler) using the `callbacks` keyword arg when executing an run, those callbacks will be issued by all nested objects involved in the execution. For example, when a handler is passed through to an Agent, it will be used for all callbacks related to the agent and all the objects involved in the agent's execution, in this case, the Tools and LLM.\n",
"\n",
"This prevents us from having to manually attach the handlers to each individual nested object. Here's an example:"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"# | output: false\n",
"# | echo: false\n",
"\n",
"%pip install -qU langchain langchain_anthropic\n",
"\n",
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"ANTHROPIC_API_KEY\"] = getpass.getpass()"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Chain RunnableSequence started\n",
"Chain ChatPromptTemplate started\n",
"Chain ended, outputs: messages=[HumanMessage(content='What is 1 + 2?')]\n",
"Chat model started\n",
"Chat model ended, response: generations=[[ChatGeneration(text='1 + 2 = 3', message=AIMessage(content='1 + 2 = 3', response_metadata={'id': 'msg_01D8Tt5FdtBk5gLTfBPm2tac', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}}, id='run-bb0dddd8-85f3-4e6b-8553-eaa79f859ef8-0'))]] llm_output={'id': 'msg_01D8Tt5FdtBk5gLTfBPm2tac', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}} run=None\n",
"Chain ended, outputs: content='1 + 2 = 3' response_metadata={'id': 'msg_01D8Tt5FdtBk5gLTfBPm2tac', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}} id='run-bb0dddd8-85f3-4e6b-8553-eaa79f859ef8-0'\n"
]
},
{
"data": {
"text/plain": [
"AIMessage(content='1 + 2 = 3', response_metadata={'id': 'msg_01D8Tt5FdtBk5gLTfBPm2tac', 'model': 'claude-3-sonnet-20240229', 'stop_reason': 'end_turn', 'stop_sequence': None, 'usage': {'input_tokens': 16, 'output_tokens': 13}}, id='run-bb0dddd8-85f3-4e6b-8553-eaa79f859ef8-0')"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from typing import Any, Dict, List\n",
"\n",
"from langchain_anthropic import ChatAnthropic\n",
"from langchain_core.callbacks import BaseCallbackHandler\n",
"from langchain_core.messages import BaseMessage\n",
"from langchain_core.outputs import LLMResult\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"\n",
"class LoggingHandler(BaseCallbackHandler):\n",
" def on_chat_model_start(\n",
" self, serialized: Dict[str, Any], messages: List[List[BaseMessage]], **kwargs\n",
" ) -> None:\n",
" print(\"Chat model started\")\n",
"\n",
" def on_llm_end(self, response: LLMResult, **kwargs) -> None:\n",
" print(f\"Chat model ended, response: {response}\")\n",
"\n",
" def on_chain_start(\n",
" self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs\n",
" ) -> None:\n",
" print(f\"Chain {serialized.get('name')} started\")\n",
"\n",
" def on_chain_end(self, outputs: Dict[str, Any], **kwargs) -> None:\n",
" print(f\"Chain ended, outputs: {outputs}\")\n",
"\n",
"\n",
"callbacks = [LoggingHandler()]\n",
"llm = ChatAnthropic(model=\"claude-3-sonnet-20240229\")\n",
"prompt = ChatPromptTemplate.from_template(\"What is 1 + {number}?\")\n",
"\n",
"chain = prompt | llm\n",
"\n",
"chain.invoke({\"number\": \"2\"}, config={\"callbacks\": callbacks})"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"If there are already existing callbacks associated with a module, these will run in addition to any passed in at runtime.\n",
"\n",
"## Next steps\n",
"\n",
"You've now learned how to pass callbacks at runtime.\n",
"\n",
"Next, check out the other how-to guides in this section, such as how to [pass callbacks into a module constructor](/docs/how_to/custom_callbacks)."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

@ -0,0 +1,141 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# How to create custom callback handlers\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"\n",
"- [Callbacks](/docs/concepts/#callbacks)\n",
"\n",
":::\n",
"\n",
"LangChain has some built-in callback handlers, but you will often want to create your own handlers with custom logic.\n",
"\n",
"To create a custom callback handler, we need to determine the [event(s)](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.base.BaseCallbackHandler.html#langchain-core-callbacks-base-basecallbackhandler) we want our callback handler to handle as well as what we want our callback handler to do when the event is triggered. Then all we need to do is attach the callback handler to the object, for example via [the constructor](/docs/how_to/callbacks_constructor) or [at runtime](/docs/how_to/callbacks_runtime).\n",
"\n",
"In the example below, we'll implement streaming with a custom handler.\n",
"\n",
"In our custom callback handler `MyCustomHandler`, we implement the `on_llm_new_token` handler to print the token we have just received. We then attach our custom handler to the model object as a constructor callback."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# | output: false\n",
"# | echo: false\n",
"\n",
"%pip install -qU langchain langchain_anthropic\n",
"\n",
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"ANTHROPIC_API_KEY\"] = getpass.getpass()"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"My custom handler, token: Here\n",
"My custom handler, token: 's\n",
"My custom handler, token: a\n",
"My custom handler, token: bear\n",
"My custom handler, token: joke\n",
"My custom handler, token: for\n",
"My custom handler, token: you\n",
"My custom handler, token: :\n",
"My custom handler, token: \n",
"\n",
"Why\n",
"My custom handler, token: di\n",
"My custom handler, token: d the\n",
"My custom handler, token: bear\n",
"My custom handler, token: dissol\n",
"My custom handler, token: ve\n",
"My custom handler, token: in\n",
"My custom handler, token: water\n",
"My custom handler, token: ?\n",
"My custom handler, token: \n",
"Because\n",
"My custom handler, token: it\n",
"My custom handler, token: was\n",
"My custom handler, token: a\n",
"My custom handler, token: polar\n",
"My custom handler, token: bear\n",
"My custom handler, token: !\n"
]
}
],
"source": [
"from langchain_anthropic import ChatAnthropic\n",
"from langchain_core.callbacks import BaseCallbackHandler\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"\n",
"class MyCustomHandler(BaseCallbackHandler):\n",
" def on_llm_new_token(self, token: str, **kwargs) -> None:\n",
" print(f\"My custom handler, token: {token}\")\n",
"\n",
"\n",
"prompt = ChatPromptTemplate.from_messages([\"Tell me a joke about {animal}\"])\n",
"\n",
"# To enable streaming, we pass in `streaming=True` to the ChatModel constructor\n",
"# Additionally, we pass in our custom handler as a list to the callbacks parameter\n",
"model = ChatAnthropic(\n",
" model=\"claude-3-sonnet-20240229\", streaming=True, callbacks=[MyCustomHandler()]\n",
")\n",
"\n",
"chain = prompt | model\n",
"\n",
"response = chain.invoke({\"animal\": \"bears\"})"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"You can see [this reference page](https://api.python.langchain.com/en/latest/callbacks/langchain_core.callbacks.base.BaseCallbackHandler.html#langchain-core-callbacks-base-basecallbackhandler) for a list of events you can handle. Note that the `handle_chain_*` events run for most LCEL runnables.\n",
"\n",
"## Next steps\n",
"\n",
"You've now learned how to create your own custom callback handlers.\n",
"\n",
"Next, check out the other how-to guides in this section, such as [how to attach callbacks to a runnable](/docs/how_to/callbacks_attach)."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.5"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

@ -5,35 +5,29 @@
"id": "5436020b",
"metadata": {},
"source": [
"# How to create custom Tools\n",
"# How to create custom tools\n",
"\n",
"When constructing your own agent, you will need to provide it with a list of Tools that it can use. Besides the actual function that is called, the Tool consists of several components:\n",
"When constructing an agent, you will need to provide it with a list of `Tool`s that it can use. Besides the actual function that is called, the Tool consists of several components:\n",
"\n",
"- `name` (str), is required and must be unique within a set of tools provided to an agent\n",
"- `description` (str), is optional but recommended, as it is used by an agent to determine tool use\n",
"- `args_schema` (Pydantic BaseModel), is optional but recommended, can be used to provide more information (e.g., few-shot examples) or validation for expected parameters.\n",
"| Attribute | Type | Description |\n",
"|-----------------|---------------------------|------------------------------------------------------------------------------------------------------------------|\n",
"| name | str | Must be unique within a set of tools provided to an LLM or agent. |\n",
"| description | str | Describes what the tool does. Used as context by the LLM or agent. |\n",
"| args_schema | Pydantic BaseModel | Optional but recommended, can be used to provide more information (e.g., few-shot examples) or validation for expected parameters |\n",
"| return_direct | boolean | Only relevant for agents. When True, after invoking the given tool, the agent will stop and return the result direcly to the user. |\n",
"\n",
"LangChain provides 3 ways to create tools:\n",
"\n",
"There are multiple ways to define a tool. In this guide, we will walk through how to do for two functions:\n",
"1. Using [@tool decorator](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.tool.html#langchain_core.tools.tool) -- the simplest way to define a custom tool.\n",
"2. Using [StructuredTool.from_function](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.StructuredTool.html#langchain_core.tools.StructuredTool.from_function) class method -- this is similar to the `@tool` decorator, but allows more configuration and specification of both sync and async implementations.\n",
"3. By sub-classing from [BaseTool](https://api.python.langchain.com/en/latest/tools/langchain_core.tools.BaseTool.html) -- This is the most flexible method, it provides the largest degree of control, at the expense of more effort and code.\n",
"\n",
"1. A made up search function that always returns the string \"LangChain\"\n",
"2. A multiplier function that will multiply two numbers by eachother\n",
"The `@tool` or the `StructuredTool.from_function` class method should be sufficient for most use cases.\n",
"\n",
"The biggest difference here is that the first function only requires one input, while the second one requires multiple. Many agents only work with functions that require single inputs, so it's important to know how to work with those. For the most part, defining these custom tools is the same, but there are some differences."
]
},
{
"cell_type": "code",
"execution_count": 37,
"id": "1aaba18c",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"# Import things that are needed generically\n",
"from langchain.pydantic_v1 import BaseModel, Field\n",
"from langchain.tools import BaseTool, StructuredTool, tool"
":::{.callout-tip}\n",
"\n",
"Models will perform better if the tools have well chosen names, descriptions and JSON schemas.\n",
":::"
]
},
{
@ -48,136 +42,216 @@
},
{
"cell_type": "code",
"execution_count": 4,
"id": "b0ce7de8",
"metadata": {},
"outputs": [],
"source": [
"@tool\n",
"def search(query: str) -> str:\n",
" \"\"\"Look up things online.\"\"\"\n",
" return \"LangChain\""
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "e889fa34",
"execution_count": 1,
"id": "cc7005cd-072f-4d37-8453-6297468e5192",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"search\n",
"search(query: str) -> str - Look up things online.\n",
"{'query': {'title': 'Query', 'type': 'string'}}\n"
"multiply\n",
"multiply(a: int, b: int) -> int - Multiply two numbers.\n",
"{'a': {'title': 'A', 'type': 'integer'}, 'b': {'title': 'B', 'type': 'integer'}}\n"
]
}
],
"source": [
"print(search.name)\n",
"print(search.description)\n",
"print(search.args)"
"from langchain_core.tools import tool\n",
"\n",
"\n",
"@tool\n",
"def multiply(a: int, b: int) -> int:\n",
" \"\"\"Multiply two numbers.\"\"\"\n",
" return a * b\n",
"\n",
"\n",
"# Let's inspect some of the attributes associated with the tool.\n",
"print(multiply.name)\n",
"print(multiply.description)\n",
"print(multiply.args)"
]
},
{
"cell_type": "markdown",
"id": "96698b67-993a-4c97-b867-333132e1eb14",
"metadata": {},
"source": [
"Or create an **async** implementation, like this:"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "0b9694d9",
"execution_count": 2,
"id": "0c0991db-b997-4611-be37-4346e660506b",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.tools import tool\n",
"\n",
"\n",
"@tool\n",
"def multiply(a: int, b: int) -> int:\n",
"async def amultiply(a: int, b: int) -> int:\n",
" \"\"\"Multiply two numbers.\"\"\"\n",
" return a * b"
]
},
{
"cell_type": "markdown",
"id": "98d6eee9",
"metadata": {},
"source": [
"You can also customize the tool name and JSON args by passing them into the tool decorator."
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "d7f9395b",
"execution_count": 3,
"id": "9216d03a-f6ea-4216-b7e1-0661823a4c0b",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"multiply\n",
"multiply(a: int, b: int) -> int - Multiply two numbers.\n",
"{'a': {'title': 'A', 'type': 'integer'}, 'b': {'title': 'B', 'type': 'integer'}}\n"
"multiplication-tool\n",
"multiplication-tool(a: int, b: int) -> int - Multiply two numbers.\n",
"{'a': {'title': 'A', 'description': 'first number', 'type': 'integer'}, 'b': {'title': 'B', 'description': 'second number', 'type': 'integer'}}\n",
"True\n"
]
}
],
"source": [
"from langchain.pydantic_v1 import BaseModel, Field\n",
"\n",
"\n",
"class CalculatorInput(BaseModel):\n",
" a: int = Field(description=\"first number\")\n",
" b: int = Field(description=\"second number\")\n",
"\n",
"\n",
"@tool(\"multiplication-tool\", args_schema=CalculatorInput, return_direct=True)\n",
"def multiply(a: int, b: int) -> int:\n",
" \"\"\"Multiply two numbers.\"\"\"\n",
" return a * b\n",
"\n",
"\n",
"# Let's inspect some of the attributes associated with the tool.\n",
"print(multiply.name)\n",
"print(multiply.description)\n",
"print(multiply.args)"
"print(multiply.args)\n",
"print(multiply.return_direct)"
]
},
{
"cell_type": "markdown",
"id": "98d6eee9",
"id": "b63fcc3b",
"metadata": {},
"source": [
"You can also customize the tool name and JSON args by passing them into the tool decorator."
"## StructuredTool\n",
"\n",
"The `StrurcturedTool.from_function` class method provides a bit more configurability than the `@tool` decorator, without requiring much additional code."
]
},
{
"cell_type": "code",
"execution_count": 43,
"id": "dbbf4b6c",
"execution_count": 4,
"id": "564fbe6f-11df-402d-b135-ef6ff25e1e63",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"6\n",
"10\n"
]
}
],
"source": [
"class SearchInput(BaseModel):\n",
" query: str = Field(description=\"should be a search query\")\n",
"from langchain_core.tools import StructuredTool\n",
"\n",
"\n",
"@tool(\"search-tool\", args_schema=SearchInput, return_direct=True)\n",
"def search(query: str) -> str:\n",
" \"\"\"Look up things online.\"\"\"\n",
" return \"LangChain\""
"def multiply(a: int, b: int) -> int:\n",
" \"\"\"Multiply two numbers.\"\"\"\n",
" return a * b\n",
"\n",
"\n",
"async def amultiply(a: int, b: int) -> int:\n",
" \"\"\"Multiply two numbers.\"\"\"\n",
" return a * b\n",
"\n",
"\n",
"calculator = StructuredTool.from_function(func=multiply, coroutine=amultiply)\n",
"\n",
"print(calculator.invoke({\"a\": 2, \"b\": 3}))\n",
"print(await calculator.ainvoke({\"a\": 2, \"b\": 5}))"
]
},
{
"cell_type": "markdown",
"id": "26b3712a-b38d-4582-b6e6-bc7cfb1d6680",
"metadata": {},
"source": [
"To configure it:"
]
},
{
"cell_type": "code",
"execution_count": 44,
"id": "5950ce32",
"execution_count": 5,
"id": "6bc055d4-1fbe-4db5-8881-9c382eba6b1b",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"search-tool\n",
"search-tool(query: str) -> str - Look up things online.\n",
"{'query': {'title': 'Query', 'description': 'should be a search query', 'type': 'string'}}\n",
"True\n"
"6\n",
"Calculator\n",
"Calculator(a: int, b: int) -> int - multiply numbers\n",
"{'a': {'title': 'A', 'description': 'first number', 'type': 'integer'}, 'b': {'title': 'B', 'description': 'second number', 'type': 'integer'}}\n"
]
}
],
"source": [
"print(search.name)\n",
"print(search.description)\n",
"print(search.args)\n",
"print(search.return_direct)"
"class CalculatorInput(BaseModel):\n",
" a: int = Field(description=\"first number\")\n",
" b: int = Field(description=\"second number\")\n",
"\n",
"\n",
"def multiply(a: int, b: int) -> int:\n",
" \"\"\"Multiply two numbers.\"\"\"\n",
" return a * b\n",
"\n",
"\n",
"calculator = StructuredTool.from_function(\n",
" func=multiply,\n",
" name=\"Calculator\",\n",
" description=\"multiply numbers\",\n",
" args_schema=CalculatorInput,\n",
" return_direct=True,\n",
" # coroutine= ... <- you can specify an async method if desired as well\n",
")\n",
"\n",
"print(calculator.invoke({\"a\": 2, \"b\": 3}))\n",
"print(calculator.name)\n",
"print(calculator.description)\n",
"print(calculator.args)"
]
},
{
"cell_type": "markdown",
"id": "9d11e80c",
"id": "b840074b-9c10-4ca0-aed8-626c52b2398f",
"metadata": {},
"source": [
"## Subclass BaseTool\n",
"\n",
"You can also explicitly define a custom tool by subclassing the BaseTool class. This provides maximal control over the tool definition, but is a bit more work."
"You can define a custom tool by sub-classing from `BaseTool`. This provides maximal control over the tool definition, but requires writing more code."
]
},
{
"cell_type": "code",
"execution_count": 45,
"execution_count": 16,
"id": "1dad8f8e",
"metadata": {},
"outputs": [],
@ -188,10 +262,8 @@
" AsyncCallbackManagerForToolRun,\n",
" CallbackManagerForToolRun,\n",
")\n",
"\n",
"\n",
"class SearchInput(BaseModel):\n",
" query: str = Field(description=\"should be a search query\")\n",
"from langchain.pydantic_v1 import BaseModel\n",
"from langchain_core.tools import BaseTool\n",
"\n",
"\n",
"class CalculatorInput(BaseModel):\n",
@ -199,24 +271,6 @@
" b: int = Field(description=\"second number\")\n",
"\n",
"\n",
"class CustomSearchTool(BaseTool):\n",
" name = \"custom_search\"\n",
" description = \"useful for when you need to answer questions about current events\"\n",
" args_schema: Type[BaseModel] = SearchInput\n",
"\n",
" def _run(\n",
" self, query: str, run_manager: Optional[CallbackManagerForToolRun] = None\n",
" ) -> str:\n",
" \"\"\"Use the tool.\"\"\"\n",
" return \"LangChain\"\n",
"\n",
" async def _arun(\n",
" self, query: str, run_manager: Optional[AsyncCallbackManagerForToolRun] = None\n",
" ) -> str:\n",
" \"\"\"Use the tool asynchronously.\"\"\"\n",
" raise NotImplementedError(\"custom_search does not support async\")\n",
"\n",
"\n",
"class CustomCalculatorTool(BaseTool):\n",
" name = \"Calculator\"\n",
" description = \"useful for when you need to answer questions about math\"\n",
@ -236,35 +290,17 @@
" run_manager: Optional[AsyncCallbackManagerForToolRun] = None,\n",
" ) -> str:\n",
" \"\"\"Use the tool asynchronously.\"\"\"\n",
" raise NotImplementedError(\"Calculator does not support async\")"
" # If the calculation is cheap, you can just delegate to the sync implementation\n",
" # as shown below.\n",
" # If the sync calculation is expensive, you should delete the entire _arun method.\n",
" # LangChain will automatically provide a better implementation that will\n",
" # kick off the task in a thread to make sure it doesn't block other async code.\n",
" return self._run(a, b, run_manager=run_manager.get_sync())"
]
},
{
"cell_type": "code",
"execution_count": 46,
"id": "89933e27",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"custom_search\n",
"useful for when you need to answer questions about current events\n",
"{'query': {'title': 'Query', 'description': 'should be a search query', 'type': 'string'}}\n"
]
}
],
"source": [
"search = CustomSearchTool()\n",
"print(search.name)\n",
"print(search.description)\n",
"print(search.args)"
]
},
{
"cell_type": "code",
"execution_count": 48,
"execution_count": 7,
"id": "bb551c33",
"metadata": {},
"outputs": [
@ -275,7 +311,9 @@
"Calculator\n",
"useful for when you need to answer questions about math\n",
"{'a': {'title': 'A', 'description': 'first number', 'type': 'integer'}, 'b': {'title': 'B', 'description': 'second number', 'type': 'integer'}}\n",
"True\n"
"True\n",
"6\n",
"6\n"
]
}
],
@ -284,80 +322,82 @@
"print(multiply.name)\n",
"print(multiply.description)\n",
"print(multiply.args)\n",
"print(multiply.return_direct)"
"print(multiply.return_direct)\n",
"\n",
"print(multiply.invoke({\"a\": 2, \"b\": 3}))\n",
"print(await multiply.ainvoke({\"a\": 2, \"b\": 3}))"
]
},
{
"cell_type": "markdown",
"id": "b63fcc3b",
"id": "97aba6cc-4bdf-4fab-aff3-d89e7d9c3a09",
"metadata": {},
"source": [
"## StructuredTool dataclass\n",
"## How to create async tools\n",
"\n",
"You can also use a `StructuredTool` dataclass. This methods is a mix between the previous two. It's more convenient than inheriting from the BaseTool class, but provides more functionality than just using a decorator."
]
},
{
"cell_type": "code",
"execution_count": 35,
"id": "56ff7670",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"def search_function(query: str):\n",
" return \"LangChain\"\n",
"LangChain Tools implement the [Runnable interface 🏃](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html).\n",
"\n",
"All Runnables expose the `invoke` and `ainvoke` methods (as well as other methods like `batch`, `abatch`, `astream` etc).\n",
"\n",
"search = StructuredTool.from_function(\n",
" func=search_function,\n",
" name=\"Search\",\n",
" description=\"useful for when you need to answer questions about current events\",\n",
" # coroutine= ... <- you can specify an async method if desired as well\n",
")"
"So even if you only provide an `sync` implementation of a tool, you could still use the `ainvoke` interface, but there\n",
"are some important things to know:\n",
"\n",
"* LangChain's by default provides an async implementation that assumes that the function is expensive to compute, so it'll delegate execution to another thread.\n",
"* If you're working in an async codebase, you should create async tools rather than sync tools, to avoid incuring a small overhead due to that thread.\n",
"* If you need both sync and async implementations, use `StructuredTool.from_function` or sub-class from `BaseTool`.\n",
"* If implementing both sync and async, and the sync code is fast to run, override the default LangChain async implementation and simply call the sync code.\n",
"* You CANNOT and SHOULD NOT use the sync `invoke` with an `async` tool."
]
},
{
"cell_type": "code",
"execution_count": 38,
"id": "d3fd3896",
"execution_count": 8,
"id": "6615cb77-fd4c-4676-8965-f92cc71d4944",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Search\n",
"Search(query: str) - useful for when you need to answer questions about current events\n",
"{'query': {'title': 'Query', 'type': 'string'}}\n"
"6\n",
"10\n"
]
}
],
"source": [
"print(search.name)\n",
"print(search.description)\n",
"print(search.args)"
]
},
{
"cell_type": "markdown",
"id": "e9b560f7",
"metadata": {},
"source": [
"You can also define a custom `args_schema` to provide more information about inputs."
"from langchain_core.tools import StructuredTool\n",
"\n",
"\n",
"def multiply(a: int, b: int) -> int:\n",
" \"\"\"Multiply two numbers.\"\"\"\n",
" return a * b\n",
"\n",
"\n",
"calculator = StructuredTool.from_function(func=multiply)\n",
"\n",
"print(calculator.invoke({\"a\": 2, \"b\": 3}))\n",
"print(\n",
" await calculator.ainvoke({\"a\": 2, \"b\": 5})\n",
") # Uses default LangChain async implementation incurs small overhead"
]
},
{
"cell_type": "code",
"execution_count": 41,
"id": "712c1967",
"execution_count": 9,
"id": "bb2af583-eadd-41f4-a645-bf8748bd3dcd",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"6\n",
"10\n"
]
}
],
"source": [
"class CalculatorInput(BaseModel):\n",
" a: int = Field(description=\"first number\")\n",
" b: int = Field(description=\"second number\")\n",
"from langchain_core.tools import StructuredTool\n",
"\n",
"\n",
"def multiply(a: int, b: int) -> int:\n",
@ -365,185 +405,191 @@
" return a * b\n",
"\n",
"\n",
"calculator = StructuredTool.from_function(\n",
" func=multiply,\n",
" name=\"Calculator\",\n",
" description=\"multiply numbers\",\n",
" args_schema=CalculatorInput,\n",
" return_direct=True,\n",
" # coroutine= ... <- you can specify an async method if desired as well\n",
")"
"async def amultiply(a: int, b: int) -> int:\n",
" \"\"\"Multiply two numbers.\"\"\"\n",
" return a * b\n",
"\n",
"\n",
"calculator = StructuredTool.from_function(func=multiply, coroutine=amultiply)\n",
"\n",
"print(calculator.invoke({\"a\": 2, \"b\": 3}))\n",
"print(\n",
" await calculator.ainvoke({\"a\": 2, \"b\": 5})\n",
") # Uses use provided amultiply without additional overhead"
]
},
{
"cell_type": "markdown",
"id": "c80ffdaa-e4ba-4a70-8500-32bf4f60cc1a",
"metadata": {},
"source": [
"You should not and cannot use `.invoke` when providing only an async definition."
]
},
{
"cell_type": "code",
"execution_count": 42,
"id": "f634081e",
"execution_count": 10,
"id": "4ad0932c-8610-4278-8c57-f9218f654c8a",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Calculator\n",
"Calculator(a: int, b: int) -> int - multiply numbers\n",
"{'a': {'title': 'A', 'description': 'first number', 'type': 'integer'}, 'b': {'title': 'B', 'description': 'second number', 'type': 'integer'}}\n"
"Raised not implemented error. You should not be doing this.\n"
]
}
],
"source": [
"print(calculator.name)\n",
"print(calculator.description)\n",
"print(calculator.args)"
"@tool\n",
"async def multiply(a: int, b: int) -> int:\n",
" \"\"\"Multiply two numbers.\"\"\"\n",
" return a * b\n",
"\n",
"\n",
"try:\n",
" multiply.invoke({\"a\": 2, \"b\": 3})\n",
"except NotImplementedError:\n",
" print(\"Raised not implemented error. You should not be doing this.\")"
]
},
{
"cell_type": "markdown",
"id": "f1da459d",
"id": "f9c746a7-88d7-4afb-bcb8-0e98b891e8b6",
"metadata": {},
"source": [
"## Handling Tool Errors \n",
"When a tool encounters an error and the exception is not caught, the agent will stop executing. If you want the agent to continue execution, you can raise a `ToolException` and set `handle_tool_error` accordingly. \n",
"\n",
"When `ToolException` is thrown, the agent will not stop working, but will handle the exception according to the `handle_tool_error` variable of the tool, and the processing result will be returned to the agent as observation, and printed in red.\n",
"If you're using tools with agents, you will likely need an error handling strategy, so the agent can recover from the error and continue execution.\n",
"\n",
"A simple strategy is to throw a `ToolException` from inside the tool and specify an error handler using `handle_tool_error`. \n",
"\n",
"You can set `handle_tool_error` to `True`, set it a unified string value, or set it as a function. If it's set as a function, the function should take a `ToolException` as a parameter and return a `str` value.\n",
"When the error handler is specified, the exception will be caught and the error handler will decide which output to return from the tool.\n",
"\n",
"You can set `handle_tool_error` to `True`, a string value, or a function. If it's a function, the function should take a `ToolException` as a parameter and return a value.\n",
"\n",
"Please note that only raising a `ToolException` won't be effective. You need to first set the `handle_tool_error` of the tool because its default value is `False`."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "f8bf4668",
"execution_count": 11,
"id": "7094c0e8-6192-4870-a942-aad5b5ae48fd",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.tools import ToolException\n",
"\n",
"\n",
"def search_tool1(s: str):\n",
" raise ToolException(\"The search tool1 is not available.\")"
"def get_weather(city: str) -> int:\n",
" \"\"\"Get weather for the given city.\"\"\"\n",
" raise ToolException(f\"Error: There is no city by the name of {city}.\")"
]
},
{
"cell_type": "markdown",
"id": "7fb56757",
"id": "9d93b217-1d44-4d31-8956-db9ea680ff4f",
"metadata": {},
"source": [
"First, let's see what happens if we don't set `handle_tool_error` - it will error."
"Here's an example with the default `handle_tool_error=True` behavior."
]
},
{
"cell_type": "code",
"execution_count": 58,
"id": "f3dfbcb0",
"execution_count": 12,
"id": "b4d22022-b105-4ccc-a15b-412cb9ea3097",
"metadata": {},
"outputs": [
{
"ename": "ToolException",
"evalue": "The search tool1 is not available.",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mToolException\u001b[0m Traceback (most recent call last)",
"Cell \u001b[0;32mIn[58], line 7\u001b[0m\n\u001b[1;32m 1\u001b[0m search \u001b[38;5;241m=\u001b[39m StructuredTool\u001b[38;5;241m.\u001b[39mfrom_function(\n\u001b[1;32m 2\u001b[0m func\u001b[38;5;241m=\u001b[39msearch_tool1,\n\u001b[1;32m 3\u001b[0m name\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mSearch_tool1\u001b[39m\u001b[38;5;124m\"\u001b[39m,\n\u001b[1;32m 4\u001b[0m description\u001b[38;5;241m=\u001b[39mdescription,\n\u001b[1;32m 5\u001b[0m )\n\u001b[0;32m----> 7\u001b[0m \u001b[43msearch\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrun\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mtest\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/workplace/langchain/libs/core/langchain_core/tools.py:344\u001b[0m, in \u001b[0;36mBaseTool.run\u001b[0;34m(self, tool_input, verbose, start_color, color, callbacks, tags, metadata, run_name, **kwargs)\u001b[0m\n\u001b[1;32m 342\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mhandle_tool_error:\n\u001b[1;32m 343\u001b[0m run_manager\u001b[38;5;241m.\u001b[39mon_tool_error(e)\n\u001b[0;32m--> 344\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m e\n\u001b[1;32m 345\u001b[0m \u001b[38;5;28;01melif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mhandle_tool_error, \u001b[38;5;28mbool\u001b[39m):\n\u001b[1;32m 346\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m e\u001b[38;5;241m.\u001b[39margs:\n",
"File \u001b[0;32m~/workplace/langchain/libs/core/langchain_core/tools.py:337\u001b[0m, in \u001b[0;36mBaseTool.run\u001b[0;34m(self, tool_input, verbose, start_color, color, callbacks, tags, metadata, run_name, **kwargs)\u001b[0m\n\u001b[1;32m 334\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 335\u001b[0m tool_args, tool_kwargs \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_to_args_and_kwargs(parsed_input)\n\u001b[1;32m 336\u001b[0m observation \u001b[38;5;241m=\u001b[39m (\n\u001b[0;32m--> 337\u001b[0m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_run\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mtool_args\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mrun_manager\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrun_manager\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mtool_kwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 338\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m new_arg_supported\n\u001b[1;32m 339\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_run(\u001b[38;5;241m*\u001b[39mtool_args, \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mtool_kwargs)\n\u001b[1;32m 340\u001b[0m )\n\u001b[1;32m 341\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m ToolException \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 342\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mhandle_tool_error:\n",
"File \u001b[0;32m~/workplace/langchain/libs/core/langchain_core/tools.py:631\u001b[0m, in \u001b[0;36mStructuredTool._run\u001b[0;34m(self, run_manager, *args, **kwargs)\u001b[0m\n\u001b[1;32m 622\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mfunc:\n\u001b[1;32m 623\u001b[0m new_argument_supported \u001b[38;5;241m=\u001b[39m signature(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mfunc)\u001b[38;5;241m.\u001b[39mparameters\u001b[38;5;241m.\u001b[39mget(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mcallbacks\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m 624\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m (\n\u001b[1;32m 625\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mfunc(\n\u001b[1;32m 626\u001b[0m \u001b[38;5;241m*\u001b[39margs,\n\u001b[1;32m 627\u001b[0m callbacks\u001b[38;5;241m=\u001b[39mrun_manager\u001b[38;5;241m.\u001b[39mget_child() \u001b[38;5;28;01mif\u001b[39;00m run_manager \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m,\n\u001b[1;32m 628\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs,\n\u001b[1;32m 629\u001b[0m )\n\u001b[1;32m 630\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m new_argument_supported\n\u001b[0;32m--> 631\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfunc\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 632\u001b[0m )\n\u001b[1;32m 633\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mNotImplementedError\u001b[39;00m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mTool does not support sync\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n",
"Cell \u001b[0;32mIn[55], line 5\u001b[0m, in \u001b[0;36msearch_tool1\u001b[0;34m(s)\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21msearch_tool1\u001b[39m(s: \u001b[38;5;28mstr\u001b[39m):\n\u001b[0;32m----> 5\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m ToolException(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mThe search tool1 is not available.\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n",
"\u001b[0;31mToolException\u001b[0m: The search tool1 is not available."
]
"data": {
"text/plain": [
"'Error: There is no city by the name of foobar.'"
]
},
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"search = StructuredTool.from_function(\n",
" func=search_tool1,\n",
" name=\"Search_tool1\",\n",
" description=\"A bad tool\",\n",
"get_weather_tool = StructuredTool.from_function(\n",
" func=get_weather,\n",
" handle_tool_error=True,\n",
")\n",
"\n",
"search.run(\"test\")"
"get_weather_tool.invoke({\"city\": \"foobar\"})"
]
},
{
"cell_type": "markdown",
"id": "d2475acd",
"id": "f91d6dc0-3271-4adc-a155-21f2e62ffa56",
"metadata": {},
"source": [
"Now, let's set `handle_tool_error` to be True"
"We can set `handle_tool_error` to a string that will always be returned."
]
},
{
"cell_type": "code",
"execution_count": 59,
"id": "ab81e0f0",
"execution_count": 13,
"id": "3fad1728-d367-4e1b-9b54-3172981271cf",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'The search tool1 is not available.'"
"\"There is no such city, but it's probably above 0K there!\""
]
},
"execution_count": 59,
"execution_count": 13,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"search = StructuredTool.from_function(\n",
" func=search_tool1,\n",
" name=\"Search_tool1\",\n",
" description=\"A bad tool\",\n",
" handle_tool_error=True,\n",
"get_weather_tool = StructuredTool.from_function(\n",
" func=get_weather,\n",
" handle_tool_error=\"There is no such city, but it's probably above 0K there!\",\n",
")\n",
"\n",
"search.run(\"test\")"
"get_weather_tool.invoke({\"city\": \"foobar\"})"
]
},
{
"cell_type": "markdown",
"id": "dafbbcbe",
"id": "b0a640c1-e08f-4413-83b6-f599f304935f",
"metadata": {},
"source": [
"We can also define a custom way to handle the tool error"
"Handling the error using a function:"
]
},
{
"cell_type": "code",
"execution_count": 60,
"id": "ad16fbcf",
"execution_count": 14,
"id": "ebfe7c1f-318d-4e58-99e1-f31e69473c46",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'The following errors occurred during tool execution:The search tool1 is not available.Please try another tool.'"
"'The following errors occurred during tool execution: `Error: There is no city by the name of foobar.`'"
]
},
"execution_count": 60,
"execution_count": 14,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"def _handle_error(error: ToolException) -> str:\n",
" return (\n",
" \"The following errors occurred during tool execution:\"\n",
" + error.args[0]\n",
" + \"Please try another tool.\"\n",
" )\n",
" return f\"The following errors occurred during tool execution: `{error.args[0]}`\"\n",
"\n",
"\n",
"search = StructuredTool.from_function(\n",
" func=search_tool1,\n",
" name=\"Search_tool1\",\n",
" description=\"A bad tool\",\n",
"get_weather_tool = StructuredTool.from_function(\n",
" func=get_weather,\n",
" handle_tool_error=_handle_error,\n",
")\n",
"\n",
"search.run(\"test\")"
"get_weather_tool.invoke({\"city\": \"foobar\"})"
]
}
],
@ -563,7 +609,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.1"
"version": "3.11.4"
},
"vscode": {
"interpreter": {

@ -0,0 +1,200 @@
{
"cells": [
{
"cell_type": "raw",
"id": "77bf57fb-e990-45f2-8b5f-c76388b05966",
"metadata": {},
"source": [
"---\n",
"keywords: [LCEL]\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "50d57bf2-7104-4570-b3e5-90fd71e1bea1",
"metadata": {},
"source": [
"# How to create a dynamic (self-constructing) chain\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following:\n",
"- [LangChain Expression Language (LCEL)](/docs/concepts/#langchain-expression-language)\n",
"- [How to turn any function into a runnable](/docs/how_to/functions)\n",
"\n",
":::\n",
"\n",
"Sometimes we want to construct parts of a chain at runtime, depending on the chain inputs ([routing](/docs/how_to/routing/) is the most common example of this). We can create dynamic chains like this using a very useful property of RunnableLambda's, which is that if a RunnableLambda returns a Runnable, that Runnable is itself invoked. Let's see an example.\n",
"\n",
"```{=mdx}\n",
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
"\n",
"<ChatModelTabs\n",
" customVarName=\"llm\"\n",
"/>\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "406bffc2-86d0-4cb9-9262-5c1e3442397a",
"metadata": {},
"outputs": [],
"source": [
"# | echo: false\n",
"\n",
"from langchain_anthropic import ChatAnthropic\n",
"\n",
"llm = ChatAnthropic(model=\"claude-3-sonnet-20240229\")"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "0ae6692b-983e-40b8-aa2a-6c078d945b9e",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"\"According to the context provided, Egypt's population in 2024 is estimated to be about 111 million.\""
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_core.output_parsers import StrOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.runnables import Runnable, RunnablePassthrough, chain\n",
"\n",
"contextualize_instructions = \"\"\"Convert the latest user question into a standalone question given the chat history. Don't answer the question, return the question and nothing else (no descriptive text).\"\"\"\n",
"contextualize_prompt = ChatPromptTemplate.from_messages(\n",
" [\n",
" (\"system\", contextualize_instructions),\n",
" (\"placeholder\", \"{chat_history}\"),\n",
" (\"human\", \"{question}\"),\n",
" ]\n",
")\n",
"contextualize_question = contextualize_prompt | llm | StrOutputParser()\n",
"\n",
"qa_instructions = (\n",
" \"\"\"Answer the user question given the following context:\\n\\n{context}.\"\"\"\n",
")\n",
"qa_prompt = ChatPromptTemplate.from_messages(\n",
" [(\"system\", qa_instructions), (\"human\", \"{question}\")]\n",
")\n",
"\n",
"\n",
"@chain\n",
"def contextualize_if_needed(input_: dict) -> Runnable:\n",
" if input_.get(\"chat_history\"):\n",
" # NOTE: This is returning another Runnable, not an actual output.\n",
" return contextualize_question\n",
" else:\n",
" return RunnablePassthrough()\n",
"\n",
"\n",
"@chain\n",
"def fake_retriever(input_: dict) -> str:\n",
" return \"egypt's population in 2024 is about 111 million\"\n",
"\n",
"\n",
"full_chain = (\n",
" RunnablePassthrough.assign(question=contextualize_if_needed).assign(\n",
" context=fake_retriever\n",
" )\n",
" | qa_prompt\n",
" | llm\n",
" | StrOutputParser()\n",
")\n",
"\n",
"full_chain.invoke(\n",
" {\n",
" \"question\": \"what about egypt\",\n",
" \"chat_history\": [\n",
" (\"human\", \"what's the population of indonesia\"),\n",
" (\"ai\", \"about 276 million\"),\n",
" ],\n",
" }\n",
")"
]
},
{
"cell_type": "markdown",
"id": "5076ddb4-4a99-47ad-b549-8ac27ca3e2c6",
"metadata": {},
"source": [
"The key here is that `contextualize_if_needed` returns another Runnable and not an actual output. This returned Runnable is itself run when the full chain is executed.\n",
"\n",
"Looking at the trace we can see that, since we passed in chat_history, we executed the contextualize_question chain as part of the full chain: https://smith.langchain.com/public/9e0ae34c-4082-4f3f-beed-34a2a2f4c991/r"
]
},
{
"cell_type": "markdown",
"id": "4fe6ca44-a643-4859-a290-be68403f51f0",
"metadata": {},
"source": [
"Note that the streaming, batching, etc. capabilities of the returned Runnable are all preserved"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "6def37fa-5105-4090-9b07-77cb488ecd9c",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"What\n",
" is\n",
" the\n",
" population\n",
" of\n",
" Egypt\n",
"?\n"
]
}
],
"source": [
"for chunk in contextualize_if_needed.stream(\n",
" {\n",
" \"question\": \"what about egypt\",\n",
" \"chat_history\": [\n",
" (\"human\", \"what's the population of indonesia\"),\n",
" (\"ai\", \"about 276 million\"),\n",
" ],\n",
" }\n",
"):\n",
" print(chunk)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "poetry-venv-2",
"language": "python",
"name": "poetry-venv-2"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

@ -1,11 +1,21 @@
{
"cells": [
{
"cell_type": "raw",
"id": "018f3868-e60d-4db6-a1c6-c6633c66b1f4",
"metadata": {},
"source": [
"---\n",
"keywords: [LCEL, fallbacks]\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "19c9cbd6",
"metadata": {},
"source": [
"# Fallbacks\n",
"# How to add fallbacks to a runnable\n",
"\n",
"When working with language models, you may often encounter issues from the underlying APIs, whether these be rate limiting or downtime. Therefore, as you go to move your LLM applications into production it becomes more and more important to safeguard against these. That's why we've introduced the concept of fallbacks. \n",
"\n",
@ -447,7 +457,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.5"
"version": "3.9.1"
}
},
"nbformat": 4,

@ -696,7 +696,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.1"
"version": "3.9.1"
}
},
"nbformat": 4,

@ -7,35 +7,41 @@ sidebar_class_name: hidden
Here you'll find answers to “How do I….?” types of questions.
These guides are *goal-oriented* and *concrete*; they're meant to help you complete a specific task.
For conceptual explanations see [Conceptual Guides](/docs/concepts/).
For conceptual explanations see the [Conceptual guide](/docs/concepts/).
For end-to-end walkthroughs see [Tutorials](/docs/tutorials).
For comprehensive descriptions of every class and function see [API Reference](https://api.python.langchain.com/en/latest/).
For comprehensive descriptions of every class and function see the [API Reference](https://api.python.langchain.com/en/latest/).
## Installation
- [How to: install LangChain packages](/docs/how_to/installation/)
## Key features
This highlights functionality that is core to using LangChain.
- [How to: return structured data from an LLM](/docs/how_to/structured_output/)
- [How to: use a chat model to call tools](/docs/how_to/tool_calling/)
- [How to: return structured data from a model](/docs/how_to/structured_output/)
- [How to: use a model to call tools](/docs/how_to/tool_calling/)
- [How to: stream runnables](/docs/how_to/streaming)
- [How to: debug your LLM apps](/docs/how_to/debugging/)
## LangChain Expression Language (LCEL)
LangChain Expression Language is a way to create arbitrary custom chains. It is built on the Runnable protocol.
[LangChain Expression Language](/docs/concepts/#langchain-expression-language-lcel) is a way to create arbitrary custom chains. It is built on the [Runnable](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html) protocol.
[**LCEL cheatsheet**](/docs/how_to/lcel_cheatsheet/): For a quick overview of how to use the main LCEL primitives.
- [How to: chain runnables](/docs/how_to/sequence)
- [How to: stream runnables](/docs/how_to/streaming)
- [How to: invoke runnables in parallel](/docs/how_to/parallel/)
- [How to: attach runtime arguments to a runnable](/docs/how_to/binding/)
- [How to: run custom functions](/docs/how_to/functions)
- [How to: pass through arguments from one step to the next](/docs/how_to/passthrough)
- [How to: add values to a chain's state](/docs/how_to/assign)
- [How to: configure a chain at runtime](/docs/how_to/configure)
- [How to: add message history](/docs/how_to/message_history)
- [How to: route execution within a chain](/docs/how_to/routing)
- [How to: add default invocation args to runnables](/docs/how_to/binding/)
- [How to: turn any function into a runnable](/docs/how_to/functions)
- [How to: pass through inputs from one chain step to the next](/docs/how_to/passthrough)
- [How to: configure runnable behavior at runtime](/docs/how_to/configure)
- [How to: add message history (memory) to a chain](/docs/how_to/message_history)
- [How to: route between sub-chains](/docs/how_to/routing)
- [How to: create a dynamic (self-constructing) chain](/docs/how_to/dynamic_chain/)
- [How to: inspect runnables](/docs/how_to/inspect)
- [How to: add fallbacks](/docs/how_to/fallbacks)
- [How to: add fallbacks to a runnable](/docs/how_to/fallbacks)
## Components
@ -162,15 +168,11 @@ Indexing is the process of keeping your vectorstore in-sync with the underlying
LangChain Tools contain a description of the tool (to pass to the language model) as well as the implementation of the function to call.
- [How to: use LangChain tools](/docs/how_to/tools)
- [How to: create custom tools](/docs/how_to/custom_tools)
- [How to: use built-in tools and built-in toolkits](/docs/how_to/tools_builtin)
- [How to: use a chat model to call tools](/docs/how_to/tool_calling/)
- [How to: use LangChain toolkits](/docs/how_to/toolkits)
- [How to: define a custom tool](/docs/how_to/custom_tools)
- [How to: convert LangChain tools to OpenAI functions](/docs/how_to/tools_as_openai_functions)
- [How to: use tools without function calling](/docs/how_to/tools_prompting)
- [How to: let the LLM choose between multiple tools](/docs/how_to/tools_multiple)
- [How to: add ad-hoc tool calling capability to LLMs and chat models](/docs/how_to/tools_prompting)
- [How to: add a human in the loop to tool usage](/docs/how_to/tools_human)
- [How to: do parallel tool use](/docs/how_to/tools_parallel)
- [How to: handle errors when calling tools](/docs/how_to/tools_error)
- [How to: call tools using multi-modal data](/docs/how_to/tool_calls_multi_modal)
@ -185,6 +187,14 @@ For in depth how-to guides for agents, please check out [LangGraph](https://gith
- [How to: use legacy LangChain Agents (AgentExecutor)](/docs/how_to/agent_executor)
- [How to: migrate from legacy LangChain agents to LangGraph](/docs/how_to/migrate_agent)
### Callbacks
- [How to: pass in callbacks at runtime](/docs/how_to/callbacks_runtime)
- [How to: attach callbacks to a module](/docs/how_to/callbacks_attach)
- [How to: pass callbacks into a module constructor](/docs/how_to/callbacks_constructor)
- [How to: create custom callback handlers](/docs/how_to/custom_callbacks)
- [How to: use callbacks in async environments](/docs/how_to/callbacks_async)
### Custom
All LangChain components can easily be extended to support your own versions.
@ -194,6 +204,7 @@ All of LangChain components can easily be extended to support your own versions.
- [How to: write a custom retriever class](/docs/how_to/custom_retriever)
- [How to: write a custom document loader](/docs/how_to/document_loader_custom)
- [How to: write a custom output parser class](/docs/how_to/output_parser_custom)
- [How to: create custom callback handlers](/docs/how_to/custom_callbacks)
- [How to: define a custom tool](/docs/how_to/custom_tools)

File diff suppressed because it is too large

@ -18,13 +18,13 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": null,
"id": "662fac50",
"metadata": {},
"outputs": [],
"source": [
"%%capture --no-stderr\n",
"%pip install -U langchain-openai langchain langgraph"
"%pip install -U langgraph langchain langchain-openai"
]
},
{
@ -34,12 +34,12 @@
"source": [
"## Basic Usage\n",
"\n",
"First, let's define a model and tool."
"For basic creation and usage of a tool-calling ReAct-style agent, the functionality is the same. First, let's define a model and tool(s), then we'll use those to create an agent."
]
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 1,
"id": "1e425fea-2796-4b99-bee6-9a6ffe73f756",
"metadata": {},
"outputs": [],
@ -72,7 +72,7 @@
},
{
"cell_type": "code",
"execution_count": 15,
"execution_count": 2,
"id": "03ea357c-9c36-4464-b2cc-27bd150e1554",
"metadata": {},
"outputs": [
@ -83,7 +83,7 @@
" 'output': 'The value of `magic_function(3)` is 5.'}"
]
},
"execution_count": 15,
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
@ -119,7 +119,7 @@
},
{
"cell_type": "code",
"execution_count": 16,
"execution_count": 3,
"id": "53a3737a-d167-4255-89bf-20ac37f89a3e",
"metadata": {},
"outputs": [
@ -130,7 +130,7 @@
" 'output': 'The value of `magic_function(3)` is 5.'}"
]
},
"execution_count": 16,
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
@ -150,7 +150,7 @@
},
{
"cell_type": "code",
"execution_count": 17,
"execution_count": 4,
"id": "74ecebe3-512e-409c-a661-bdd5b0a2b782",
"metadata": {},
"outputs": [
@ -158,10 +158,10 @@
"data": {
"text/plain": [
"{'input': 'Pardon?',\n",
" 'output': 'The result of applying the `magic_function` to the input `3` is `5`.'}"
" 'output': 'The result of applying `magic_function` to the input 3 is 5.'}"
]
},
"execution_count": 17,
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
@ -200,7 +200,7 @@
},
{
"cell_type": "code",
"execution_count": 18,
"execution_count": 5,
"id": "a9a11ccd-75e2-4c11-844d-a34870b0ff91",
"metadata": {},
"outputs": [
@ -211,7 +211,7 @@
" 'output': 'El valor de `magic_function(3)` es 5.'}"
]
},
"execution_count": 18,
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
@ -243,7 +243,7 @@
},
{
"cell_type": "code",
"execution_count": 14,
"execution_count": 6,
"id": "a9486805-676a-4d19-a5c4-08b41b172989",
"metadata": {},
"outputs": [],
@ -272,20 +272,16 @@
},
{
"cell_type": "code",
"execution_count": 21,
"execution_count": 7,
"id": "d369ab45-0c82-45f4-9d3e-8efb8dd47e2c",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'input': 'what is the value of magic_function(3)?',\n",
" 'output': 'El valor de magic_function(3) es 5. ¡Pandamonium!'}"
]
},
"execution_count": 21,
"metadata": {},
"output_type": "execute_result"
"name": "stdout",
"output_type": "stream",
"text": [
"{'input': 'what is the value of magic_function(3)?', 'output': 'El valor de magic_function(3) es 5. ¡Pandamonium!'}\n"
]
}
],
"source": [
@ -310,10 +306,278 @@
"\n",
"\n",
"messages = app.invoke({\"messages\": [(\"human\", query)]})\n",
"{\n",
" \"input\": query,\n",
" \"output\": messages[\"messages\"][-1].content,\n",
"}"
"print(\n",
" {\n",
" \"input\": query,\n",
" \"output\": messages[\"messages\"][-1].content,\n",
" }\n",
")"
]
},
{
"cell_type": "markdown",
"id": "68df3a09",
"metadata": {},
"source": [
"## Memory\n",
"\n",
"With LangChain's [AgentExecutor](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor.iter), you could add chat [Memory](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor.memory) so it can engage in a multi-turn conversation."
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "1fb52a2c",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Hi Polly! The output of the magic function for the input 3 is 5.\n",
"---\n",
"Yes, I remember your name, Polly! How can I assist you further?\n",
"---\n",
"The output of the magic function for the input 3 is 5.\n"
]
}
],
"source": [
"from langchain.agents import AgentExecutor, create_tool_calling_agent\n",
"from langchain.memory import ChatMessageHistory\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.runnables.history import RunnableWithMessageHistory\n",
"from langchain_core.tools import tool\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"model = ChatOpenAI(model=\"gpt-4o\")\n",
"memory = ChatMessageHistory(session_id=\"test-session\")\n",
"prompt = ChatPromptTemplate.from_messages(\n",
" [\n",
" (\"system\", \"You are a helpful assistant.\"),\n",
" # First put the history\n",
" (\"placeholder\", \"{chat_history}\"),\n",
" # Then the new input\n",
" (\"human\", \"{input}\"),\n",
" # Finally the scratchpad\n",
" (\"placeholder\", \"{agent_scratchpad}\"),\n",
" ]\n",
")\n",
"\n",
"\n",
"@tool\n",
"def magic_function(input: int) -> int:\n",
" \"\"\"Applies a magic function to an input.\"\"\"\n",
" return input + 2\n",
"\n",
"\n",
"tools = [magic_function]\n",
"\n",
"\n",
"agent = create_tool_calling_agent(model, tools, prompt)\n",
"agent_executor = AgentExecutor(agent=agent, tools=tools)\n",
"\n",
"agent_with_chat_history = RunnableWithMessageHistory(\n",
" agent_executor,\n",
" # This is needed because in most real world scenarios, a session id is needed\n",
" # It isn't really used here because we are using a simple in memory ChatMessageHistory\n",
" lambda session_id: memory,\n",
" input_messages_key=\"input\",\n",
" history_messages_key=\"chat_history\",\n",
")\n",
"\n",
"config = {\"configurable\": {\"session_id\": \"test-session\"}}\n",
"print(\n",
" agent_with_chat_history.invoke(\n",
" {\"input\": \"Hi, I'm polly! What's the output of magic_function of 3?\"}, config\n",
" )[\"output\"]\n",
")\n",
"print(\"---\")\n",
"print(agent_with_chat_history.invoke({\"input\": \"Remember my name?\"}, config)[\"output\"])\n",
"print(\"---\")\n",
"print(\n",
" agent_with_chat_history.invoke({\"input\": \"what was that output again?\"}, config)[\n",
" \"output\"\n",
" ]\n",
")"
]
},
{
"cell_type": "markdown",
"id": "c2a5a32f",
"metadata": {},
"source": [
"#### In LangGraph\n",
"\n",
"Memory is just [persistence](https://langchain-ai.github.io/langgraph/how-tos/persistence/), aka [checkpointing](https://langchain-ai.github.io/langgraph/reference/checkpoints/).\n",
"\n",
"Add a `checkpointer` to the agent and you get chat memory for free."
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "035e1253",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Hi Polly! The output of the magic_function for the input 3 is 5.\n",
"---\n",
"Yes, your name is Polly!\n",
"---\n",
"The output of the magic_function for the input 3 was 5.\n"
]
}
],
"source": [
"from langchain_core.messages import SystemMessage\n",
"from langgraph.checkpoint import MemorySaver # an in-memory checkpointer\n",
"from langgraph.prebuilt import create_react_agent\n",
"\n",
"system_message = \"You are a helpful assistant.\"\n",
"# This could also be a SystemMessage object\n",
"# system_message = SystemMessage(content=\"You are a helpful assistant. Respond only in Spanish.\")\n",
"\n",
"memory = MemorySaver()\n",
"app = create_react_agent(\n",
" model, tools, messages_modifier=system_message, checkpointer=memory\n",
")\n",
"\n",
"config = {\"configurable\": {\"thread_id\": \"test-thread\"}}\n",
"print(\n",
" app.invoke(\n",
" {\n",
" \"messages\": [\n",
" (\"user\", \"Hi, I'm polly! What's the output of magic_function of 3?\")\n",
" ]\n",
" },\n",
" config,\n",
" )[\"messages\"][-1].content\n",
")\n",
"print(\"---\")\n",
"print(\n",
" app.invoke({\"messages\": [(\"user\", \"Remember my name?\")]}, config)[\"messages\"][\n",
" -1\n",
" ].content\n",
")\n",
"print(\"---\")\n",
"print(\n",
" app.invoke({\"messages\": [(\"user\", \"what was that output again?\")]}, config)[\n",
" \"messages\"\n",
" ][-1].content\n",
")"
]
},
{
"cell_type": "markdown",
"id": "d7cf24a8",
"metadata": {},
"source": [
"## Iterating through steps\n",
"\n",
"With LangChain's [AgentExecutor](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor.iter), you could iterate over the steps using the [stream](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.stream) (or async `astream`) methods or the [iter](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor.iter) method. LangGraph supports stepwise iteration using [stream](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.Runnable.html#langchain_core.runnables.base.Runnable.stream) "
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "d640feb3",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'actions': [ToolAgentAction(tool='magic_function', tool_input={'input': 3}, log=\"\\nInvoking: `magic_function` with `{'input': 3}`\\n\\n\\n\", message_log=[AIMessageChunk(content='', additional_kwargs={'tool_calls': [{'index': 0, 'id': 'call_q9MgGFjqJbV2xSUX93WqxmOt', 'function': {'arguments': '{\"input\":3}', 'name': 'magic_function'}, 'type': 'function'}]}, response_metadata={'finish_reason': 'tool_calls'}, id='run-c68fd76f-a3c3-4c3c-bfd7-748c171ed4b8', tool_calls=[{'name': 'magic_function', 'args': {'input': 3}, 'id': 'call_q9MgGFjqJbV2xSUX93WqxmOt'}], tool_call_chunks=[{'name': 'magic_function', 'args': '{\"input\":3}', 'id': 'call_q9MgGFjqJbV2xSUX93WqxmOt', 'index': 0}])], tool_call_id='call_q9MgGFjqJbV2xSUX93WqxmOt')], 'messages': [AIMessageChunk(content='', additional_kwargs={'tool_calls': [{'index': 0, 'id': 'call_q9MgGFjqJbV2xSUX93WqxmOt', 'function': {'arguments': '{\"input\":3}', 'name': 'magic_function'}, 'type': 'function'}]}, response_metadata={'finish_reason': 'tool_calls'}, id='run-c68fd76f-a3c3-4c3c-bfd7-748c171ed4b8', tool_calls=[{'name': 'magic_function', 'args': {'input': 3}, 'id': 'call_q9MgGFjqJbV2xSUX93WqxmOt'}], tool_call_chunks=[{'name': 'magic_function', 'args': '{\"input\":3}', 'id': 'call_q9MgGFjqJbV2xSUX93WqxmOt', 'index': 0}])]}\n",
"{'steps': [AgentStep(action=ToolAgentAction(tool='magic_function', tool_input={'input': 3}, log=\"\\nInvoking: `magic_function` with `{'input': 3}`\\n\\n\\n\", message_log=[AIMessageChunk(content='', additional_kwargs={'tool_calls': [{'index': 0, 'id': 'call_q9MgGFjqJbV2xSUX93WqxmOt', 'function': {'arguments': '{\"input\":3}', 'name': 'magic_function'}, 'type': 'function'}]}, response_metadata={'finish_reason': 'tool_calls'}, id='run-c68fd76f-a3c3-4c3c-bfd7-748c171ed4b8', tool_calls=[{'name': 'magic_function', 'args': {'input': 3}, 'id': 'call_q9MgGFjqJbV2xSUX93WqxmOt'}], tool_call_chunks=[{'name': 'magic_function', 'args': '{\"input\":3}', 'id': 'call_q9MgGFjqJbV2xSUX93WqxmOt', 'index': 0}])], tool_call_id='call_q9MgGFjqJbV2xSUX93WqxmOt'), observation=5)], 'messages': [FunctionMessage(content='5', name='magic_function')]}\n",
"{'output': 'The value of `magic_function(3)` is 5.', 'messages': [AIMessage(content='The value of `magic_function(3)` is 5.')]}\n"
]
}
],
"source": [
"from langchain.agents import AgentExecutor, create_tool_calling_agent\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.tools import tool\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"model = ChatOpenAI(model=\"gpt-4o\")\n",
"\n",
"\n",
"prompt = ChatPromptTemplate.from_messages(\n",
" [\n",
" (\"system\", \"You are a helpful assistant.\"),\n",
" (\"human\", \"{input}\"),\n",
" # Placeholders fill up a **list** of messages\n",
" (\"placeholder\", \"{agent_scratchpad}\"),\n",
" ]\n",
")\n",
"\n",
"\n",
"@tool\n",
"def magic_function(input: int) -> int:\n",
" \"\"\"Applies a magic function to an input.\"\"\"\n",
" return input + 2\n",
"\n",
"\n",
"tools = [magic_function]\n",
"\n",
"agent = create_tool_calling_agent(model, tools, prompt=prompt)\n",
"agent_executor = AgentExecutor(agent=agent, tools=tools)\n",
"\n",
"for step in agent_executor.stream({\"input\": query}):\n",
" print(step)"
]
},
{
"cell_type": "markdown",
"id": "46ccbcbf",
"metadata": {},
"source": [
"#### In LangGraph\n",
"\n",
"In LangGraph, things are handled natively using [stream](https://langchain-ai.github.io/langgraph/reference/graphs/#langgraph.graph.graph.CompiledGraph.stream) or the asynchronous `astream` method."
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "86abbe07",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_yTjXXibj76tyFyPRa1soLo0S', 'function': {'arguments': '{\"input\":3}', 'name': 'magic_function'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 14, 'prompt_tokens': 70, 'total_tokens': 84}, 'model_name': 'gpt-4o', 'system_fingerprint': 'fp_729ea513f7', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-b275f314-c42e-4e77-9dec-5c23f7dbd53b-0', tool_calls=[{'name': 'magic_function', 'args': {'input': 3}, 'id': 'call_yTjXXibj76tyFyPRa1soLo0S'}])]}}\n",
"{'tools': {'messages': [ToolMessage(content='5', name='magic_function', id='41c5f227-528d-4483-a313-b03b23b1d327', tool_call_id='call_yTjXXibj76tyFyPRa1soLo0S')]}}\n",
"{'agent': {'messages': [AIMessage(content='The value of `magic_function(3)` is 5.', response_metadata={'token_usage': {'completion_tokens': 14, 'prompt_tokens': 93, 'total_tokens': 107}, 'model_name': 'gpt-4o', 'system_fingerprint': 'fp_729ea513f7', 'finish_reason': 'stop', 'logprobs': None}, id='run-0ef12b6e-415d-4758-9b62-5e5e1b350072-0')]}}\n"
]
}
],
"source": [
"from langchain_core.messages import AnyMessage\n",
"from langgraph.prebuilt import create_react_agent\n",
"\n",
"prompt = ChatPromptTemplate.from_messages(\n",
" [\n",
" (\"system\", \"You are a helpful assistant.\"),\n",
" (\"placeholder\", \"{messages}\"),\n",
" ]\n",
")\n",
"\n",
"\n",
"def _modify_messages(messages: list[AnyMessage]):\n",
" return prompt.invoke({\"messages\": messages}).to_messages()\n",
"\n",
"\n",
"app = create_react_agent(model, tools, messages_modifier=_modify_messages)\n",
"\n",
"\n",
"for step in app.stream({\"messages\": [(\"human\", query)]}, stream_mode=\"updates\"):\n",
" print(step)"
]
},
{
@ -328,7 +592,7 @@
},
{
"cell_type": "code",
"execution_count": 22,
"execution_count": 12,
"id": "4eff44bc-a620-4c8a-97b1-268692a842bb",
"metadata": {},
"outputs": [
@ -336,7 +600,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
"[(ToolAgentAction(tool='magic_function', tool_input={'input': 3}, log=\"\\nInvoking: `magic_function` with `{'input': 3}`\\n\\n\\n\", message_log=[AIMessageChunk(content='', additional_kwargs={'tool_calls': [{'index': 0, 'id': 'call_lIjE9voYOCFAVoUXSDPQ5bFI', 'function': {'arguments': '{\"input\":3}', 'name': 'magic_function'}, 'type': 'function'}]}, response_metadata={'finish_reason': 'tool_calls'}, id='run-7a23003a-ab50-4d7c-b14b-86129d1cacfe', tool_calls=[{'name': 'magic_function', 'args': {'input': 3}, 'id': 'call_lIjE9voYOCFAVoUXSDPQ5bFI'}], tool_call_chunks=[{'name': 'magic_function', 'args': '{\"input\":3}', 'id': 'call_lIjE9voYOCFAVoUXSDPQ5bFI', 'index': 0}])], tool_call_id='call_lIjE9voYOCFAVoUXSDPQ5bFI'), 5)]\n"
"[(ToolAgentAction(tool='magic_function', tool_input={'input': 3}, log=\"\\nInvoking: `magic_function` with `{'input': 3}`\\n\\n\\n\", message_log=[AIMessageChunk(content='', additional_kwargs={'tool_calls': [{'index': 0, 'id': 'call_ABI4hftfEdnVgKyfF6OzZbca', 'function': {'arguments': '{\"input\":3}', 'name': 'magic_function'}, 'type': 'function'}]}, response_metadata={'finish_reason': 'tool_calls'}, id='run-837e794f-cfd8-40e0-8abc-4d98ced11b75', tool_calls=[{'name': 'magic_function', 'args': {'input': 3}, 'id': 'call_ABI4hftfEdnVgKyfF6OzZbca'}], tool_call_chunks=[{'name': 'magic_function', 'args': '{\"input\":3}', 'id': 'call_ABI4hftfEdnVgKyfF6OzZbca', 'index': 0}])], tool_call_id='call_ABI4hftfEdnVgKyfF6OzZbca'), 5)]\n"
]
}
],
@ -356,20 +620,20 @@
},
{
"cell_type": "code",
"execution_count": 23,
"execution_count": 13,
"id": "4f4364ea-dffe-4d25-bdce-ef7d0020b880",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'messages': [HumanMessage(content='what is the value of magic_function(3)?', id='8c252eb2-9496-4ad0-b3ae-9ecb2f6c406e'),\n",
" AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_xmBLOw2pRqB1aRTTiwqEEftW', 'function': {'arguments': '{\"input\":3}', 'name': 'magic_function'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 14, 'prompt_tokens': 64, 'total_tokens': 78}, 'model_name': 'gpt-4o', 'system_fingerprint': 'fp_729ea513f7', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-2393b69c-7c52-4771-8bec-aca0e097fcc1-0', tool_calls=[{'name': 'magic_function', 'args': {'input': 3}, 'id': 'call_xmBLOw2pRqB1aRTTiwqEEftW'}]),\n",
" ToolMessage(content='5', name='magic_function', id='bec0d0f9-bbaf-49fb-b0cb-46a658658f87', tool_call_id='call_xmBLOw2pRqB1aRTTiwqEEftW'),\n",
" AIMessage(content='The value of `magic_function(3)` is 5.', response_metadata={'token_usage': {'completion_tokens': 14, 'prompt_tokens': 87, 'total_tokens': 101}, 'model_name': 'gpt-4o', 'system_fingerprint': 'fp_729ea513f7', 'finish_reason': 'stop', 'logprobs': None}, id='run-5904d36f-b2a4-4f55-b431-12c82992c92c-0')]}"
"{'messages': [HumanMessage(content='what is the value of magic_function(3)?', id='0f63e437-c4d8-4da9-b6f5-b293ebfe4a64'),\n",
" AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_S96v28LlI6hNkQrNnIio0JPh', 'function': {'arguments': '{\"input\":3}', 'name': 'magic_function'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 14, 'prompt_tokens': 64, 'total_tokens': 78}, 'model_name': 'gpt-4o', 'system_fingerprint': 'fp_729ea513f7', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-ffef7898-14b1-4537-ad90-7c000a8a5d25-0', tool_calls=[{'name': 'magic_function', 'args': {'input': 3}, 'id': 'call_S96v28LlI6hNkQrNnIio0JPh'}]),\n",
" ToolMessage(content='5', name='magic_function', id='fbd9df4e-1dda-4d3e-9044-b001f7875476', tool_call_id='call_S96v28LlI6hNkQrNnIio0JPh'),\n",
" AIMessage(content='The value of `magic_function(3)` is 5.', response_metadata={'token_usage': {'completion_tokens': 14, 'prompt_tokens': 87, 'total_tokens': 101}, 'model_name': 'gpt-4o', 'system_fingerprint': 'fp_729ea513f7', 'finish_reason': 'stop', 'logprobs': None}, id='run-e5d94c54-d9f4-45cd-be8e-a9101a8d88d6-0')]}"
]
},
"execution_count": 23,
"execution_count": 13,
"metadata": {},
"output_type": "execute_result"
}
@ -400,7 +664,7 @@
},
{
"cell_type": "code",
"execution_count": 24,
"execution_count": 14,
"id": "16f189a7-fc78-4cb5-aa16-a94ca06401a6",
"metadata": {},
"outputs": [],
@ -416,7 +680,7 @@
},
{
"cell_type": "code",
"execution_count": 26,
"execution_count": 15,
"id": "c96aefd7-6f6e-4670-aca6-1ac3d4e7871f",
"metadata": {},
"outputs": [
@ -431,7 +695,11 @@
"Invoking: `magic_function` with `{'input': '3'}`\n",
"\n",
"\n",
"\u001b[0m\u001b[36;1m\u001b[1;3mSorry, there was an error. Please try again.\u001b[0m\u001b[32;1m\u001b[1;3mParece que hubo un error al intentar obtener el valor de `magic_function(3)`. ¿Te gustaría que lo intente de nuevo?\u001b[0m\n",
"\u001b[0m\u001b[36;1m\u001b[1;3mSorry, there was an error. Please try again.\u001b[0m\u001b[32;1m\u001b[1;3m\n",
"Invoking: `magic_function` with `{'input': '3'}`\n",
"responded: Parece que hubo un error al intentar obtener el valor de `magic_function(3)`. Permíteme intentarlo de nuevo.\n",
"\n",
"\u001b[0m\u001b[36;1m\u001b[1;3mSorry, there was an error. Please try again.\u001b[0m\u001b[32;1m\u001b[1;3mAún no puedo obtener el valor de `magic_function(3)`. ¿Hay algo más en lo que pueda ayudarte?\u001b[0m\n",
"\n",
"\u001b[1m> Finished chain.\u001b[0m\n"
]
@ -440,10 +708,10 @@
"data": {
"text/plain": [
"{'input': 'what is the value of magic_function(3)?',\n",
" 'output': 'Parece que hubo un error al intentar obtener el valor de `magic_function(3)`. ¿Te gustaría que lo intente de nuevo?'}"
" 'output': 'Aún no puedo obtener el valor de `magic_function(3)`. ¿Hay algo más en lo que pueda ayudarte?'}"
]
},
"execution_count": 26,
"execution_count": 15,
"metadata": {},
"output_type": "execute_result"
}
@ -471,7 +739,7 @@
},
{
"cell_type": "code",
"execution_count": 29,
"execution_count": 16,
"id": "b974a91f-6ae8-4644-83d9-73666258a6db",
"metadata": {},
"outputs": [
@ -480,14 +748,11 @@
"output_type": "stream",
"text": [
"('human', 'what is the value of magic_function(3)?')\n",
"content='' additional_kwargs={'tool_calls': [{'id': 'call_9fMkSAUGRa2BsADwF32ct1m1', 'function': {'arguments': '{\"input\":\"3\"}', 'name': 'magic_function'}, 'type': 'function'}]} response_metadata={'token_usage': {'completion_tokens': 14, 'prompt_tokens': 64, 'total_tokens': 78}, 'model_name': 'gpt-4o', 'system_fingerprint': 'fp_729ea513f7', 'finish_reason': 'tool_calls', 'logprobs': None} id='run-79084bff-6e10-49bb-b7f0-f613ebcc68ac-0' tool_calls=[{'name': 'magic_function', 'args': {'input': '3'}, 'id': 'call_9fMkSAUGRa2BsADwF32ct1m1'}]\n",
"content='Sorry, there was an error. Please try again.' name='magic_function' id='06f997fd-5309-4d56-afa3-2fe8cbf0d04f' tool_call_id='call_9fMkSAUGRa2BsADwF32ct1m1'\n",
"content='' additional_kwargs={'tool_calls': [{'id': 'call_Fg92zoL8oS5q6im2jR1INRvH', 'function': {'arguments': '{\"input\":\"3\"}', 'name': 'magic_function'}, 'type': 'function'}]} response_metadata={'token_usage': {'completion_tokens': 14, 'prompt_tokens': 97, 'total_tokens': 111}, 'model_name': 'gpt-4o', 'system_fingerprint': 'fp_729ea513f7', 'finish_reason': 'tool_calls', 'logprobs': None} id='run-fc2e201f-6330-4330-8c4e-1a66e85c1ffa-0' tool_calls=[{'name': 'magic_function', 'args': {'input': '3'}, 'id': 'call_Fg92zoL8oS5q6im2jR1INRvH'}]\n",
"content='Sorry, there was an error. Please try again.' name='magic_function' id='a931dd6e-2ed7-42ea-a58c-5ffb4041d7c9' tool_call_id='call_Fg92zoL8oS5q6im2jR1INRvH'\n",
"content='It seems there is an issue with processing the request for the value of `magic_function(3)`. Let me try a different approach.' additional_kwargs={'tool_calls': [{'id': 'call_lbYBMptprZ6HMqNiTvoqhmwP', 'function': {'arguments': '{\"input\":\"3\"}', 'name': 'magic_function'}, 'type': 'function'}]} response_metadata={'token_usage': {'completion_tokens': 43, 'prompt_tokens': 130, 'total_tokens': 173}, 'model_name': 'gpt-4o', 'system_fingerprint': 'fp_729ea513f7', 'finish_reason': 'tool_calls', 'logprobs': None} id='run-2e0baab0-c4c1-42e8-b49d-a2704ae977c0-0' tool_calls=[{'name': 'magic_function', 'args': {'input': '3'}, 'id': 'call_lbYBMptprZ6HMqNiTvoqhmwP'}]\n",
"content='Sorry, there was an error. Please try again.' name='magic_function' id='9957435a-5de3-4662-b23c-abfa31e71208' tool_call_id='call_lbYBMptprZ6HMqNiTvoqhmwP'\n",
"content='It appears that the `magic_function` is currently experiencing issues when attempting to process the input \"3\". Unfortunately, I can\\'t provide the value of `magic_function(3)` at this moment.\\n\\nIf you have any other questions or need assistance with something else, please let me know!' response_metadata={'token_usage': {'completion_tokens': 58, 'prompt_tokens': 195, 'total_tokens': 253}, 'model_name': 'gpt-4o', 'system_fingerprint': 'fp_729ea513f7', 'finish_reason': 'stop', 'logprobs': None} id='run-bb68d7ca-da76-43ad-80ab-23737a70c391-0'\n",
"{'input': 'what is the value of magic_function(3)?', 'output': 'Agent stopped due to max iterations.'}\n"
"content='' additional_kwargs={'tool_calls': [{'id': 'call_pFdKcCu5taDTtOOfX14vEDRp', 'function': {'arguments': '{\"input\":\"3\"}', 'name': 'magic_function'}, 'type': 'function'}]} response_metadata={'token_usage': {'completion_tokens': 14, 'prompt_tokens': 64, 'total_tokens': 78}, 'model_name': 'gpt-4o', 'system_fingerprint': 'fp_729ea513f7', 'finish_reason': 'tool_calls', 'logprobs': None} id='run-25836468-ba7e-43be-a7cf-76bba06a2a08-0' tool_calls=[{'name': 'magic_function', 'args': {'input': '3'}, 'id': 'call_pFdKcCu5taDTtOOfX14vEDRp'}]\n",
"content='Sorry, there was an error. Please try again.' name='magic_function' id='1a08b883-9c7b-4969-9e9b-67ce64cdcb5f' tool_call_id='call_pFdKcCu5taDTtOOfX14vEDRp'\n",
"content='It seems there was an error when trying to apply the magic function. Let me try again.' additional_kwargs={'tool_calls': [{'id': 'call_DA0lpDIkBFg2GHy4WsEcZG4K', 'function': {'arguments': '{\"input\":\"3\"}', 'name': 'magic_function'}, 'type': 'function'}]} response_metadata={'token_usage': {'completion_tokens': 34, 'prompt_tokens': 97, 'total_tokens': 131}, 'model_name': 'gpt-4o', 'system_fingerprint': 'fp_729ea513f7', 'finish_reason': 'tool_calls', 'logprobs': None} id='run-d571b774-0ea3-4e35-8b7d-f32932c3f3cc-0' tool_calls=[{'name': 'magic_function', 'args': {'input': '3'}, 'id': 'call_DA0lpDIkBFg2GHy4WsEcZG4K'}]\n",
"content='Sorry, there was an error. Please try again.' name='magic_function' id='0b45787b-c82a-487f-9a5a-de129c30460f' tool_call_id='call_DA0lpDIkBFg2GHy4WsEcZG4K'\n",
"content='It appears that there is a consistent issue when trying to apply the magic function to the input \"3.\" This could be due to various reasons, such as the input not being in the correct format or an internal error.\\n\\nIf you have any other questions or if there\\'s something else you\\'d like to try, please let me know!' response_metadata={'token_usage': {'completion_tokens': 66, 'prompt_tokens': 153, 'total_tokens': 219}, 'model_name': 'gpt-4o', 'system_fingerprint': 'fp_729ea513f7', 'finish_reason': 'stop', 'logprobs': None} id='run-50a962e6-21b7-4327-8dea-8e2304062627-0'\n"
]
}
],
@ -522,7 +787,7 @@
},
{
"cell_type": "code",
"execution_count": 30,
"execution_count": 17,
"id": "4b8498fc-a7af-4164-a401-d8714f082306",
"metadata": {},
"outputs": [
@ -549,7 +814,7 @@
" 'output': 'Agent stopped due to max iterations.'}"
]
},
"execution_count": 30,
"execution_count": 17,
"metadata": {},
"output_type": "execute_result"
}
@ -590,7 +855,7 @@
},
{
"cell_type": "code",
"execution_count": 31,
"execution_count": 18,
"id": "a2b29113-e6be-4f91-aa4c-5c63dea3e423",
"metadata": {},
"outputs": [
@ -598,7 +863,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
"{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_GlXWTlJ0jQc2B8jQuDVFzmnc', 'function': {'arguments': '{\"input\":\"3\"}', 'name': 'magic_function'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 14, 'prompt_tokens': 64, 'total_tokens': 78}, 'model_name': 'gpt-4o', 'system_fingerprint': 'fp_729ea513f7', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-38a0459b-a363-4181-b7a3-f25cb5c5d728-0', tool_calls=[{'name': 'magic_function', 'args': {'input': '3'}, 'id': 'call_GlXWTlJ0jQc2B8jQuDVFzmnc'}])]}}\n",
"{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_HaQkeCwD5QskzJzFixCBacZ4', 'function': {'arguments': '{\"input\":\"3\"}', 'name': 'magic_function'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 14, 'prompt_tokens': 64, 'total_tokens': 78}, 'model_name': 'gpt-4o', 'system_fingerprint': 'fp_729ea513f7', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-596c9200-771f-436d-8576-72fcb81620f1-0', tool_calls=[{'name': 'magic_function', 'args': {'input': '3'}, 'id': 'call_HaQkeCwD5QskzJzFixCBacZ4'}])]}}\n",
"------\n",
"{'input': 'what is the value of magic_function(3)?', 'output': 'Agent stopped due to max iterations.'}\n"
]
@ -624,12 +889,12 @@
"id": "32a9db70",
"metadata": {},
"source": [
"The other way to set a max timeout is just via python's stdlib [asyncio](https://docs.python.org/3/library/asyncio.html)."
"The other way to set a single max timeout for an entire run is to directly use the python stdlib [asyncio](https://docs.python.org/3/library/asyncio.html) library."
]
},
{
"cell_type": "code",
"execution_count": 34,
"execution_count": 19,
"id": "e9eb55f4-a321-4bac-b52d-9e43b411cf92",
"metadata": {},
"outputs": [
@ -637,11 +902,9 @@
"name": "stdout",
"output_type": "stream",
"text": [
"{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_cR1oJuYcNrOmcaaIRRvh5dSr', 'function': {'arguments': '{\"input\":\"3\"}', 'name': 'magic_function'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 14, 'prompt_tokens': 64, 'total_tokens': 78}, 'model_name': 'gpt-4o', 'system_fingerprint': 'fp_729ea513f7', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-1c03c5d6-4883-4ccd-aa78-53dbafa99622-0', tool_calls=[{'name': 'magic_function', 'args': {'input': '3'}, 'id': 'call_cR1oJuYcNrOmcaaIRRvh5dSr'}])]}}\n",
"------\n",
"{'action': {'messages': [ToolMessage(content='Sorry, there was an error. Please try again.', name='magic_function', id='596baf13-de35-4a4f-8b78-475b387a1f40', tool_call_id='call_cR1oJuYcNrOmcaaIRRvh5dSr')]}}\n",
"{'agent': {'messages': [AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_4agJXUHtmHrOOMogjF6ZuzAv', 'function': {'arguments': '{\"input\":\"3\"}', 'name': 'magic_function'}, 'type': 'function'}]}, response_metadata={'token_usage': {'completion_tokens': 14, 'prompt_tokens': 64, 'total_tokens': 78}, 'model_name': 'gpt-4o', 'system_fingerprint': 'fp_729ea513f7', 'finish_reason': 'tool_calls', 'logprobs': None}, id='run-a1c77db7-405f-43d9-8d57-751f2ca1a58c-0', tool_calls=[{'name': 'magic_function', 'args': {'input': '3'}, 'id': 'call_4agJXUHtmHrOOMogjF6ZuzAv'}])]}}\n",
"------\n",
"{'input': 'what is the value of magic_function(3)?', 'output': 'Task Cancelled.'}\n"
"Task Cancelled.\n"
]
}
],
@ -665,6 +928,290 @@
"except TimeoutError:\n",
" print(\"Task Cancelled.\")"
]
},
{
"cell_type": "markdown",
"id": "4884ac87",
"metadata": {},
"source": [
"## `early_stopping_method`\n",
"\n",
"With LangChain's [AgentExecutor](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor.iter), you could configure an [early_stopping_method](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor.early_stopping_method) to either return a string saying \"Agent stopped due to iteration limit or time limit.\" (`\"force\"`) or prompt the LLM a final time to respond (`\"generate\"`)."
]
},
{
"cell_type": "code",
"execution_count": 20,
"id": "3f6e2cf2",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Output with early_stopping_method='force':\n",
"Agent stopped due to max iterations.\n"
]
}
],
"source": [
"from langchain.agents import AgentExecutor, create_tool_calling_agent\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.tools import tool\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"model = ChatOpenAI(model=\"gpt-4o\")\n",
"\n",
"\n",
"prompt = ChatPromptTemplate.from_messages(\n",
" [\n",
" (\"system\", \"You are a helpful assistant.\"),\n",
" (\"human\", \"{input}\"),\n",
" # Placeholders fill up a **list** of messages\n",
" (\"placeholder\", \"{agent_scratchpad}\"),\n",
" ]\n",
")\n",
"\n",
"\n",
"@tool\n",
"def magic_function(input: int) -> int:\n",
" \"\"\"Applies a magic function to an input.\"\"\"\n",
" return \"Sorry there was an error, please try again.\"\n",
"\n",
"\n",
"tools = [magic_function]\n",
"\n",
"agent = create_tool_calling_agent(model, tools, prompt=prompt)\n",
"agent_executor = AgentExecutor(\n",
" agent=agent, tools=tools, early_stopping_method=\"force\", max_iterations=1\n",
")\n",
"\n",
"result = agent_executor.invoke({\"input\": query})\n",
"print(\"Output with early_stopping_method='force':\")\n",
"print(result[\"output\"])"
]
},
{
"cell_type": "markdown",
"id": "706e05c4",
"metadata": {},
"source": [
"#### In LangGraph\n",
"\n",
"In LangGraph, you can explicitly handle the response behavior outside the agent, since the full state can be accessed."
]
},
{
"cell_type": "code",
"execution_count": 21,
"id": "73cabbc4",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"('human', 'what is the value of magic_function(3)?')\n",
"content='' additional_kwargs={'tool_calls': [{'id': 'call_bTURmOn9C8zslmn0kMFeykIn', 'function': {'arguments': '{\"input\":3}', 'name': 'magic_function'}, 'type': 'function'}]} response_metadata={'token_usage': {'completion_tokens': 14, 'prompt_tokens': 64, 'total_tokens': 78}, 'model_name': 'gpt-4o', 'system_fingerprint': 'fp_729ea513f7', 'finish_reason': 'tool_calls', 'logprobs': None} id='run-0844a504-7e6b-4ea6-a069-7017e38121ee-0' tool_calls=[{'name': 'magic_function', 'args': {'input': 3}, 'id': 'call_bTURmOn9C8zslmn0kMFeykIn'}]\n",
"content='Sorry there was an error, please try again.' name='magic_function' id='00d5386f-eb23-4628-9a29-d9ce6a7098cc' tool_call_id='call_bTURmOn9C8zslmn0kMFeykIn'\n",
"content='' additional_kwargs={'tool_calls': [{'id': 'call_JYqvvvWmXow2u012DuPoDHFV', 'function': {'arguments': '{\"input\":3}', 'name': 'magic_function'}, 'type': 'function'}]} response_metadata={'token_usage': {'completion_tokens': 14, 'prompt_tokens': 96, 'total_tokens': 110}, 'model_name': 'gpt-4o', 'system_fingerprint': 'fp_729ea513f7', 'finish_reason': 'tool_calls', 'logprobs': None} id='run-b73b1b1c-c829-4348-98cd-60b315c85448-0' tool_calls=[{'name': 'magic_function', 'args': {'input': 3}, 'id': 'call_JYqvvvWmXow2u012DuPoDHFV'}]\n",
"{'input': 'what is the value of magic_function(3)?', 'output': 'Agent stopped due to max iterations.'}\n"
]
}
],
"source": [
"from langgraph.errors import GraphRecursionError\n",
"from langgraph.prebuilt import create_react_agent\n",
"\n",
"RECURSION_LIMIT = 2 * 1 + 1\n",
"\n",
"app = create_react_agent(model, tools=tools)\n",
"\n",
"try:\n",
" for chunk in app.stream(\n",
" {\"messages\": [(\"human\", query)]},\n",
" {\"recursion_limit\": RECURSION_LIMIT},\n",
" stream_mode=\"values\",\n",
" ):\n",
" print(chunk[\"messages\"][-1])\n",
"except GraphRecursionError:\n",
" print({\"input\": query, \"output\": \"Agent stopped due to max iterations.\"})"
]
},
{
"cell_type": "markdown",
"id": "017fe20e",
"metadata": {},
"source": [
"## `trim_intermediate_steps`\n",
"\n",
"With LangChain's [AgentExecutor](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor), you could trim the intermediate steps of long-running agents using [trim_intermediate_steps](https://api.python.langchain.com/en/latest/agents/langchain.agents.agent.AgentExecutor.html#langchain.agents.agent.AgentExecutor.trim_intermediate_steps), which is either an integer (indicating the agent should keep the last N steps) or a custom function.\n",
"\n",
"For instance, we could trim the value so the agent only sees the most recent intermediate step."
]
},
{
"cell_type": "code",
"execution_count": 22,
"id": "b94bb169",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Call number: 1\n",
"Call number: 2\n",
"Call number: 3\n",
"Call number: 4\n",
"Call number: 5\n",
"Call number: 6\n",
"Call number: 7\n",
"Call number: 8\n",
"Call number: 9\n",
"Call number: 10\n",
"Call number: 11\n",
"Call number: 12\n",
"Call number: 13\n",
"Call number: 14\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Stopping agent prematurely due to triggering stop condition\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Call number: 15\n"
]
}
],
"source": [
"from langchain.agents import AgentExecutor, create_tool_calling_agent\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.tools import tool\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"model = ChatOpenAI(model=\"gpt-4o\")\n",
"\n",
"\n",
"prompt = ChatPromptTemplate.from_messages(\n",
" [\n",
" (\"system\", \"You are a helpful assistant.\"),\n",
" (\"human\", \"{input}\"),\n",
" # Placeholders fill up a **list** of messages\n",
" (\"placeholder\", \"{agent_scratchpad}\"),\n",
" ]\n",
")\n",
"\n",
"\n",
"magic_step_num = 1\n",
"\n",
"\n",
"@tool\n",
"def magic_function(input: int) -> int:\n",
" \"\"\"Applies a magic function to an input.\"\"\"\n",
" global magic_step_num\n",
" print(f\"Call number: {magic_step_num}\")\n",
" magic_step_num += 1\n",
" return input + magic_step_num\n",
"\n",
"\n",
"tools = [magic_function]\n",
"\n",
"agent = create_tool_calling_agent(model, tools, prompt=prompt)\n",
"\n",
"\n",
"def trim_steps(steps: list):\n",
" # Let's give the agent amnesia\n",
" return []\n",
"\n",
"\n",
"agent_executor = AgentExecutor(\n",
" agent=agent, tools=tools, trim_intermediate_steps=trim_steps\n",
")\n",
"\n",
"\n",
"query = \"Call the magic function 4 times in sequence with the value 3. You cannot call it multiple times at once.\"\n",
"\n",
"for step in agent_executor.stream({\"input\": query}):\n",
" pass"
]
},
{
"cell_type": "markdown",
"id": "3d450c5a",
"metadata": {},
"source": [
"#### In LangGraph\n",
"\n",
"We can use the [`messages_modifier`](https://langchain-ai.github.io/langgraph/reference/prebuilt/#create_react_agent) just as before when passing in [prompt templates](#prompt-templates)."
]
},
{
"cell_type": "code",
"execution_count": 23,
"id": "b309ba9a",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Call number: 1\n",
"Call number: 2\n",
"Call number: 3\n",
"Call number: 4\n",
"Call number: 5\n",
"Call number: 6\n",
"Call number: 7\n",
"Call number: 8\n",
"Call number: 9\n",
"Call number: 10\n",
"Call number: 11\n",
"Call number: 12\n",
"Stopping agent prematurely due to triggering stop condition\n"
]
}
],
"source": [
"from langchain_core.messages import AnyMessage\n",
"from langgraph.errors import GraphRecursionError\n",
"from langgraph.prebuilt import create_react_agent\n",
"\n",
"magic_step_num = 1\n",
"\n",
"\n",
"@tool\n",
"def magic_function(input: int) -> int:\n",
" \"\"\"Applies a magic function to an input.\"\"\"\n",
" global magic_step_num\n",
" print(f\"Call number: {magic_step_num}\")\n",
" magic_step_num += 1\n",
" return input + magic_step_num\n",
"\n",
"\n",
"tools = [magic_function]\n",
"\n",
"\n",
"def _modify_messages(messages: list[AnyMessage]):\n",
" # Give the agent amnesia, only keeping the original user query\n",
" return [(\"system\", \"You are a helpful assistant\"), messages[0]]\n",
"\n",
"\n",
"app = create_react_agent(model, tools, messages_modifier=_modify_messages)\n",
"\n",
"try:\n",
" for step in app.stream({\"messages\": [(\"human\", query)]}, stream_mode=\"updates\"):\n",
" pass\n",
"except GraphRecursionError as e:\n",
" print(\"Stopping agent prematurely due to triggering stop condition\")"
]
}
],
"metadata": {

@ -57,7 +57,7 @@
"outputs": [],
"source": [
"loaders = [\n",
" TextLoader(\"../../paul_graham_essay.txt\"),\n",
" TextLoader(\"paul_graham_essay.txt\"),\n",
" TextLoader(\"state_of_the_union.txt\"),\n",
"]\n",
"docs = []\n",
@ -124,8 +124,8 @@
{
"data": {
"text/plain": [
"['cfdf4af7-51f2-4ea3-8166-5be208efa040',\n",
" 'bf213c21-cc66-4208-8a72-733d030187e6']"
"['9a63376c-58cc-42c9-b0f7-61f0e1a3a688',\n",
" '40091598-e918-4a18-9be0-f46413a95ae4']"
]
},
"execution_count": 6,
@ -190,7 +190,7 @@
"metadata": {},
"outputs": [],
"source": [
"retrieved_docs = retriever.get_relevant_documents(\"justice breyer\")"
"retrieved_docs = retriever.invoke(\"justice breyer\")"
]
},
{
@ -338,17 +338,17 @@
},
{
"cell_type": "code",
"execution_count": 17,
"execution_count": 18,
"id": "3a3202df",
"metadata": {},
"outputs": [],
"source": [
"retrieved_docs = retriever.get_relevant_documents(\"justice breyer\")"
"retrieved_docs = retriever.invoke(\"justice breyer\")"
]
},
{
"cell_type": "code",
"execution_count": 18,
"execution_count": 19,
"id": "684fdb2c",
"metadata": {},
"outputs": [
@ -358,7 +358,7 @@
"1849"
]
},
"execution_count": 18,
"execution_count": 19,
"metadata": {},
"output_type": "execute_result"
}
@ -369,7 +369,7 @@
},
{
"cell_type": "code",
"execution_count": 19,
"execution_count": 20,
"id": "9f17f662",
"metadata": {},
"outputs": [
@ -424,7 +424,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.1"
"version": "3.10.4"
}
},
"nbformat": 4,

@ -0,0 +1,351 @@
What I Worked On
February 2021
Before college the two main things I worked on, outside of school, were writing and programming. I didn't write essays. I wrote what beginning writers were supposed to write then, and probably still are: short stories. My stories were awful. They had hardly any plot, just characters with strong feelings, which I imagined made them deep.
The first programs I tried writing were on the IBM 1401 that our school district used for what was then called "data processing." This was in 9th grade, so I was 13 or 14. The school district's 1401 happened to be in the basement of our junior high school, and my friend Rich Draves and I got permission to use it. It was like a mini Bond villain's lair down there, with all these alien-looking machines — CPU, disk drives, printer, card reader — sitting up on a raised floor under bright fluorescent lights.
The language we used was an early version of Fortran. You had to type programs on punch cards, then stack them in the card reader and press a button to load the program into memory and run it. The result would ordinarily be to print something on the spectacularly loud printer.
I was puzzled by the 1401. I couldn't figure out what to do with it. And in retrospect there's not much I could have done with it. The only form of input to programs was data stored on punched cards, and I didn't have any data stored on punched cards. The only other option was to do things that didn't rely on any input, like calculate approximations of pi, but I didn't know enough math to do anything interesting of that type. So I'm not surprised I can't remember any programs I wrote, because they can't have done much. My clearest memory is of the moment I learned it was possible for programs not to terminate, when one of mine didn't. On a machine without time-sharing, this was a social as well as a technical error, as the data center manager's expression made clear.
With microcomputers, everything changed. Now you could have a computer sitting right in front of you, on a desk, that could respond to your keystrokes as it was running instead of just churning through a stack of punch cards and then stopping. [1]
The first of my friends to get a microcomputer built it himself. It was sold as a kit by Heathkit. I remember vividly how impressed and envious I felt watching him sitting in front of it, typing programs right into the computer.
Computers were expensive in those days and it took me years of nagging before I convinced my father to buy one, a TRS-80, in about 1980. The gold standard then was the Apple II, but a TRS-80 was good enough. This was when I really started programming. I wrote simple games, a program to predict how high my model rockets would fly, and a word processor that my father used to write at least one book. There was only room in memory for about 2 pages of text, so he'd write 2 pages at a time and then print them out, but it was a lot better than a typewriter.
Though I liked programming, I didn't plan to study it in college. In college I was going to study philosophy, which sounded much more powerful. It seemed, to my naive high school self, to be the study of the ultimate truths, compared to which the things studied in other fields would be mere domain knowledge. What I discovered when I got to college was that the other fields took up so much of the space of ideas that there wasn't much left for these supposed ultimate truths. All that seemed left for philosophy were edge cases that people in other fields felt could safely be ignored.
I couldn't have put this into words when I was 18. All I knew at the time was that I kept taking philosophy courses and they kept being boring. So I decided to switch to AI.
AI was in the air in the mid 1980s, but there were two things especially that made me want to work on it: a novel by Heinlein called The Moon is a Harsh Mistress, which featured an intelligent computer called Mike, and a PBS documentary that showed Terry Winograd using SHRDLU. I haven't tried rereading The Moon is a Harsh Mistress, so I don't know how well it has aged, but when I read it I was drawn entirely into its world. It seemed only a matter of time before we'd have Mike, and when I saw Winograd using SHRDLU, it seemed like that time would be a few years at most. All you had to do was teach SHRDLU more words.
There weren't any classes in AI at Cornell then, not even graduate classes, so I started trying to teach myself. Which meant learning Lisp, since in those days Lisp was regarded as the language of AI. The commonly used programming languages then were pretty primitive, and programmers' ideas correspondingly so. The default language at Cornell was a Pascal-like language called PL/I, and the situation was similar elsewhere. Learning Lisp expanded my concept of a program so fast that it was years before I started to have a sense of where the new limits were. This was more like it; this was what I had expected college to do. It wasn't happening in a class, like it was supposed to, but that was ok. For the next couple years I was on a roll. I knew what I was going to do.
For my undergraduate thesis, I reverse-engineered SHRDLU. My God did I love working on that program. It was a pleasing bit of code, but what made it even more exciting was my belief — hard to imagine now, but not unique in 1985 — that it was already climbing the lower slopes of intelligence.
I had gotten into a program at Cornell that didn't make you choose a major. You could take whatever classes you liked, and choose whatever you liked to put on your degree. I of course chose "Artificial Intelligence." When I got the actual physical diploma, I was dismayed to find that the quotes had been included, which made them read as scare-quotes. At the time this bothered me, but now it seems amusingly accurate, for reasons I was about to discover.
I applied to 3 grad schools: MIT and Yale, which were renowned for AI at the time, and Harvard, which I'd visited because Rich Draves went there, and was also home to Bill Woods, who'd invented the type of parser I used in my SHRDLU clone. Only Harvard accepted me, so that was where I went.
I don't remember the moment it happened, or if there even was a specific moment, but during the first year of grad school I realized that AI, as practiced at the time, was a hoax. By which I mean the sort of AI in which a program that's told "the dog is sitting on the chair" translates this into some formal representation and adds it to the list of things it knows.
What these programs really showed was that there's a subset of natural language that's a formal language. But a very proper subset. It was clear that there was an unbridgeable gap between what they could do and actually understanding natural language. It was not, in fact, simply a matter of teaching SHRDLU more words. That whole way of doing AI, with explicit data structures representing concepts, was not going to work. Its brokenness did, as so often happens, generate a lot of opportunities to write papers about various band-aids that could be applied to it, but it was never going to get us Mike.
So I looked around to see what I could salvage from the wreckage of my plans, and there was Lisp. I knew from experience that Lisp was interesting for its own sake and not just for its association with AI, even though that was the main reason people cared about it at the time. So I decided to focus on Lisp. In fact, I decided to write a book about Lisp hacking. It's scary to think how little I knew about Lisp hacking when I started writing that book. But there's nothing like writing a book about something to help you learn it. The book, On Lisp, wasn't published till 1993, but I wrote much of it in grad school.
Computer Science is an uneasy alliance between two halves, theory and systems. The theory people prove things, and the systems people build things. I wanted to build things. I had plenty of respect for theory — indeed, a sneaking suspicion that it was the more admirable of the two halves — but building things seemed so much more exciting.
The problem with systems work, though, was that it didn't last. Any program you wrote today, no matter how good, would be obsolete in a couple decades at best. People might mention your software in footnotes, but no one would actually use it. And indeed, it would seem very feeble work. Only people with a sense of the history of the field would even realize that, in its time, it had been good.
There were some surplus Xerox Dandelions floating around the computer lab at one point. Anyone who wanted one to play around with could have one. I was briefly tempted, but they were so slow by present standards; what was the point? No one else wanted one either, so off they went. That was what happened to systems work.
I wanted not just to build things, but to build things that would last.
In this dissatisfied state I went in 1988 to visit Rich Draves at CMU, where he was in grad school. One day I went to visit the Carnegie Institute, where I'd spent a lot of time as a kid. While looking at a painting there I realized something that might seem obvious, but was a big surprise to me. There, right on the wall, was something you could make that would last. Paintings didn't become obsolete. Some of the best ones were hundreds of years old.
And moreover this was something you could make a living doing. Not as easily as you could by writing software, of course, but I thought if you were really industrious and lived really cheaply, it had to be possible to make enough to survive. And as an artist you could be truly independent. You wouldn't have a boss, or even need to get research funding.
I had always liked looking at paintings. Could I make them? I had no idea. I'd never imagined it was even possible. I knew intellectually that people made art — that it didn't just appear spontaneously — but it was as if the people who made it were a different species. They either lived long ago or were mysterious geniuses doing strange things in profiles in Life magazine. The idea of actually being able to make art, to put that verb before that noun, seemed almost miraculous.
That fall I started taking art classes at Harvard. Grad students could take classes in any department, and my advisor, Tom Cheatham, was very easy going. If he even knew about the strange classes I was taking, he never said anything.
So now I was in a PhD program in computer science, yet planning to be an artist, yet also genuinely in love with Lisp hacking and working away at On Lisp. In other words, like many a grad student, I was working energetically on multiple projects that were not my thesis.
I didn't see a way out of this situation. I didn't want to drop out of grad school, but how else was I going to get out? I remember when my friend Robert Morris got kicked out of Cornell for writing the internet worm of 1988, I was envious that he'd found such a spectacular way to get out of grad school.
Then one day in April 1990 a crack appeared in the wall. I ran into professor Cheatham and he asked if I was far enough along to graduate that June. I didn't have a word of my dissertation written, but in what must have been the quickest bit of thinking in my life, I decided to take a shot at writing one in the 5 weeks or so that remained before the deadline, reusing parts of On Lisp where I could, and I was able to respond, with no perceptible delay, "Yes, I think so. I'll give you something to read in a few days."
I picked applications of continuations as the topic. In retrospect I should have written about macros and embedded languages. There's a whole world there that's barely been explored. But all I wanted was to get out of grad school, and my rapidly written dissertation sufficed, just barely.
Meanwhile I was applying to art schools. I applied to two: RISD in the US, and the Accademia di Belli Arti in Florence, which, because it was the oldest art school, I imagined would be good. RISD accepted me, and I never heard back from the Accademia, so off to Providence I went.
I'd applied for the BFA program at RISD, which meant in effect that I had to go to college again. This was not as strange as it sounds, because I was only 25, and art schools are full of people of different ages. RISD counted me as a transfer sophomore and said I had to do the foundation that summer. The foundation means the classes that everyone has to take in fundamental subjects like drawing, color, and design.
Toward the end of the summer I got a big surprise: a letter from the Accademia, which had been delayed because they'd sent it to Cambridge, England instead of Cambridge, Massachusetts, inviting me to take the entrance exam in Florence that fall. This was now only weeks away. My nice landlady let me leave my stuff in her attic. I had some money saved from consulting work I'd done in grad school; there was probably enough to last a year if I lived cheaply. Now all I had to do was learn Italian.
Only stranieri (foreigners) had to take this entrance exam. In retrospect it may well have been a way of excluding them, because there were so many stranieri attracted by the idea of studying art in Florence that the Italian students would otherwise have been outnumbered. I was in decent shape at painting and drawing from the RISD foundation that summer, but I still don't know how I managed to pass the written exam. I remember that I answered the essay question by writing about Cezanne, and that I cranked up the intellectual level as high as I could to make the most of my limited vocabulary. [2]
I'm only up to age 25 and already there are such conspicuous patterns. Here I was, yet again about to attend some august institution in the hopes of learning about some prestigious subject, and yet again about to be disappointed. The students and faculty in the painting department at the Accademia were the nicest people you could imagine, but they had long since arrived at an arrangement whereby the students wouldn't require the faculty to teach anything, and in return the faculty wouldn't require the students to learn anything. And at the same time all involved would adhere outwardly to the conventions of a 19th century atelier. We actually had one of those little stoves, fed with kindling, that you see in 19th century studio paintings, and a nude model sitting as close to it as possible without getting burned. Except hardly anyone else painted her besides me. The rest of the students spent their time chatting or occasionally trying to imitate things they'd seen in American art magazines.
Our model turned out to live just down the street from me. She made a living from a combination of modelling and making fakes for a local antique dealer. She'd copy an obscure old painting out of a book, and then he'd take the copy and maltreat it to make it look old. [3]
While I was a student at the Accademia I started painting still lives in my bedroom at night. These paintings were tiny, because the room was, and because I painted them on leftover scraps of canvas, which was all I could afford at the time. Painting still lives is different from painting people, because the subject, as its name suggests, can't move. People can't sit for more than about 15 minutes at a time, and when they do they don't sit very still. So the traditional m.o. for painting people is to know how to paint a generic person, which you then modify to match the specific person you're painting. Whereas a still life you can, if you want, copy pixel by pixel from what you're seeing. You don't want to stop there, of course, or you get merely photographic accuracy, and what makes a still life interesting is that it's been through a head. You want to emphasize the visual cues that tell you, for example, that the reason the color changes suddenly at a certain point is that it's the edge of an object. By subtly emphasizing such things you can make paintings that are more realistic than photographs not just in some metaphorical sense, but in the strict information-theoretic sense. [4]
I liked painting still lives because I was curious about what I was seeing. In everyday life, we aren't consciously aware of much we're seeing. Most visual perception is handled by low-level processes that merely tell your brain "that's a water droplet" without telling you details like where the lightest and darkest points are, or "that's a bush" without telling you the shape and position of every leaf. This is a feature of brains, not a bug. In everyday life it would be distracting to notice every leaf on every bush. But when you have to paint something, you have to look more closely, and when you do there's a lot to see. You can still be noticing new things after days of trying to paint something people usually take for granted, just as you can after days of trying to write an essay about something people usually take for granted.
This is not the only way to paint. I'm not 100% sure it's even a good way to paint. But it seemed a good enough bet to be worth trying.
Our teacher, professor Ulivi, was a nice guy. He could see I worked hard, and gave me a good grade, which he wrote down in a sort of passport each student had. But the Accademia wasn't teaching me anything except Italian, and my money was running out, so at the end of the first year I went back to the US.
I wanted to go back to RISD, but I was now broke and RISD was very expensive, so I decided to get a job for a year and then return to RISD the next fall. I got one at a company called Interleaf, which made software for creating documents. You mean like Microsoft Word? Exactly. That was how I learned that low end software tends to eat high end software. But Interleaf still had a few years to live yet. [5]
Interleaf had done something pretty bold. Inspired by Emacs, they'd added a scripting language, and even made the scripting language a dialect of Lisp. Now they wanted a Lisp hacker to write things in it. This was the closest thing I've had to a normal job, and I hereby apologize to my boss and coworkers, because I was a bad employee. Their Lisp was the thinnest icing on a giant C cake, and since I didn't know C and didn't want to learn it, I never understood most of the software. Plus I was terribly irresponsible. This was back when a programming job meant showing up every day during certain working hours. That seemed unnatural to me, and on this point the rest of the world is coming around to my way of thinking, but at the time it caused a lot of friction. Toward the end of the year I spent much of my time surreptitiously working on On Lisp, which I had by this time gotten a contract to publish.
The good part was that I got paid huge amounts of money, especially by art student standards. In Florence, after paying my part of the rent, my budget for everything else had been $7 a day. Now I was getting paid more than 4 times that every hour, even when I was just sitting in a meeting. By living cheaply I not only managed to save enough to go back to RISD, but also paid off my college loans.
I learned some useful things at Interleaf, though they were mostly about what not to do. I learned that it's better for technology companies to be run by product people than sales people (though sales is a real skill and people who are good at it are really good at it), that it leads to bugs when code is edited by too many people, that cheap office space is no bargain if it's depressing, that planned meetings are inferior to corridor conversations, that big, bureaucratic customers are a dangerous source of money, and that there's not much overlap between conventional office hours and the optimal time for hacking, or conventional offices and the optimal place for it.
But the most important thing I learned, and which I used in both Viaweb and Y Combinator, is that the low end eats the high end: that it's good to be the "entry level" option, even though that will be less prestigious, because if you're not, someone else will be, and will squash you against the ceiling. Which in turn means that prestige is a danger sign.
When I left to go back to RISD the next fall, I arranged to do freelance work for the group that did projects for customers, and this was how I survived for the next several years. When I came back to visit for a project later on, someone told me about a new thing called HTML, which was, as he described it, a derivative of SGML. Markup language enthusiasts were an occupational hazard at Interleaf and I ignored him, but this HTML thing later became a big part of my life.
In the fall of 1992 I moved back to Providence to continue at RISD. The foundation had merely been intro stuff, and the Accademia had been a (very civilized) joke. Now I was going to see what real art school was like. But alas it was more like the Accademia than not. Better organized, certainly, and a lot more expensive, but it was now becoming clear that art school did not bear the same relationship to art that medical school bore to medicine. At least not the painting department. The textile department, which my next door neighbor belonged to, seemed to be pretty rigorous. No doubt illustration and architecture were too. But painting was post-rigorous. Painting students were supposed to express themselves, which to the more worldly ones meant to try to cook up some sort of distinctive signature style.
A signature style is the visual equivalent of what in show business is known as a "schtick": something that immediately identifies the work as yours and no one else's. For example, when you see a painting that looks like a certain kind of cartoon, you know it's by Roy Lichtenstein. So if you see a big painting of this type hanging in the apartment of a hedge fund manager, you know he paid millions of dollars for it. That's not always why artists have a signature style, but it's usually why buyers pay a lot for such work. [6]
There were plenty of earnest students too: kids who "could draw" in high school, and now had come to what was supposed to be the best art school in the country, to learn to draw even better. They tended to be confused and demoralized by what they found at RISD, but they kept going, because painting was what they did. I was not one of the kids who could draw in high school, but at RISD I was definitely closer to their tribe than the tribe of signature style seekers.
I learned a lot in the color class I took at RISD, but otherwise I was basically teaching myself to paint, and I could do that for free. So in 1993 I dropped out. I hung around Providence for a bit, and then my college friend Nancy Parmet did me a big favor. A rent-controlled apartment in a building her mother owned in New York was becoming vacant. Did I want it? It wasn't much more than my current place, and New York was supposed to be where the artists were. So yes, I wanted it! [7]
Asterix comics begin by zooming in on a tiny corner of Roman Gaul that turns out not to be controlled by the Romans. You can do something similar on a map of New York City: if you zoom in on the Upper East Side, there's a tiny corner that's not rich, or at least wasn't in 1993. It's called Yorkville, and that was my new home. Now I was a New York artist — in the strictly technical sense of making paintings and living in New York.
I was nervous about money, because I could sense that Interleaf was on the way down. Freelance Lisp hacking work was very rare, and I didn't want to have to program in another language, which in those days would have meant C++ if I was lucky. So with my unerring nose for financial opportunity, I decided to write another book on Lisp. This would be a popular book, the sort of book that could be used as a textbook. I imagined myself living frugally off the royalties and spending all my time painting. (The painting on the cover of this book, ANSI Common Lisp, is one that I painted around this time.)
The best thing about New York for me was the presence of Idelle and Julian Weber. Idelle Weber was a painter, one of the early photorealists, and I'd taken her painting class at Harvard. I've never known a teacher more beloved by her students. Large numbers of former students kept in touch with her, including me. After I moved to New York I became her de facto studio assistant.
She liked to paint on big, square canvases, 4 to 5 feet on a side. One day in late 1994 as I was stretching one of these monsters there was something on the radio about a famous fund manager. He wasn't that much older than me, and was super rich. The thought suddenly occurred to me: why don't I become rich? Then I'll be able to work on whatever I want.
Meanwhile I'd been hearing more and more about this new thing called the World Wide Web. Robert Morris showed it to me when I visited him in Cambridge, where he was now in grad school at Harvard. It seemed to me that the web would be a big deal. I'd seen what graphical user interfaces had done for the popularity of microcomputers. It seemed like the web would do the same for the internet.
If I wanted to get rich, here was the next train leaving the station. I was right about that part. What I got wrong was the idea. I decided we should start a company to put art galleries online. I can't honestly say, after reading so many Y Combinator applications, that this was the worst startup idea ever, but it was up there. Art galleries didn't want to be online, and still don't, not the fancy ones. That's not how they sell. I wrote some software to generate web sites for galleries, and Robert wrote some to resize images and set up an http server to serve the pages. Then we tried to sign up galleries. To call this a difficult sale would be an understatement. It was difficult to give away. A few galleries let us make sites for them for free, but none paid us.
Then some online stores started to appear, and I realized that except for the order buttons they were identical to the sites we'd been generating for galleries. This impressive-sounding thing called an "internet storefront" was something we already knew how to build.
So in the summer of 1995, after I submitted the camera-ready copy of ANSI Common Lisp to the publishers, we started trying to write software to build online stores. At first this was going to be normal desktop software, which in those days meant Windows software. That was an alarming prospect, because neither of us knew how to write Windows software or wanted to learn. We lived in the Unix world. But we decided we'd at least try writing a prototype store builder on Unix. Robert wrote a shopping cart, and I wrote a new site generator for stores — in Lisp, of course.
We were working out of Robert's apartment in Cambridge. His roommate was away for big chunks of time, during which I got to sleep in his room. For some reason there was no bed frame or sheets, just a mattress on the floor. One morning as I was lying on this mattress I had an idea that made me sit up like a capital L. What if we ran the software on the server, and let users control it by clicking on links? Then we'd never have to write anything to run on users' computers. We could generate the sites on the same server we'd serve them from. Users wouldn't need anything more than a browser.
This kind of software, known as a web app, is common now, but at the time it wasn't clear that it was even possible. To find out, we decided to try making a version of our store builder that you could control through the browser. A couple days later, on August 12, we had one that worked. The UI was horrible, but it proved you could build a whole store through the browser, without any client software or typing anything into the command line on the server.
Now we felt like we were really onto something. I had visions of a whole new generation of software working this way. You wouldn't need versions, or ports, or any of that crap. At Interleaf there had been a whole group called Release Engineering that seemed to be at least as big as the group that actually wrote the software. Now you could just update the software right on the server.
We started a new company we called Viaweb, after the fact that our software worked via the web, and we got $10,000 in seed funding from Idelle's husband Julian. In return for that and doing the initial legal work and giving us business advice, we gave him 10% of the company. Ten years later this deal became the model for Y Combinator's. We knew founders needed something like this, because we'd needed it ourselves.
At this stage I had a negative net worth, because the thousand dollars or so I had in the bank was more than counterbalanced by what I owed the government in taxes. (Had I diligently set aside the proper proportion of the money I'd made consulting for Interleaf? No, I had not.) So although Robert had his graduate student stipend, I needed that seed funding to live on.
We originally hoped to launch in September, but we got more ambitious about the software as we worked on it. Eventually we managed to build a WYSIWYG site builder, in the sense that as you were creating pages, they looked exactly like the static ones that would be generated later, except that instead of leading to static pages, the links all referred to closures stored in a hash table on the server.
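To make that concrete, here is a minimal sketch in Python of the mechanism described above: links in a generated page refer to closures stored in a server-side hash table, so clicking a link resumes server-side state instead of fetching a static file. This is only an illustration under assumed names (`register`, `edit_page`, `handle_request` are hypothetical), not Viaweb's actual Lisp code.

```python
import uuid

handlers = {}  # hash table: link id -> closure

def register(closure):
    """Store a closure and return the link id that refers to it."""
    link_id = uuid.uuid4().hex
    handlers[link_id] = closure
    return link_id

def edit_page(store):
    """Render an editing page whose link captures the current store state."""
    def add_item():
        store["items"].append("new item")
        return edit_page(store)          # re-render with the updated state
    return f'<a href="/x/{register(add_item)}">Add item</a> ({len(store["items"])} items)'

def handle_request(path):
    """Dispatch /x/<id> to the stored closure, as the server would on a click."""
    link_id = path.rsplit("/", 1)[-1]
    return handlers[link_id]()

# Usage: generate a page, then "click" its link by dispatching on the id.
store = {"items": []}
page = edit_page(store)
print(page)                                   # 0 items
print(handle_request(page.split('"')[1]))     # 1 item, with a fresh link
```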
It helped to have studied art, because the main goal of an online store builder is to make users look legit, and the key to looking legit is high production values. If you get page layouts and fonts and colors right, you can make a guy running a store out of his bedroom look more legit than a big company.
(If you're curious why my site looks so old-fashioned, it's because it's still made with this software. It may look clunky today, but in 1996 it was the last word in slick.)
In September, Robert rebelled. "We've been working on this for a month," he said, "and it's still not done." This is funny in retrospect, because he would still be working on it almost 3 years later. But I decided it might be prudent to recruit more programmers, and I asked Robert who else in grad school with him was really good. He recommended Trevor Blackwell, which surprised me at first, because at that point I knew Trevor mainly for his plan to reduce everything in his life to a stack of notecards, which he carried around with him. But Rtm was right, as usual. Trevor turned out to be a frighteningly effective hacker.
It was a lot of fun working with Robert and Trevor. They're the two most independent-minded people I know, and in completely different ways. If you could see inside Rtm's brain it would look like a colonial New England church, and if you could see inside Trevor's it would look like the worst excesses of Austrian Rococo.
We opened for business, with 6 stores, in January 1996. It was just as well we waited a few months, because although we worried we were late, we were actually almost fatally early. There was a lot of talk in the press then about ecommerce, but not many people actually wanted online stores. [8]
There were three main parts to the software: the editor, which people used to build sites and which I wrote, the shopping cart, which Robert wrote, and the manager, which kept track of orders and statistics, and which Trevor wrote. In its time, the editor was one of the best general-purpose site builders. I kept the code tight and didn't have to integrate with any other software except Robert's and Trevor's, so it was quite fun to work on. If all I'd had to do was work on this software, the next 3 years would have been the easiest of my life. Unfortunately I had to do a lot more, all of it stuff I was worse at than programming, and the next 3 years were instead the most stressful.
There were a lot of startups making ecommerce software in the second half of the 90s. We were determined to be the Microsoft Word, not the Interleaf. Which meant being easy to use and inexpensive. It was lucky for us that we were poor, because that caused us to make Viaweb even more inexpensive than we realized. We charged $100 a month for a small store and $300 a month for a big one. This low price was a big attraction, and a constant thorn in the sides of competitors, but it wasn't because of some clever insight that we set the price low. We had no idea what businesses paid for things. $300 a month seemed like a lot of money to us.
We did a lot of things right by accident like that. For example, we did what's now called "doing things that don't scale," although at the time we would have described it as "being so lame that we're driven to the most desperate measures to get users." The most common of which was building stores for them. This seemed particularly humiliating, since the whole raison d'être of our software was that people could use it to make their own stores. But anything to get users.
We learned a lot more about retail than we wanted to know. For example, that if you could only have a small image of a man's shirt (and all images were small then by present standards), it was better to have a closeup of the collar than a picture of the whole shirt. The reason I remember learning this was that it meant I had to rescan about 30 images of men's shirts. My first set of scans were so beautiful too.
Though this felt wrong, it was exactly the right thing to be doing. Building stores for users taught us about retail, and about how it felt to use our software. I was initially both mystified and repelled by "business" and thought we needed a "business person" to be in charge of it, but once we started to get users, I was converted, in much the same way I was converted to fatherhood once I had kids. Whatever users wanted, I was all theirs. Maybe one day we'd have so many users that I couldn't scan their images for them, but in the meantime there was nothing more important to do.
Another thing I didn't get at the time is that growth rate is the ultimate test of a startup. Our growth rate was fine. We had about 70 stores at the end of 1996 and about 500 at the end of 1997. I mistakenly thought the thing that mattered was the absolute number of users. And that is the thing that matters in the sense that that's how much money you're making, and if you're not making enough, you might go out of business. But in the long term the growth rate takes care of the absolute number. If we'd been a startup I was advising at Y Combinator, I would have said: Stop being so stressed out, because you're doing fine. You're growing 7x a year. Just don't hire too many more people and you'll soon be profitable, and then you'll control your own destiny.
Alas I hired lots more people, partly because our investors wanted me to, and partly because that's what startups did during the Internet Bubble. A company with just a handful of employees would have seemed amateurish. So we didn't reach breakeven until about when Yahoo bought us in the summer of 1998. Which in turn meant we were at the mercy of investors for the entire life of the company. And since both we and our investors were noobs at startups, the result was a mess even by startup standards.
It was a huge relief when Yahoo bought us. In principle our Viaweb stock was valuable. It was a share in a business that was profitable and growing rapidly. But it didn't feel very valuable to me; I had no idea how to value a business, but I was all too keenly aware of the near-death experiences we seemed to have every few months. Nor had I changed my grad student lifestyle significantly since we started. So when Yahoo bought us it felt like going from rags to riches. Since we were going to California, I bought a car, a yellow 1998 VW GTI. I remember thinking that its leather seats alone were by far the most luxurious thing I owned.
The next year, from the summer of 1998 to the summer of 1999, must have been the least productive of my life. I didn't realize it at the time, but I was worn out from the effort and stress of running Viaweb. For a while after I got to California I tried to continue my usual m.o. of programming till 3 in the morning, but fatigue combined with Yahoo's prematurely aged culture and grim cube farm in Santa Clara gradually dragged me down. After a few months it felt disconcertingly like working at Interleaf.
Yahoo had given us a lot of options when they bought us. At the time I thought Yahoo was so overvalued that they'd never be worth anything, but to my astonishment the stock went up 5x in the next year. I hung on till the first chunk of options vested, then in the summer of 1999 I left. It had been so long since I'd painted anything that I'd half forgotten why I was doing this. My brain had been entirely full of software and men's shirts for 4 years. But I had done this to get rich so I could paint, I reminded myself, and now I was rich, so I should go paint.
When I said I was leaving, my boss at Yahoo had a long conversation with me about my plans. I told him all about the kinds of pictures I wanted to paint. At the time I was touched that he took such an interest in me. Now I realize it was because he thought I was lying. My options at that point were worth about $2 million a month. If I was leaving that kind of money on the table, it could only be to go and start some new startup, and if I did, I might take people with me. This was the height of the Internet Bubble, and Yahoo was ground zero of it. My boss was at that moment a billionaire. Leaving then to start a new startup must have seemed to him an insanely, and yet also plausibly, ambitious plan.
But I really was quitting to paint, and I started immediately. There was no time to lose. I'd already burned 4 years getting rich. Now when I talk to founders who are leaving after selling their companies, my advice is always the same: take a vacation. That's what I should have done, just gone off somewhere and done nothing for a month or two, but the idea never occurred to me.
So I tried to paint, but I just didn't seem to have any energy or ambition. Part of the problem was that I didn't know many people in California. I'd compounded this problem by buying a house up in the Santa Cruz Mountains, with a beautiful view but miles from anywhere. I stuck it out for a few more months, then in desperation I went back to New York, where unless you understand about rent control you'll be surprised to hear I still had my apartment, sealed up like a tomb of my old life. Idelle was in New York at least, and there were other people trying to paint there, even though I didn't know any of them.
When I got back to New York I resumed my old life, except now I was rich. It was as weird as it sounds. I resumed all my old patterns, except now there were doors where there hadn't been. Now when I was tired of walking, all I had to do was raise my hand, and (unless it was raining) a taxi would stop to pick me up. Now when I walked past charming little restaurants I could go in and order lunch. It was exciting for a while. Painting started to go better. I experimented with a new kind of still life where I'd paint one painting in the old way, then photograph it and print it, blown up, on canvas, and then use that as the underpainting for a second still life, painted from the same objects (which hopefully hadn't rotted yet).
Meanwhile I looked for an apartment to buy. Now I could actually choose what neighborhood to live in. Where, I asked myself and various real estate agents, is the Cambridge of New York? Aided by occasional visits to actual Cambridge, I gradually realized there wasn't one. Huh.
Around this time, in the spring of 2000, I had an idea. It was clear from our experience with Viaweb that web apps were the future. Why not build a web app for making web apps? Why not let people edit code on our server through the browser, and then host the resulting applications for them? [9] You could run all sorts of services on the servers that these applications could use just by making an API call: making and receiving phone calls, manipulating images, taking credit card payments, etc.
I got so excited about this idea that I couldn't think about anything else. It seemed obvious that this was the future. I didn't particularly want to start another company, but it was clear that this idea would have to be embodied as one, so I decided to move to Cambridge and start it. I hoped to lure Robert into working on it with me, but there I ran into a hitch. Robert was now a postdoc at MIT, and though he'd made a lot of money the last time I'd lured him into working on one of my schemes, it had also been a huge time sink. So while he agreed that it sounded like a plausible idea, he firmly refused to work on it.
Hmph. Well, I'd do it myself then. I recruited Dan Giffin, who had worked for Viaweb, and two undergrads who wanted summer jobs, and we got to work trying to build what it's now clear is about twenty companies and several open-source projects worth of software. The language for defining applications would of course be a dialect of Lisp. But I wasn't so naive as to assume I could spring an overt Lisp on a general audience; we'd hide the parentheses, like Dylan did.
By then there was a name for the kind of company Viaweb was, an "application service provider," or ASP. This name didn't last long before it was replaced by "software as a service," but it was current for long enough that I named this new company after it: it was going to be called Aspra.
I started working on the application builder, Dan worked on network infrastructure, and the two undergrads worked on the first two services (images and phone calls). But about halfway through the summer I realized I really didn't want to run a company — especially not a big one, which it was looking like this would have to be. I'd only started Viaweb because I needed the money. Now that I didn't need money anymore, why was I doing this? If this vision had to be realized as a company, then screw the vision. I'd build a subset that could be done as an open-source project.
Much to my surprise, the time I spent working on this stuff was not wasted after all. After we started Y Combinator, I would often encounter startups working on parts of this new architecture, and it was very useful to have spent so much time thinking about it and even trying to write some of it.
The subset I would build as an open-source project was the new Lisp, whose parentheses I now wouldn't even have to hide. A lot of Lisp hackers dream of building a new Lisp, partly because one of the distinctive features of the language is that it has dialects, and partly, I think, because we have in our minds a Platonic form of Lisp that all existing dialects fall short of. I certainly did. So at the end of the summer Dan and I switched to working on this new dialect of Lisp, which I called Arc, in a house I bought in Cambridge.
The following spring, lightning struck. I was invited to give a talk at a Lisp conference, so I gave one about how we'd used Lisp at Viaweb. Afterward I put a postscript file of this talk online, on paulgraham.com, which I'd created years before using Viaweb but had never used for anything. In one day it got 30,000 page views. What on earth had happened? The referring urls showed that someone had posted it on Slashdot. [10]
Wow, I thought, there's an audience. If I write something and put it on the web, anyone can read it. That may seem obvious now, but it was surprising then. In the print era there was a narrow channel to readers, guarded by fierce monsters known as editors. The only way to get an audience for anything you wrote was to get it published as a book, or in a newspaper or magazine. Now anyone could publish anything.
This had been possible in principle since 1993, but not many people had realized it yet. I had been intimately involved with building the infrastructure of the web for most of that time, and a writer as well, and it had taken me 8 years to realize it. Even then it took me several years to understand the implications. It meant there would be a whole new generation of essays. [11]
In the print era, the channel for publishing essays had been vanishingly small. Except for a few officially anointed thinkers who went to the right parties in New York, the only people allowed to publish essays were specialists writing about their specialties. There were so many essays that had never been written, because there had been no way to publish them. Now they could be, and I was going to write them. [12]
I've worked on several different things, but to the extent there was a turning point where I figured out what to work on, it was when I started publishing essays online. From then on I knew that whatever else I did, I'd always write essays too.
I knew that online essays would be a marginal medium at first. Socially they'd seem more like rants posted by nutjobs on their GeoCities sites than the genteel and beautifully typeset compositions published in The New Yorker. But by this point I knew enough to find that encouraging instead of discouraging.
One of the most conspicuous patterns I've noticed in my life is how well it has worked, for me at least, to work on things that weren't prestigious. Still life has always been the least prestigious form of painting. Viaweb and Y Combinator both seemed lame when we started them. I still get the glassy eye from strangers when they ask what I'm writing, and I explain that it's an essay I'm going to publish on my web site. Even Lisp, though prestigious intellectually in something like the way Latin is, also seems about as hip.
It's not that unprestigious types of work are good per se. But when you find yourself drawn to some kind of work despite its current lack of prestige, it's a sign both that there's something real to be discovered there, and that you have the right kind of motives. Impure motives are a big danger for the ambitious. If anything is going to lead you astray, it will be the desire to impress people. So while working on things that aren't prestigious doesn't guarantee you're on the right track, it at least guarantees you're not on the most common type of wrong one.
Over the next several years I wrote lots of essays about all kinds of different topics. O'Reilly reprinted a collection of them as a book, called Hackers & Painters after one of the essays in it. I also worked on spam filters, and did some more painting. I used to have dinners for a group of friends every thursday night, which taught me how to cook for groups. And I bought another building in Cambridge, a former candy factory (and later, twas said, porn studio), to use as an office.
One night in October 2003 there was a big party at my house. It was a clever idea of my friend Maria Daniels, who was one of the thursday diners. Three separate hosts would all invite their friends to one party. So for every guest, two thirds of the other guests would be people they didn't know but would probably like. One of the guests was someone I didn't know but would turn out to like a lot: a woman called Jessica Livingston. A couple days later I asked her out.
Jessica was in charge of marketing at a Boston investment bank. This bank thought it understood startups, but over the next year, as she met friends of mine from the startup world, she was surprised how different reality was. And how colorful their stories were. So she decided to compile a book of interviews with startup founders.
When the bank had financial problems and she had to fire half her staff, she started looking for a new job. In early 2005 she interviewed for a marketing job at a Boston VC firm. It took them weeks to make up their minds, and during this time I started telling her about all the things that needed to be fixed about venture capital. They should make a larger number of smaller investments instead of a handful of giant ones, they should be funding younger, more technical founders instead of MBAs, they should let the founders remain as CEO, and so on.
One of my tricks for writing essays had always been to give talks. The prospect of having to stand up in front of a group of people and tell them something that won't waste their time is a great spur to the imagination. When the Harvard Computer Society, the undergrad computer club, asked me to give a talk, I decided I would tell them how to start a startup. Maybe they'd be able to avoid the worst of the mistakes we'd made.
So I gave this talk, in the course of which I told them that the best sources of seed funding were successful startup founders, because then they'd be sources of advice too. Whereupon it seemed they were all looking expectantly at me. Horrified at the prospect of having my inbox flooded by business plans (if I'd only known), I blurted out "But not me!" and went on with the talk. But afterward it occurred to me that I should really stop procrastinating about angel investing. I'd been meaning to since Yahoo bought us, and now it was 7 years later and I still hadn't done one angel investment.
Meanwhile I had been scheming with Robert and Trevor about projects we could work on together. I missed working with them, and it seemed like there had to be something we could collaborate on.
As Jessica and I were walking home from dinner on March 11, at the corner of Garden and Walker streets, these three threads converged. Screw the VCs who were taking so long to make up their minds. We'd start our own investment firm and actually implement the ideas we'd been talking about. I'd fund it, and Jessica could quit her job and work for it, and we'd get Robert and Trevor as partners too. [13]
Once again, ignorance worked in our favor. We had no idea how to be angel investors, and in Boston in 2005 there were no Ron Conways to learn from. So we just made what seemed like the obvious choices, and some of the things we did turned out to be novel.
There are multiple components to Y Combinator, and we didn't figure them all out at once. The part we got first was to be an angel firm. In those days, those two words didn't go together. There were VC firms, which were organized companies with people whose job it was to make investments, but they only did big, million dollar investments. And there were angels, who did smaller investments, but these were individuals who were usually focused on other things and made investments on the side. And neither of them helped founders enough in the beginning. We knew how helpless founders were in some respects, because we remembered how helpless we'd been. For example, one thing Julian had done for us that seemed to us like magic was to get us set up as a company. We were fine writing fairly difficult software, but actually getting incorporated, with bylaws and stock and all that stuff, how on earth did you do that? Our plan was not only to make seed investments, but to do for startups everything Julian had done for us.
YC was not organized as a fund. It was cheap enough to run that we funded it with our own money. That went right by 99% of readers, but professional investors are thinking "Wow, that means they got all the returns." But once again, this was not due to any particular insight on our part. We didn't know how VC firms were organized. It never occurred to us to try to raise a fund, and if it had, we wouldn't have known where to start. [14]
The most distinctive thing about YC is the batch model: to fund a bunch of startups all at once, twice a year, and then to spend three months focusing intensively on trying to help them. That part we discovered by accident, not merely implicitly but explicitly due to our ignorance about investing. We needed to get experience as investors. What better way, we thought, than to fund a whole bunch of startups at once? We knew undergrads got temporary jobs at tech companies during the summer. Why not organize a summer program where they'd start startups instead? We wouldn't feel guilty for being in a sense fake investors, because they would in a similar sense be fake founders. So while we probably wouldn't make much money out of it, we'd at least get to practice being investors on them, and they for their part would probably have a more interesting summer than they would working at Microsoft.
We'd use the building I owned in Cambridge as our headquarters. We'd all have dinner there once a week — on tuesdays, since I was already cooking for the thursday diners on thursdays — and after dinner we'd bring in experts on startups to give talks.
We knew undergrads were deciding then about summer jobs, so in a matter of days we cooked up something we called the Summer Founders Program, and I posted an announcement on my site, inviting undergrads to apply. I had never imagined that writing essays would be a way to get "deal flow," as investors call it, but it turned out to be the perfect source. [15] We got 225 applications for the Summer Founders Program, and we were surprised to find that a lot of them were from people who'd already graduated, or were about to that spring. Already this SFP thing was starting to feel more serious than we'd intended.
We invited about 20 of the 225 groups to interview in person, and from those we picked 8 to fund. They were an impressive group. That first batch included reddit, Justin Kan and Emmett Shear, who went on to found Twitch, Aaron Swartz, who had already helped write the RSS spec and would a few years later become a martyr for open access, and Sam Altman, who would later become the second president of YC. I don't think it was entirely luck that the first batch was so good. You had to be pretty bold to sign up for a weird thing like the Summer Founders Program instead of a summer job at a legit place like Microsoft or Goldman Sachs.
The deal for startups was based on a combination of the deal we did with Julian ($10k for 10%) and what Robert said MIT grad students got for the summer ($6k). We invested $6k per founder, which in the typical two-founder case was $12k, in return for 6%. That had to be fair, because it was twice as good as the deal we ourselves had taken. Plus that first summer, which was really hot, Jessica brought the founders free air conditioners. [16]
Fairly quickly I realized that we had stumbled upon the way to scale startup funding. Funding startups in batches was more convenient for us, because it meant we could do things for a lot of startups at once, but being part of a batch was better for the startups too. It solved one of the biggest problems faced by founders: the isolation. Now you not only had colleagues, but colleagues who understood the problems you were facing and could tell you how they were solving them.
As YC grew, we started to notice other advantages of scale. The alumni became a tight community, dedicated to helping one another, and especially the current batch, whose shoes they remembered being in. We also noticed that the startups were becoming one another's customers. We used to refer jokingly to the "YC GDP," but as YC grows this becomes less and less of a joke. Now lots of startups get their initial set of customers almost entirely from among their batchmates.
I had not originally intended YC to be a full-time job. I was going to do three things: hack, write essays, and work on YC. As YC grew, and I grew more excited about it, it started to take up a lot more than a third of my attention. But for the first few years I was still able to work on other things.
In the summer of 2006, Robert and I started working on a new version of Arc. This one was reasonably fast, because it was compiled into Scheme. To test this new Arc, I wrote Hacker News in it. It was originally meant to be a news aggregator for startup founders and was called Startup News, but after a few months I got tired of reading about nothing but startups. Plus it wasn't startup founders we wanted to reach. It was future startup founders. So I changed the name to Hacker News and the topic to whatever engaged one's intellectual curiosity.
HN was no doubt good for YC, but it was also by far the biggest source of stress for me. If all I'd had to do was select and help founders, life would have been so easy. And that implies that HN was a mistake. Surely the biggest source of stress in one's work should at least be something close to the core of the work. Whereas I was like someone who was in pain while running a marathon not from the exertion of running, but because I had a blister from an ill-fitting shoe. When I was dealing with some urgent problem during YC, there was about a 60% chance it had to do with HN, and a 40% chance it had to do with everything else combined. [17]
As well as HN, I wrote all of YC's internal software in Arc. But while I continued to work a good deal in Arc, I gradually stopped working on Arc, partly because I didn't have time to, and partly because it was a lot less attractive to mess around with the language now that we had all this infrastructure depending on it. So now my three projects were reduced to two: writing essays and working on YC.
YC was different from other kinds of work I've done. Instead of deciding for myself what to work on, the problems came to me. Every 6 months there was a new batch of startups, and their problems, whatever they were, became our problems. It was very engaging work, because their problems were quite varied, and the good founders were very effective. If you were trying to learn the most you could about startups in the shortest possible time, you couldn't have picked a better way to do it.
There were parts of the job I didn't like. Disputes between cofounders, figuring out when people were lying to us, fighting with people who maltreated the startups, and so on. But I worked hard even at the parts I didn't like. I was haunted by something Kevin Hale once said about companies: "No one works harder than the boss." He meant it both descriptively and prescriptively, and it was the second part that scared me. I wanted YC to be good, so if how hard I worked set the upper bound on how hard everyone else worked, I'd better work very hard.
One day in 2010, when he was visiting California for interviews, Robert Morris did something astonishing: he offered me unsolicited advice. I can only remember him doing that once before. One day at Viaweb, when I was bent over double from a kidney stone, he suggested that it would be a good idea for him to take me to the hospital. That was what it took for Rtm to offer unsolicited advice. So I remember his exact words very clearly. "You know," he said, "you should make sure Y Combinator isn't the last cool thing you do."
At the time I didn't understand what he meant, but gradually it dawned on me that he was saying I should quit. This seemed strange advice, because YC was doing great. But if there was one thing rarer than Rtm offering advice, it was Rtm being wrong. So this set me thinking. It was true that on my current trajectory, YC would be the last thing I did, because it was only taking up more of my attention. It had already eaten Arc, and was in the process of eating essays too. Either YC was my life's work or I'd have to leave eventually. And it wasn't, so I would.
In the summer of 2012 my mother had a stroke, and the cause turned out to be a blood clot caused by colon cancer. The stroke destroyed her balance, and she was put in a nursing home, but she really wanted to get out of it and back to her house, and my sister and I were determined to help her do it. I used to fly up to Oregon to visit her regularly, and I had a lot of time to think on those flights. On one of them I realized I was ready to hand YC over to someone else.
I asked Jessica if she wanted to be president, but she didn't, so we decided we'd try to recruit Sam Altman. We talked to Robert and Trevor and we agreed to make it a complete changing of the guard. Up till that point YC had been controlled by the original LLC we four had started. But we wanted YC to last for a long time, and to do that it couldn't be controlled by the founders. So if Sam said yes, we'd let him reorganize YC. Robert and I would retire, and Jessica and Trevor would become ordinary partners.
When we asked Sam if he wanted to be president of YC, initially he said no. He wanted to start a startup to make nuclear reactors. But I kept at it, and in October 2013 he finally agreed. We decided he'd take over starting with the winter 2014 batch. For the rest of 2013 I left running YC more and more to Sam, partly so he could learn the job, and partly because I was focused on my mother, whose cancer had returned.
She died on January 15, 2014. We knew this was coming, but it was still hard when it did.
I kept working on YC till March, to help get that batch of startups through Demo Day, then I checked out pretty completely. (I still talk to alumni and to new startups working on things I'm interested in, but that only takes a few hours a week.)
What should I do next? Rtm's advice hadn't included anything about that. I wanted to do something completely different, so I decided I'd paint. I wanted to see how good I could get if I really focused on it. So the day after I stopped working on YC, I started painting. I was rusty and it took a while to get back into shape, but it was at least completely engaging. [18]
I spent most of the rest of 2014 painting. I'd never been able to work so uninterruptedly before, and I got to be better than I had been. Not good enough, but better. Then in November, right in the middle of a painting, I ran out of steam. Up till that point I'd always been curious to see how the painting I was working on would turn out, but suddenly finishing this one seemed like a chore. So I stopped working on it and cleaned my brushes and haven't painted since. So far anyway.
I realize that sounds rather wimpy. But attention is a zero sum game. If you can choose what to work on, and you choose a project that's not the best one (or at least a good one) for you, then it's getting in the way of another project that is. And at 50 there was some opportunity cost to screwing around.
I started writing essays again, and wrote a bunch of new ones over the next few months. I even wrote a couple that weren't about startups. Then in March 2015 I started working on Lisp again.
The distinctive thing about Lisp is that its core is a language defined by writing an interpreter in itself. It wasn't originally intended as a programming language in the ordinary sense. It was meant to be a formal model of computation, an alternative to the Turing machine. If you want to write an interpreter for a language in itself, what's the minimum set of predefined operators you need? The Lisp that John McCarthy invented, or more accurately discovered, is an answer to that question. [19]
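For readers who haven't seen it, a toy evaluator makes the question above concrete. The sketch below is in Python rather than Lisp, and it is not McCarthy's code; it just covers a handful of primitives in the spirit of his roughly seven operators (quote, atom, eq, car, cdr, cons, cond), with nested Python lists standing in for s-expressions.

```python
def lisp_eval(e, env):
    """Evaluate a small Lisp-like expression using a minimal set of operators."""
    if isinstance(e, str):                       # a symbol: look it up
        return env[e]
    op = e[0]
    if op == "quote":
        return e[1]
    if op == "atom":
        v = lisp_eval(e[1], env)
        return "t" if not isinstance(v, list) or v == [] else []
    if op == "eq":
        return "t" if lisp_eval(e[1], env) == lisp_eval(e[2], env) else []
    if op == "car":
        return lisp_eval(e[1], env)[0]
    if op == "cdr":
        return lisp_eval(e[1], env)[1:]
    if op == "cons":
        return [lisp_eval(e[1], env)] + lisp_eval(e[2], env)
    if op == "cond":
        for test, result in e[1:]:
            if lisp_eval(test, env) == "t":
                return lisp_eval(result, env)
        return []
    if isinstance(op, list) and op[0] == "lambda":   # ((lambda (x) body) arg)
        params, body = op[1], op[2]
        args = [lisp_eval(a, env) for a in e[1:]]
        return lisp_eval(body, {**env, **dict(zip(params, args))})
    raise ValueError(f"unknown expression: {e}")

# Usage: (car (cons (quote a) (quote (b c)))) evaluates to "a".
print(lisp_eval(["car", ["cons", ["quote", "a"], ["quote", ["b", "c"]]]], {}))
```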
McCarthy didn't realize this Lisp could even be used to program computers till his grad student Steve Russell suggested it. Russell translated McCarthy's interpreter into IBM 704 machine language, and from that point Lisp started also to be a programming language in the ordinary sense. But its origins as a model of computation gave it a power and elegance that other languages couldn't match. It was this that attracted me in college, though I didn't understand why at the time.
McCarthy's 1960 Lisp did nothing more than interpret Lisp expressions. It was missing a lot of things you'd want in a programming language. So these had to be added, and when they were, they weren't defined using McCarthy's original axiomatic approach. That wouldn't have been feasible at the time. McCarthy tested his interpreter by hand-simulating the execution of programs. But it was already getting close to the limit of interpreters you could test that way — indeed, there was a bug in it that McCarthy had overlooked. To test a more complicated interpreter, you'd have had to run it, and computers then weren't powerful enough.
Now they are, though. Now you could continue using McCarthy's axiomatic approach till you'd defined a complete programming language. And as long as every change you made to McCarthy's Lisp was a discoveredness-preserving transformation, you could, in principle, end up with a complete language that had this quality. Harder to do than to talk about, of course, but if it was possible in principle, why not try? So I decided to take a shot at it. It took 4 years, from March 26, 2015 to October 12, 2019. It was fortunate that I had a precisely defined goal, or it would have been hard to keep at it for so long.
I wrote this new Lisp, called Bel, in itself in Arc. That may sound like a contradiction, but it's an indication of the sort of trickery I had to engage in to make this work. By means of an egregious collection of hacks I managed to make something close enough to an interpreter written in itself that could actually run. Not fast, but fast enough to test.
I had to ban myself from writing essays during most of this time, or I'd never have finished. In late 2015 I spent 3 months writing essays, and when I went back to working on Bel I could barely understand the code. Not so much because it was badly written as because the problem is so convoluted. When you're working on an interpreter written in itself, it's hard to keep track of what's happening at what level, and errors can be practically encrypted by the time you get them.
So I said no more essays till Bel was done. But I told few people about Bel while I was working on it. So for years it must have seemed that I was doing nothing, when in fact I was working harder than I'd ever worked on anything. Occasionally after wrestling for hours with some gruesome bug I'd check Twitter or HN and see someone asking "Does Paul Graham still code?"
Working on Bel was hard but satisfying. I worked on it so intensively that at any given time I had a decent chunk of the code in my head and could write more there. I remember taking the boys to the coast on a sunny day in 2015 and figuring out how to deal with some problem involving continuations while I watched them play in the tide pools. It felt like I was doing life right. I remember that because I was slightly dismayed at how novel it felt. The good news is that I had more moments like this over the next few years.
In the summer of 2016 we moved to England. We wanted our kids to see what it was like living in another country, and since I was a British citizen by birth, that seemed the obvious choice. We only meant to stay for a year, but we liked it so much that we still live there. So most of Bel was written in England.
In the fall of 2019, Bel was finally finished. Like McCarthy's original Lisp, it's a spec rather than an implementation, although like McCarthy's Lisp it's a spec expressed as code.
Now that I could write essays again, I wrote a bunch about topics I'd had stacked up. I kept writing essays through 2020, but I also started to think about other things I could work on. How should I choose what to do? Well, how had I chosen what to work on in the past? I wrote an essay for myself to answer that question, and I was surprised how long and messy the answer turned out to be. If this surprised me, who'd lived it, then I thought perhaps it would be interesting to other people, and encouraging to those with similarly messy lives. So I wrote a more detailed version for others to read, and this is the last sentence of it.
Notes
[1] My experience skipped a step in the evolution of computers: time-sharing machines with interactive OSes. I went straight from batch processing to microcomputers, which made microcomputers seem all the more exciting.
[2] Italian words for abstract concepts can nearly always be predicted from their English cognates (except for occasional traps like polluzione). It's the everyday words that differ. So if you string together a lot of abstract concepts with a few simple verbs, you can make a little Italian go a long way.
[3] I lived at Piazza San Felice 4, so my walk to the Accademia went straight down the spine of old Florence: past the Pitti, across the bridge, past Orsanmichele, between the Duomo and the Baptistery, and then up Via Ricasoli to Piazza San Marco. I saw Florence at street level in every possible condition, from empty dark winter evenings to sweltering summer days when the streets were packed with tourists.
[4] You can of course paint people like still lives if you want to, and they're willing. That sort of portrait is arguably the apex of still life painting, though the long sitting does tend to produce pained expressions in the sitters.
[5] Interleaf was one of many companies that had smart people and built impressive technology, and yet got crushed by Moore's Law. In the 1990s the exponential growth in the power of commodity (i.e. Intel) processors rolled up high-end, special-purpose hardware and software companies like a bulldozer.
[6] The signature style seekers at RISD weren't specifically mercenary. In the art world, money and coolness are tightly coupled. Anything expensive comes to be seen as cool, and anything seen as cool will soon become equally expensive.
[7] Technically the apartment wasn't rent-controlled but rent-stabilized, but this is a refinement only New Yorkers would know or care about. The point is that it was really cheap, less than half market price.
[8] Most software you can launch as soon as it's done. But when the software is an online store builder and you're hosting the stores, if you don't have any users yet, that fact will be painfully obvious. So before we could launch publicly we had to launch privately, in the sense of recruiting an initial set of users and making sure they had decent-looking stores.
[9] We'd had a code editor in Viaweb for users to define their own page styles. They didn't know it, but they were editing Lisp expressions underneath. But this wasn't an app editor, because the code ran when the merchants' sites were generated, not when shoppers visited them.
[10] This was the first instance of what is now a familiar experience, and so was what happened next, when I read the comments and found they were full of angry people. How could I claim that Lisp was better than other languages? Weren't they all Turing complete? People who see the responses to essays I write sometimes tell me how sorry they feel for me, but I'm not exaggerating when I reply that it has always been like this, since the very beginning. It comes with the territory. An essay must tell readers things they don't already know, and some people dislike being told such things.
[11] People put plenty of stuff on the internet in the 90s of course, but putting something online is not the same as publishing it online. Publishing online means you treat the online version as the (or at least a) primary version.
[12] There is a general lesson here that our experience with Y Combinator also teaches: Customs continue to constrain you long after the restrictions that caused them have disappeared. Customary VC practice had once, like the customs about publishing essays, been based on real constraints. Startups had once been much more expensive to start, and proportionally rare. Now they could be cheap and common, but the VCs' customs still reflected the old world, just as customs about writing essays still reflected the constraints of the print era.
Which in turn implies that people who are independent-minded (i.e. less influenced by custom) will have an advantage in fields affected by rapid change (where customs are more likely to be obsolete).
Here's an interesting point, though: you can't always predict which fields will be affected by rapid change. Obviously software and venture capital will be, but who would have predicted that essay writing would be?
[13] Y Combinator was not the original name. At first we were called Cambridge Seed. But we didn't want a regional name, in case someone copied us in Silicon Valley, so we renamed ourselves after one of the coolest tricks in the lambda calculus, the Y combinator.
I picked orange as our color partly because it's the warmest, and partly because no VC used it. In 2005 all the VCs used staid colors like maroon, navy blue, and forest green, because they were trying to appeal to LPs, not founders. The YC logo itself is an inside joke: the Viaweb logo had been a white V on a red circle, so I made the YC logo a white Y on an orange square.
[14] YC did become a fund for a couple years starting in 2009, because it was getting so big I could no longer afford to fund it personally. But after Heroku got bought we had enough money to go back to being self-funded.
[15] I've never liked the term "deal flow," because it implies that the number of new startups at any given time is fixed. This is not only false, but it's the purpose of YC to falsify it, by causing startups to be founded that would not otherwise have existed.
[16] She reports that they were all different shapes and sizes, because there was a run on air conditioners and she had to get whatever she could, but that they were all heavier than she could carry now.
[17] Another problem with HN was a bizarre edge case that occurs when you both write essays and run a forum. When you run a forum, you're assumed to see if not every conversation, at least every conversation involving you. And when you write essays, people post highly imaginative misinterpretations of them on forums. Individually these two phenomena are tedious but bearable, but the combination is disastrous. You actually have to respond to the misinterpretations, because the assumption that you're present in the conversation means that not responding to any sufficiently upvoted misinterpretation reads as a tacit admission that it's correct. But that in turn encourages more; anyone who wants to pick a fight with you senses that now is their chance.
[18] The worst thing about leaving YC was not working with Jessica anymore. We'd been working on YC almost the whole time we'd known each other, and we'd neither tried nor wanted to separate it from our personal lives, so leaving was like pulling up a deeply rooted tree.
[19] One way to get more precise about the concept of invented vs discovered is to talk about space aliens. Any sufficiently advanced alien civilization would certainly know about the Pythagorean theorem, for example. I believe, though with less certainty, that they would also know about the Lisp in McCarthy's 1960 paper.
But if so there's no reason to suppose that this is the limit of the language that might be known to them. Presumably aliens need numbers and errors and I/O too. So it seems likely there exists at least one path out of McCarthy's Lisp along which discoveredness is preserved.
Thanks to Trevor Blackwell, John Collison, Patrick Collison, Daniel Gackle, Ralph Hazell, Jessica Livingston, Robert Morris, and Harj Taggar for reading drafts of this.

@ -941,7 +941,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.4"
"version": "3.9.1"
}
},
"nbformat": 4,

@ -16,7 +16,7 @@
"id": "4b47436a",
"metadata": {},
"source": [
"# How to route execution within a chain\n",
"# How to route between sub-chains\n",
"\n",
":::info Prerequisites\n",
"\n",

@ -30,7 +30,7 @@
"\n",
"The resulting [`RunnableSequence`](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.base.RunnableSequence.html) is itself a runnable, which means it can be invoked, streamed, or further chained just like any other runnable. Advantages of chaining runnables in this way are efficient streaming (the sequence will stream output as soon as it is available), and debugging and tracing with tools like [LangSmith](/docs/how_to/debugging).\n",
"\n",
"## The pipe operator\n",
"## The pipe operator: `|`\n",
"\n",
"To show off how this works, let's go through an example. We'll walk through a common pattern in LangChain: using a [prompt template](/docs/how_to#prompt-templates) to format input into a [chat model](/docs/how_to#chat-models), and finally converting the chat message output into a string with an [output parser](/docs/how_to#output-parsers).\n",
"\n",
@ -230,11 +230,28 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"## Next steps\n",
"\n",
"You now know some ways to chain two runnables together.\n",
"Or the abbreviated:"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"composed_chain_with_pipe = RunnableParallel({\"joke\": chain}).pipe(\n",
" analysis_prompt, model, StrOutputParser()\n",
")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Related\n",
"\n",
"To learn more, see the other how-to guides on runnables in this section."
"- [Streaming](/docs/how_to/streaming/): Check out the streaming guide to understand the streaming behavior of a chain\n",
"- "
]
}
],
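For orientation, the cells changed above compose runnables with the `|` operator and then show the equivalent `.pipe()` call. Below is a minimal sketch of both styles, assuming langchain-core and langchain-anthropic are installed, ANTHROPIC_API_KEY is set, and using hypothetical joke/analysis prompts rather than the notebook's exact templates.

from langchain_anthropic import ChatAnthropic
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableParallel

model = ChatAnthropic(model="claude-3-sonnet-20240229", temperature=0)

# Hypothetical prompts; the notebook's own templates may differ.
prompt = ChatPromptTemplate.from_template("tell me a joke about {topic}")
analysis_prompt = ChatPromptTemplate.from_template("is this a funny joke? {joke}")

# Pipe-operator style: each step feeds its output to the next runnable.
chain = prompt | model | StrOutputParser()

# Equivalent .pipe() style, matching the "Or the abbreviated:" cell above.
composed_chain_with_pipe = RunnableParallel({"joke": chain}).pipe(
    analysis_prompt, model, StrOutputParser()
)

print(composed_chain_with_pipe.invoke({"topic": "bears"}))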

@ -473,7 +473,7 @@
"id": "12b0ed60-2536-4f82-85df-e096a272072a",
"metadata": {},
"source": [
"To try out our chain, let's see what happens when we try filtering on \"elenis moriset\", a mispelling of Alanis Morissette, without and with retrieval:"
"To try out our chain, let's see what happens when we try filtering on \"elenis moriset\", a misspelling of Alanis Morissette, without and with retrieval:"
]
},
{

@ -71,16 +71,17 @@
{
"cell_type": "code",
"execution_count": 1,
"id": "cd351cf4",
"id": "f123bdcb-8c8b-440c-9bbd-aa5ed4e9cd17",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[33mWARNING: You are using pip version 22.0.4; however, version 24.0 is available.\n",
"You should consider upgrading via the '/Users/jacoblee/.pyenv/versions/3.10.5/bin/python -m pip install --upgrade pip' command.\u001b[0m\u001b[33m\n",
"\u001b[0mNote: you may need to restart the kernel to use updated packages.\n"
"\n",
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m A new release of pip is available: \u001b[0m\u001b[31;49m23.2.1\u001b[0m\u001b[39;49m -> \u001b[0m\u001b[32;49m24.0\u001b[0m\n",
"\u001b[1m[\u001b[0m\u001b[34;49mnotice\u001b[0m\u001b[1;39;49m]\u001b[0m\u001b[39;49m To update, run: \u001b[0m\u001b[32;49mpython -m pip install --upgrade pip\u001b[0m\n",
"Note: you may need to restart the kernel to use updated packages.\n"
]
}
],
@ -93,25 +94,69 @@
"import os\n",
"from getpass import getpass\n",
"\n",
"from langchain_anthropic import ChatAnthropic\n",
"keys = [\n",
" \"ANTHROPIC_API_KEY\",\n",
" \"OPENAI_API_KEY\",\n",
"]\n",
"\n",
"for key in keys:\n",
" if key not in os.environ:\n",
" os.environ[key] = getpass(f\"Enter API Key for {key}=?\")\n",
"\n",
"\n",
"os.environ[\"ANTHROPIC_API_KEY\"] = getpass()\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass()\n",
"from langchain_anthropic import ChatAnthropic\n",
"\n",
"model = ChatAnthropic(model=\"claude-3-sonnet-20240229\", temperature=0)"
]
},
{
"cell_type": "markdown",
"id": "a2464c57-0e89-4159-b21f-5859a21be658",
"metadata": {},
"source": [
"Let's start with the sync `stream` API:"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "91787fc7-d941-48c0-a8b4-0ee61ab7dd5d",
"id": "8b44dfb2-0749-487a-8918-f8b6b8233093",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"The| sky| appears| blue| during| the| da|ytime|.|"
"The| sky| appears| blue| during| the| day|.|"
]
}
],
"source": [
"chunks = []\n",
"for chunk in model.stream(\"what color is the sky?\"):\n",
" chunks.append(chunk)\n",
" print(chunk.content, end=\"|\", flush=True)"
]
},
{
"cell_type": "markdown",
"id": "8d835b5c-cbb7-41ab-8905-bdc24d515d29",
"metadata": {},
"source": [
"Alternatively, if you're working in an async environment, you may consider using the async `astream` API:"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "f180b6a0-0027-4bd8-8bab-fde76e282609",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"The| sky| appears| blue| during| the| day|.|"
]
}
],
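The markdown cell added above points readers from the sync `stream` loop to the async `astream` API. A sketch of that async counterpart, assuming the same `model` object as in the setup cell and a script context (a notebook can simply `await` the loop instead):

import asyncio

from langchain_anthropic import ChatAnthropic

model = ChatAnthropic(model="claude-3-sonnet-20240229", temperature=0)


async def main() -> None:
    chunks = []
    # Each chunk is an AIMessageChunk; printing with "|" separators mirrors the output above.
    async for chunk in model.astream("what color is the sky?"):
        chunks.append(chunk)
        print(chunk.content, end="|", flush=True)


asyncio.run(main())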
@ -132,17 +177,17 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 4,
"id": "dade3000-1ac4-4f5c-b5c6-a0217f9f8a6b",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessageChunk(content='The', id='run-c3885fff-3783-4b6d-85c4-4aeb45a02b1a')"
"AIMessageChunk(content='The', id='run-b36bea64-5511-4d7a-b6a3-a07b3db0c8e7')"
]
},
"execution_count": 3,
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
@ -163,17 +208,17 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 5,
"id": "d3cf5f38-249c-4da0-94e6-5e5203fad52e",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessageChunk(content='The sky appears blue during', id='run-c3885fff-3783-4b6d-85c4-4aeb45a02b1a')"
"AIMessageChunk(content='The sky appears blue during', id='run-b36bea64-5511-4d7a-b6a3-a07b3db0c8e7')"
]
},
"execution_count": 4,
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
@ -202,7 +247,7 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 6,
"id": "a8562ae2-3fd1-4829-9801-a5a732b1798d",
"metadata": {},
"outputs": [
@ -214,17 +259,13 @@
"\n",
"A man| goes| to| a| pet| shop| to| buy| a| par|rot|.| The| shop| owner| shows| him| two| stunning| pa|rr|ots| with| beautiful| pl|um|age|.|\n",
"\n",
"\"|There|'s| a| talking| par|rot| and| a| non|-|talking| par|rot|,\"| the| shop| owner| says|.| \"|The| talking| par|rot| costs| $|100|,| and| the| non|-|talking| par|rot| is| $|20|.\"|\n",
"\n",
"The| man| thinks| about| it| and| decides| to| buy| the| cheaper| non|-|talking| par|rot|.|\n",
"\"|There|'s| a| talking| par|rot| an|d a| non|-|talking| par|rot|,\"| the| owner| says|.| \"|The| talking| par|rot| costs| $|100|,| an|d the| non|-|talking| par|rot| is| $|20|.\"|\n",
"\n",
"When| he| gets| home|,| the| par|rot| immediately| speaks| up| and| says|,| \"|Hey|,| buddy|,| I|'m| actually| the| talking| par|rot|,| and| you| got| an| amazing| deal|!\"|\n",
"The| man| says|,| \"|I|'ll| take| the| non|-|talking| par|rot| at| $|20|.\"|\n",
"\n",
"The| man| is| stun|ned| and| rush|es| back| to| the| pet| shop| the| next| day|.|\n",
"He| pays| an|d leaves| with| the| par|rot|.| As| he|'s| walking| down| the| street|,| the| par|rot| looks| up| at| him| an|d says|,| \"|You| know|,| you| really| are| a| stupi|d man|!\"|\n",
"\n",
"\"|That| par|rot| you| sold| me| can| talk|!\"| he| tells| the| shop| owner|.| \"|You| said| it| was| the| non|-|talking| par|rot|,| but| it|'s| been| talking| up| a| storm|!\"|\n",
"\n",
"The| shop| owner| n|ods| and| says|,| \"|Yeah|,| I| know|.| But| did| you| really| think| I| was| going| to| sell| you| the| talking| par|rot| for| just| $|20|?\"|"
"The| man| is| stun|ne|d an|d looks| at| the| par|rot| in| dis|bel|ief|.| The| par|rot| continues|,| \"|Yes|,| you| got| r|ippe|d off| big| time|!| I| can| talk| just| as| well| as| that| other| par|rot|,| an|d you| only| pai|d $|20| |for| me|!\"|"
]
}
],
@ -245,9 +286,11 @@
"id": "868bc412",
"metadata": {},
"source": [
"You might notice above that `parser` actually doesn't block the streaming output from the model, and instead processes each chunk individually. Many of the [LCEL primitives](/docs/how_to#langchain-expression-language-lcel) also support this kind of transform-style passthrough streaming, which can be very convenient when constructing apps.\n",
"Note that we're getting streaming output even though we're using `parser` at the end of the chain above. The `parser` operates on each streaming chunk individidually. Many of the [LCEL primitives](/docs/how_to#langchain-expression-language-lcel) also support this kind of transform-style passthrough streaming, which can be very convenient when constructing apps. \n",
"\n",
"Custom functions can be [designed to return generators](/docs/how_to/functions#streaming), which are able to operate on streams.\n",
"\n",
"Certain runnables, like [prompt templates](/docs/how_to#prompt-templates) and [chat models](/docs/how_to#chat-models), cannot process individual chunks and instead aggregate all previous steps. This will interrupt the streaming process. Custom functions can be [designed to return generators](/docs/how_to/functions#streaming), which"
"Certain runnables, like [prompt templates](/docs/how_to#prompt-templates) and [chat models](/docs/how_to#chat-models), cannot process individual chunks and instead aggregate all previous steps. Such runnables can interrupt the streaming process."
]
},
{
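The reworded cell above explains that the output parser operates on each streaming chunk individually, so it does not block streaming. A small sketch of such a chain, assuming the `model` from the setup cell and a hypothetical joke prompt:

from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate

prompt = ChatPromptTemplate.from_template("tell me a joke about {topic}")
parser = StrOutputParser()
chain = prompt | model | parser  # `model` as defined in the setup cell above

# The parser transforms each AIMessageChunk into a string as it arrives,
# so the chain streams instead of waiting for the full completion.
for chunk in chain.stream({"topic": "parrot"}):
    print(chunk, end="|", flush=True)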
@ -256,10 +299,9 @@
"metadata": {},
"source": [
":::{.callout-note}\n",
"If the above functionality is not relevant to what you're building, you do not have to use the `LangChain Expression Language` to use LangChain and can instead rely on a standard **imperative** programming approach by\n",
"The LangChain Expression language allows you to separate the construction of a chain from the mode in which it is used (e.g., sync/async, batch/streaming etc.). If this is not relevant to what you're building, you can also rely on a standard **imperative** programming approach by\n",
"caling `invoke`, `batch` or `stream` on each component individually, assigning the results to variables and then using them downstream as you see fit.\n",
"\n",
"If that works for your needs, then that's fine by us 👌!\n",
":::"
]
},
@ -283,7 +325,7 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 7,
"id": "5ff63cce-715a-4561-951f-9321c82e8d81",
"metadata": {},
"outputs": [
@ -321,7 +363,9 @@
" model | JsonOutputParser()\n",
") # Due to a bug in older versions of Langchain, JsonOutputParser did not stream results from some models\n",
"async for text in chain.astream(\n",
" 'output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. Each country should have the key `name` and `population`'\n",
" \"output a list of the countries france, spain and japan and their populations in JSON format. \"\n",
" 'Use a dict with an outer key of \"countries\" which contains a list of countries. '\n",
" \"Each country should have the key `name` and `population`\"\n",
"):\n",
" print(text, flush=True)"
]
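For context, the cell being reformatted above streams partial JSON through `JsonOutputParser`. A sketch of the full pattern, assuming the same `model` and an async context:

from langchain_core.output_parsers import JsonOutputParser

chain = model | JsonOutputParser()

# Each yielded value is a progressively more complete Python dict parsed from the
# partial JSON emitted so far, e.g. {}, {"countries": []}, {"countries": [{...}]}, ...
async for text in chain.astream(
    "output a list of the countries france, spain and japan and their populations in JSON format. "
    'Use a dict with an outer key of "countries" which contains a list of countries. '
    "Each country should have the key `name` and `population`"
):
    print(text, flush=True)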
@ -344,7 +388,7 @@
},
{
"cell_type": "code",
"execution_count": 7,
"execution_count": 8,
"id": "d9c90117-9faa-4a01-b484-0db071808d1f",
"metadata": {},
"outputs": [
@ -352,7 +396,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
"[None, '', 'France', 'France', 'France', 'France', 'France', None, 'France', '', 'France', 'Spain', 'France', 'Spain', 'France', 'Spain', 'France', 'Spain', 'France', 'Spain', None, 'France', 'Spain', '', 'France', 'Spain', 'Japan', 'France', 'Spain', 'Japan', 'France', 'Spain', 'Japan', 'France', 'Spain', 'Japan']|"
"['France', 'Spain', 'Japan']|"
]
}
],
@ -386,7 +430,9 @@
"chain = model | JsonOutputParser() | _extract_country_names\n",
"\n",
"async for text in chain.astream(\n",
" 'output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. Each country should have the key `name` and `population`'\n",
" \"output a list of the countries france, spain and japan and their populations in JSON format. \"\n",
" 'Use a dict with an outer key of \"countries\" which contains a list of countries. '\n",
" \"Each country should have the key `name` and `population`\"\n",
"):\n",
" print(text, end=\"|\", flush=True)"
]
@ -401,13 +447,13 @@
"Le'ts fix the streaming using a generator function that can operate on the **input stream**.\n",
"\n",
":::{.callout-tip}\n",
"A generator function (a function that uses `yield`) allows writing code that operators on **input streams**\n",
"A generator function (a function that uses `yield`) allows writing code that operates on **input streams**\n",
":::"
]
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": 9,
"id": "15984b2b-315a-4119-945b-2a3dabea3082",
"metadata": {},
"outputs": [
@ -451,7 +497,9 @@
"chain = model | JsonOutputParser() | _extract_country_names_streaming\n",
"\n",
"async for text in chain.astream(\n",
" 'output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. Each country should have the key `name` and `population`'\n",
" \"output a list of the countries france, spain and japan and their populations in JSON format. \"\n",
" 'Use a dict with an outer key of \"countries\" which contains a list of countries. '\n",
" \"Each country should have the key `name` and `population`\",\n",
"):\n",
" print(text, end=\"|\", flush=True)"
]
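The fixed cell above chains a generator function after `JsonOutputParser` so that country names stream out as soon as they appear. A hedged sketch of what such a generator can look like; the notebook's actual `_extract_country_names_streaming` may differ in detail:

from langchain_core.output_parsers import JsonOutputParser


async def _extract_country_names_streaming(input_stream):
    """Yield each country name once, as soon as it shows up in the partial JSON."""
    seen = set()
    async for partial_json in input_stream:
        if not isinstance(partial_json, dict):
            continue
        for country in partial_json.get("countries", []):
            name = country.get("name")
            if name and name not in seen:
                seen.add(name)
                yield name


# Chaining a generator function coerces it into a runnable that operates on the stream.
chain = model | JsonOutputParser() | _extract_country_names_streaming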
@ -480,7 +528,7 @@
},
{
"cell_type": "code",
"execution_count": 9,
"execution_count": 10,
"id": "b9b1c00d-8b44-40d0-9e2b-8a70d238f82b",
"metadata": {},
"outputs": [
@ -491,7 +539,7 @@
" Document(page_content='harrison likes spicy food')]]"
]
},
"execution_count": 9,
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
@ -536,7 +584,7 @@
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": 11,
"id": "957447e6-1e60-41ef-8c10-2654bd9e738d",
"metadata": {},
"outputs": [],
@ -554,7 +602,7 @@
},
{
"cell_type": "code",
"execution_count": 11,
"execution_count": 12,
"id": "94e50b5d-bf51-4eee-9da0-ee40dd9ce42b",
"metadata": {},
"outputs": [
@ -562,15 +610,15 @@
"name": "stdout",
"output_type": "stream",
"text": [
"Based| on| the| given| context|,| Harrison| worked| at| K|ens|ho|.|\n",
"Base|d on| the| given| context|,| Harrison| worke|d at| K|ens|ho|.|\n",
"\n",
"Here| are| |3| |made| up| sentences| about| this| place|:|\n",
"\n",
"1|.| K|ens|ho| was| a| cutting|-|edge| technology| company| known| for| its| innovative| solutions| in| artificial| intelligence| and| data| analytics|.|\n",
"1|.| K|ens|ho| was| a| cutting|-|edge| technology| company| known| for| its| innovative| solutions| in| artificial| intelligence| an|d data| analytics|.|\n",
"\n",
"2|.| The| modern| office| space| at| K|ens|ho| featured| open| floor| plans|,| collaborative| work|sp|aces|,| and| a| vib|rant| atmosphere| that| fos|tered| creativity| and| team|work|.|\n",
"2|.| The| modern| office| space| at| K|ens|ho| feature|d open| floor| plans|,| collaborative| work|sp|aces|,| an|d a| vib|rant| atmosphere| that| fos|tere|d creativity| an|d team|work|.|\n",
"\n",
"3|.| With| its| prime| location| in| the| heart| of| the| city|,| K|ens|ho| attracted| top| talent| from| around| the| world|,| creating| a| diverse| and| dynamic| work| environment|.|"
"3|.| With| its| prime| location| in| the| heart| of| the| city|,| K|ens|ho| attracte|d top| talent| from| aroun|d the| worl|d,| creating| a| diverse| an|d dynamic| work| environment|.|"
]
}
],
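The streamed answer above comes from a retrieval chain over a couple of toy documents about Harrison and Kensho. A sketch of that kind of chain under stated assumptions: FAISS and OpenAI embeddings stand in for whatever vector store the notebook actually uses, and the prompt wording is illustrative.

from langchain_community.vectorstores import FAISS
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_openai import OpenAIEmbeddings

vectorstore = FAISS.from_texts(
    ["harrison worked at kensho", "harrison likes spicy food"],
    embedding=OpenAIEmbeddings(),
)
retriever = vectorstore.as_retriever()

prompt = ChatPromptTemplate.from_template(
    "Answer the question based only on the following context:\n{context}\n\n"
    "Question: {question}\n\nAlso write 3 made up sentences about this place."
)

# The dict is coerced to a RunnableParallel; the retrieved documents fill {context}.
retrieval_chain = (
    {"context": retriever, "question": RunnablePassthrough()}
    | prompt
    | model
    | StrOutputParser()
)

for chunk in retrieval_chain.stream("Where did harrison work?"):
    print(chunk, end="|", flush=True)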
@ -599,27 +647,17 @@
"Event Streaming is a **beta** API. This API may change a bit based on feedback.\n",
"\n",
":::{.callout-note}\n",
"Introduced in langchain-core **0.1.14**.\n",
"\n",
"This guide demonstrates the `V2` API and requires langchain-core >= 0.2. For the `V1` API compatible with older versions of LangChain, see [here](https://python.langchain.com/v0.1/docs/expression_language/streaming/#using-stream-events).\n",
":::"
]
},
{
"cell_type": "code",
"execution_count": 12,
"execution_count": null,
"id": "61348df9-ec58-401e-be89-68a70042f88e",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'0.1.45'"
]
},
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"outputs": [],
"source": [
"import langchain_core\n",
"\n",
@ -647,24 +685,21 @@
"When streaming is implemented properly, the inputs to a runnable will not be known until after the input stream has been entirely consumed. This means that `inputs` will often be included only for `end` events and rather than for `start` events.\n",
":::\n",
"\n",
"\n",
"| event | name | chunk | input | output |\n",
"|----------------------|------------------|---------------------------------|-----------------------------------------------|-------------------------------------------------|\n",
"| on_chat_model_start | [model name] | | {\"messages\": [[SystemMessage, HumanMessage]]} | |\n",
"| on_chat_model_stream | [model name] | AIMessageChunk(content=\"hello\") | | |\n",
"| on_chat_model_end | [model name] | | {\"messages\": [[SystemMessage, HumanMessage]]} | {\"generations\": [...], \"llm_output\": None, ...} |\n",
"| on_chat_model_end | [model name] | | {\"messages\": [[SystemMessage, HumanMessage]]} | AIMessageChunk(content=\"hello world\") |\n",
"| on_llm_start | [model name] | | {'input': 'hello'} | |\n",
"| on_llm_stream | [model name] | 'Hello' | | |\n",
"| on_llm_end | [model name] | | 'Hello human!' |\n",
"| on_llm_end | [model name] | | 'Hello human!' | |\n",
"| on_chain_start | format_docs | | | |\n",
"| on_chain_stream | format_docs | \"hello world!, goodbye world!\" | | |\n",
"| on_chain_end | format_docs | | [Document(...)] | \"hello world!, goodbye world!\" |\n",
"| on_tool_start | some_tool | | {\"x\": 1, \"y\": \"2\"} | |\n",
"| on_tool_stream | some_tool | {\"x\": 1, \"y\": \"2\"} | | |\n",
"| on_tool_end | some_tool | | | {\"x\": 1, \"y\": \"2\"} |\n",
"| on_retriever_start | [retriever name] | | {\"query\": \"hello\"} | |\n",
"| on_retriever_chunk | [retriever name] | {documents: [...]} | | |\n",
"| on_retriever_end | [retriever name] | | {\"query\": \"hello\"} | {documents: [...]} |\n",
"| on_retriever_end | [retriever name] | | {\"query\": \"hello\"} | [Document(...), ..] |\n",
"| on_prompt_start | [template_name] | | {\"question\": \"hello\"} | |\n",
"| on_prompt_end | [template_name] | | {\"question\": \"hello\"} | ChatPromptValue(messages: [SystemMessage, ...]) |"
]
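The table above enumerates the event shapes emitted by `astream_events`. A small sketch of consuming them, in line with the dispatching cells further down; the chain here is the `model | JsonOutputParser()` pipeline used throughout the notebook:

from langchain_core.output_parsers import JsonOutputParser

chain = model | JsonOutputParser()

async for event in chain.astream_events("hello", version="v2"):
    kind = event["event"]   # e.g. "on_chat_model_stream", "on_parser_stream"
    name = event["name"]    # runnable name, e.g. "ChatAnthropic"
    if kind == "on_chat_model_stream":
        print(f"{name}: {event['data']['chunk'].content}", end="|", flush=True)
    elif kind == "on_parser_stream":
        print(f"{name}: {event['data']['chunk']}", flush=True)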
@ -681,13 +716,22 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 14,
"id": "c00df46e-7f6b-4e06-8abf-801898c8d57f",
"metadata": {},
"outputs": [],
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/home/eugene/src/langchain/libs/core/langchain_core/_api/beta_decorator.py:87: LangChainBetaWarning: This API is in beta and may change in the future.\n",
" warn_beta(\n"
]
}
],
"source": [
"events = []\n",
"async for event in model.astream_events(\"hello\", version=\"v1\"):\n",
"async for event in model.astream_events(\"hello\", version=\"v2\"):\n",
" events.append(event)"
]
},
@ -698,13 +742,16 @@
"source": [
":::{.callout-note}\n",
"\n",
"Hey what's that funny version=\"v1\" parameter in the API?! 😾\n",
"Hey what's that funny version=\"v2\" parameter in the API?! 😾\n",
"\n",
"This is a **beta API**, and we're almost certainly going to make some changes to it.\n",
"This is a **beta API**, and we're almost certainly going to make some changes to it (in fact, we already have!)\n",
"\n",
"This version parameter will allow us to minimize such breaking changes to your code. \n",
"\n",
"In short, we are annoying you now, so we don't have to annoy you later.\n",
"\n",
"`v2` is only available for langchain-core>=0.2.0.\n",
"\n",
":::"
]
},
@ -718,7 +765,7 @@
},
{
"cell_type": "code",
"execution_count": 14,
"execution_count": 15,
"id": "ce31b525-f47d-4828-85a7-912ce9f2e79b",
"metadata": {},
"outputs": [
@ -726,26 +773,26 @@
"data": {
"text/plain": [
"[{'event': 'on_chat_model_start',\n",
" 'run_id': '26134ba4-e486-4552-94d9-a31a2dfe7f4a',\n",
" 'data': {'input': 'hello'},\n",
" 'name': 'ChatAnthropic',\n",
" 'tags': [],\n",
" 'metadata': {},\n",
" 'data': {'input': 'hello'}},\n",
" 'run_id': 'a81e4c0f-fc36-4d33-93bc-1ac25b9bb2c3',\n",
" 'metadata': {}},\n",
" {'event': 'on_chat_model_stream',\n",
" 'run_id': '26134ba4-e486-4552-94d9-a31a2dfe7f4a',\n",
" 'tags': [],\n",
" 'metadata': {},\n",
" 'data': {'chunk': AIMessageChunk(content='Hello', id='run-a81e4c0f-fc36-4d33-93bc-1ac25b9bb2c3')},\n",
" 'run_id': 'a81e4c0f-fc36-4d33-93bc-1ac25b9bb2c3',\n",
" 'name': 'ChatAnthropic',\n",
" 'data': {'chunk': AIMessageChunk(content='Hello', id='run-26134ba4-e486-4552-94d9-a31a2dfe7f4a')}},\n",
" {'event': 'on_chat_model_stream',\n",
" 'run_id': '26134ba4-e486-4552-94d9-a31a2dfe7f4a',\n",
" 'tags': [],\n",
" 'metadata': {},\n",
" 'metadata': {}},\n",
" {'event': 'on_chat_model_stream',\n",
" 'data': {'chunk': AIMessageChunk(content='!', id='run-a81e4c0f-fc36-4d33-93bc-1ac25b9bb2c3')},\n",
" 'run_id': 'a81e4c0f-fc36-4d33-93bc-1ac25b9bb2c3',\n",
" 'name': 'ChatAnthropic',\n",
" 'data': {'chunk': AIMessageChunk(content='!', id='run-26134ba4-e486-4552-94d9-a31a2dfe7f4a')}}]"
" 'tags': [],\n",
" 'metadata': {}}]"
]
},
"execution_count": 14,
"execution_count": 15,
"metadata": {},
"output_type": "execute_result"
}
@ -756,7 +803,7 @@
},
{
"cell_type": "code",
"execution_count": 15,
"execution_count": 16,
"id": "76cfe826-ee63-4310-ad48-55a95eb3b9d6",
"metadata": {},
"outputs": [
@ -764,20 +811,20 @@
"data": {
"text/plain": [
"[{'event': 'on_chat_model_stream',\n",
" 'run_id': '26134ba4-e486-4552-94d9-a31a2dfe7f4a',\n",
" 'tags': [],\n",
" 'metadata': {},\n",
" 'data': {'chunk': AIMessageChunk(content='?', id='run-a81e4c0f-fc36-4d33-93bc-1ac25b9bb2c3')},\n",
" 'run_id': 'a81e4c0f-fc36-4d33-93bc-1ac25b9bb2c3',\n",
" 'name': 'ChatAnthropic',\n",
" 'data': {'chunk': AIMessageChunk(content='?', id='run-26134ba4-e486-4552-94d9-a31a2dfe7f4a')}},\n",
" 'tags': [],\n",
" 'metadata': {}},\n",
" {'event': 'on_chat_model_end',\n",
" 'data': {'output': AIMessageChunk(content='Hello! How can I assist you today?', id='run-a81e4c0f-fc36-4d33-93bc-1ac25b9bb2c3')},\n",
" 'run_id': 'a81e4c0f-fc36-4d33-93bc-1ac25b9bb2c3',\n",
" 'name': 'ChatAnthropic',\n",
" 'run_id': '26134ba4-e486-4552-94d9-a31a2dfe7f4a',\n",
" 'tags': [],\n",
" 'metadata': {},\n",
" 'data': {'output': AIMessageChunk(content='Hello! How can I assist you today?', id='run-26134ba4-e486-4552-94d9-a31a2dfe7f4a')}}]"
" 'metadata': {}}]"
]
},
"execution_count": 15,
"execution_count": 16,
"metadata": {},
"output_type": "execute_result"
}
@ -798,7 +845,7 @@
},
{
"cell_type": "code",
"execution_count": 16,
"execution_count": 17,
"id": "4328c56c-a303-427b-b1f2-f354e9af555c",
"metadata": {},
"outputs": [],
@ -810,8 +857,10 @@
"events = [\n",
" event\n",
" async for event in chain.astream_events(\n",
" 'output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. Each country should have the key `name` and `population`',\n",
" version=\"v1\",\n",
" \"output a list of the countries france, spain and japan and their populations in JSON format. \"\n",
" 'Use a dict with an outer key of \"countries\" which contains a list of countries. '\n",
" \"Each country should have the key `name` and `population`\",\n",
" version=\"v2\",\n",
" )\n",
"]"
]
@ -832,7 +881,7 @@
},
{
"cell_type": "code",
"execution_count": 17,
"execution_count": 18,
"id": "8e66ea3d-a450-436a-aaac-d9478abc6c28",
"metadata": {},
"outputs": [
@ -840,26 +889,26 @@
"data": {
"text/plain": [
"[{'event': 'on_chain_start',\n",
" 'run_id': '93c65519-a480-43f2-b340-851706799c57',\n",
" 'data': {'input': 'output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. Each country should have the key `name` and `population`'},\n",
" 'name': 'RunnableSequence',\n",
" 'tags': [],\n",
" 'metadata': {},\n",
" 'data': {'input': 'output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. Each country should have the key `name` and `population`'}},\n",
" 'run_id': '4765006b-16e2-4b1d-a523-edd9fd64cb92',\n",
" 'metadata': {}},\n",
" {'event': 'on_chat_model_start',\n",
" 'data': {'input': {'messages': [[HumanMessage(content='output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. Each country should have the key `name` and `population`')]]}},\n",
" 'name': 'ChatAnthropic',\n",
" 'run_id': '6075a178-bc34-4ef2-bbb4-75c3ed96eb9c',\n",
" 'tags': ['seq:step:1'],\n",
" 'metadata': {},\n",
" 'data': {'input': {'messages': [[HumanMessage(content='output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. Each country should have the key `name` and `population`')]]}}},\n",
" 'run_id': '0320c234-7b52-4a14-ae4e-5f100949e589',\n",
" 'metadata': {}},\n",
" {'event': 'on_chat_model_stream',\n",
" 'data': {'chunk': AIMessageChunk(content='{', id='run-0320c234-7b52-4a14-ae4e-5f100949e589')},\n",
" 'run_id': '0320c234-7b52-4a14-ae4e-5f100949e589',\n",
" 'name': 'ChatAnthropic',\n",
" 'run_id': '6075a178-bc34-4ef2-bbb4-75c3ed96eb9c',\n",
" 'tags': ['seq:step:1'],\n",
" 'metadata': {},\n",
" 'data': {'chunk': AIMessageChunk(content='{', id='run-6075a178-bc34-4ef2-bbb4-75c3ed96eb9c')}}]"
" 'metadata': {}}]"
]
},
"execution_count": 17,
"execution_count": 18,
"metadata": {},
"output_type": "execute_result"
}
@ -886,7 +935,7 @@
},
{
"cell_type": "code",
"execution_count": 18,
"execution_count": 19,
"id": "630c71d6-8d94-4ce0-a78a-f20e90f628df",
"metadata": {},
"outputs": [
@ -925,8 +974,10 @@
"num_events = 0\n",
"\n",
"async for event in chain.astream_events(\n",
" 'output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. Each country should have the key `name` and `population`',\n",
" version=\"v1\",\n",
" \"output a list of the countries france, spain and japan and their populations in JSON format. \"\n",
" 'Use a dict with an outer key of \"countries\" which contains a list of countries. '\n",
" \"Each country should have the key `name` and `population`\",\n",
" version=\"v2\",\n",
"):\n",
" kind = event[\"event\"]\n",
" if kind == \"on_chat_model_stream\":\n",
@ -967,7 +1018,7 @@
},
{
"cell_type": "code",
"execution_count": 19,
"execution_count": 20,
"id": "4f0b581b-be63-4663-baba-c6d2b625cdf9",
"metadata": {},
"outputs": [
@ -975,17 +1026,17 @@
"name": "stdout",
"output_type": "stream",
"text": [
"{'event': 'on_parser_start', 'name': 'my_parser', 'run_id': 'b817e94b-db03-4b6f-8432-019dd59a2d93', 'tags': ['seq:step:2'], 'metadata': {}, 'data': {}}\n",
"{'event': 'on_parser_stream', 'name': 'my_parser', 'run_id': 'b817e94b-db03-4b6f-8432-019dd59a2d93', 'tags': ['seq:step:2'], 'metadata': {}, 'data': {'chunk': {}}}\n",
"{'event': 'on_parser_stream', 'name': 'my_parser', 'run_id': 'b817e94b-db03-4b6f-8432-019dd59a2d93', 'tags': ['seq:step:2'], 'metadata': {}, 'data': {'chunk': {'countries': []}}}\n",
"{'event': 'on_parser_stream', 'name': 'my_parser', 'run_id': 'b817e94b-db03-4b6f-8432-019dd59a2d93', 'tags': ['seq:step:2'], 'metadata': {}, 'data': {'chunk': {'countries': [{}]}}}\n",
"{'event': 'on_parser_stream', 'name': 'my_parser', 'run_id': 'b817e94b-db03-4b6f-8432-019dd59a2d93', 'tags': ['seq:step:2'], 'metadata': {}, 'data': {'chunk': {'countries': [{'name': ''}]}}}\n",
"{'event': 'on_parser_stream', 'name': 'my_parser', 'run_id': 'b817e94b-db03-4b6f-8432-019dd59a2d93', 'tags': ['seq:step:2'], 'metadata': {}, 'data': {'chunk': {'countries': [{'name': 'France'}]}}}\n",
"{'event': 'on_parser_stream', 'name': 'my_parser', 'run_id': 'b817e94b-db03-4b6f-8432-019dd59a2d93', 'tags': ['seq:step:2'], 'metadata': {}, 'data': {'chunk': {'countries': [{'name': 'France', 'population': 67}]}}}\n",
"{'event': 'on_parser_stream', 'name': 'my_parser', 'run_id': 'b817e94b-db03-4b6f-8432-019dd59a2d93', 'tags': ['seq:step:2'], 'metadata': {}, 'data': {'chunk': {'countries': [{'name': 'France', 'population': 67413}]}}}\n",
"{'event': 'on_parser_stream', 'name': 'my_parser', 'run_id': 'b817e94b-db03-4b6f-8432-019dd59a2d93', 'tags': ['seq:step:2'], 'metadata': {}, 'data': {'chunk': {'countries': [{'name': 'France', 'population': 67413000}]}}}\n",
"{'event': 'on_parser_stream', 'name': 'my_parser', 'run_id': 'b817e94b-db03-4b6f-8432-019dd59a2d93', 'tags': ['seq:step:2'], 'metadata': {}, 'data': {'chunk': {'countries': [{'name': 'France', 'population': 67413000}, {}]}}}\n",
"{'event': 'on_parser_stream', 'name': 'my_parser', 'run_id': 'b817e94b-db03-4b6f-8432-019dd59a2d93', 'tags': ['seq:step:2'], 'metadata': {}, 'data': {'chunk': {'countries': [{'name': 'France', 'population': 67413000}, {'name': ''}]}}}\n",
"{'event': 'on_parser_start', 'data': {'input': 'output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. Each country should have the key `name` and `population`'}, 'name': 'my_parser', 'tags': ['seq:step:2'], 'run_id': 'e058d750-f2c2-40f6-aa61-10f84cd671a9', 'metadata': {}}\n",
"{'event': 'on_parser_stream', 'data': {'chunk': {}}, 'run_id': 'e058d750-f2c2-40f6-aa61-10f84cd671a9', 'name': 'my_parser', 'tags': ['seq:step:2'], 'metadata': {}}\n",
"{'event': 'on_parser_stream', 'data': {'chunk': {'countries': []}}, 'run_id': 'e058d750-f2c2-40f6-aa61-10f84cd671a9', 'name': 'my_parser', 'tags': ['seq:step:2'], 'metadata': {}}\n",
"{'event': 'on_parser_stream', 'data': {'chunk': {'countries': [{}]}}, 'run_id': 'e058d750-f2c2-40f6-aa61-10f84cd671a9', 'name': 'my_parser', 'tags': ['seq:step:2'], 'metadata': {}}\n",
"{'event': 'on_parser_stream', 'data': {'chunk': {'countries': [{'name': ''}]}}, 'run_id': 'e058d750-f2c2-40f6-aa61-10f84cd671a9', 'name': 'my_parser', 'tags': ['seq:step:2'], 'metadata': {}}\n",
"{'event': 'on_parser_stream', 'data': {'chunk': {'countries': [{'name': 'France'}]}}, 'run_id': 'e058d750-f2c2-40f6-aa61-10f84cd671a9', 'name': 'my_parser', 'tags': ['seq:step:2'], 'metadata': {}}\n",
"{'event': 'on_parser_stream', 'data': {'chunk': {'countries': [{'name': 'France', 'population': 67}]}}, 'run_id': 'e058d750-f2c2-40f6-aa61-10f84cd671a9', 'name': 'my_parser', 'tags': ['seq:step:2'], 'metadata': {}}\n",
"{'event': 'on_parser_stream', 'data': {'chunk': {'countries': [{'name': 'France', 'population': 67413}]}}, 'run_id': 'e058d750-f2c2-40f6-aa61-10f84cd671a9', 'name': 'my_parser', 'tags': ['seq:step:2'], 'metadata': {}}\n",
"{'event': 'on_parser_stream', 'data': {'chunk': {'countries': [{'name': 'France', 'population': 67413000}]}}, 'run_id': 'e058d750-f2c2-40f6-aa61-10f84cd671a9', 'name': 'my_parser', 'tags': ['seq:step:2'], 'metadata': {}}\n",
"{'event': 'on_parser_stream', 'data': {'chunk': {'countries': [{'name': 'France', 'population': 67413000}, {}]}}, 'run_id': 'e058d750-f2c2-40f6-aa61-10f84cd671a9', 'name': 'my_parser', 'tags': ['seq:step:2'], 'metadata': {}}\n",
"{'event': 'on_parser_stream', 'data': {'chunk': {'countries': [{'name': 'France', 'population': 67413000}, {'name': ''}]}}, 'run_id': 'e058d750-f2c2-40f6-aa61-10f84cd671a9', 'name': 'my_parser', 'tags': ['seq:step:2'], 'metadata': {}}\n",
"...\n"
]
}
@ -997,8 +1048,10 @@
"\n",
"max_events = 0\n",
"async for event in chain.astream_events(\n",
" 'output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. Each country should have the key `name` and `population`',\n",
" version=\"v1\",\n",
" \"output a list of the countries france, spain and japan and their populations in JSON format. \"\n",
" 'Use a dict with an outer key of \"countries\" which contains a list of countries. '\n",
" \"Each country should have the key `name` and `population`\",\n",
" version=\"v2\",\n",
" include_names=[\"my_parser\"],\n",
"):\n",
" print(event)\n",
@ -1019,7 +1072,7 @@
},
{
"cell_type": "code",
"execution_count": 20,
"execution_count": 21,
"id": "096cd904-72f0-4ebe-a8b7-d0e730faea7f",
"metadata": {},
"outputs": [
@ -1027,17 +1080,17 @@
"name": "stdout",
"output_type": "stream",
"text": [
"{'event': 'on_chat_model_start', 'name': 'model', 'run_id': '02b68bbd-e99b-4a66-bf5f-6e238bfd0182', 'tags': ['seq:step:1'], 'metadata': {}, 'data': {'input': {'messages': [[HumanMessage(content='output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. Each country should have the key `name` and `population`')]]}}}\n",
"{'event': 'on_chat_model_stream', 'name': 'model', 'run_id': '02b68bbd-e99b-4a66-bf5f-6e238bfd0182', 'tags': ['seq:step:1'], 'metadata': {}, 'data': {'chunk': AIMessageChunk(content='{', id='run-02b68bbd-e99b-4a66-bf5f-6e238bfd0182')}}\n",
"{'event': 'on_chat_model_stream', 'name': 'model', 'run_id': '02b68bbd-e99b-4a66-bf5f-6e238bfd0182', 'tags': ['seq:step:1'], 'metadata': {}, 'data': {'chunk': AIMessageChunk(content='\\n ', id='run-02b68bbd-e99b-4a66-bf5f-6e238bfd0182')}}\n",
"{'event': 'on_chat_model_stream', 'name': 'model', 'run_id': '02b68bbd-e99b-4a66-bf5f-6e238bfd0182', 'tags': ['seq:step:1'], 'metadata': {}, 'data': {'chunk': AIMessageChunk(content='\"', id='run-02b68bbd-e99b-4a66-bf5f-6e238bfd0182')}}\n",
"{'event': 'on_chat_model_stream', 'name': 'model', 'run_id': '02b68bbd-e99b-4a66-bf5f-6e238bfd0182', 'tags': ['seq:step:1'], 'metadata': {}, 'data': {'chunk': AIMessageChunk(content='countries', id='run-02b68bbd-e99b-4a66-bf5f-6e238bfd0182')}}\n",
"{'event': 'on_chat_model_stream', 'name': 'model', 'run_id': '02b68bbd-e99b-4a66-bf5f-6e238bfd0182', 'tags': ['seq:step:1'], 'metadata': {}, 'data': {'chunk': AIMessageChunk(content='\":', id='run-02b68bbd-e99b-4a66-bf5f-6e238bfd0182')}}\n",
"{'event': 'on_chat_model_stream', 'name': 'model', 'run_id': '02b68bbd-e99b-4a66-bf5f-6e238bfd0182', 'tags': ['seq:step:1'], 'metadata': {}, 'data': {'chunk': AIMessageChunk(content=' [', id='run-02b68bbd-e99b-4a66-bf5f-6e238bfd0182')}}\n",
"{'event': 'on_chat_model_stream', 'name': 'model', 'run_id': '02b68bbd-e99b-4a66-bf5f-6e238bfd0182', 'tags': ['seq:step:1'], 'metadata': {}, 'data': {'chunk': AIMessageChunk(content='\\n ', id='run-02b68bbd-e99b-4a66-bf5f-6e238bfd0182')}}\n",
"{'event': 'on_chat_model_stream', 'name': 'model', 'run_id': '02b68bbd-e99b-4a66-bf5f-6e238bfd0182', 'tags': ['seq:step:1'], 'metadata': {}, 'data': {'chunk': AIMessageChunk(content='{', id='run-02b68bbd-e99b-4a66-bf5f-6e238bfd0182')}}\n",
"{'event': 'on_chat_model_stream', 'name': 'model', 'run_id': '02b68bbd-e99b-4a66-bf5f-6e238bfd0182', 'tags': ['seq:step:1'], 'metadata': {}, 'data': {'chunk': AIMessageChunk(content='\\n ', id='run-02b68bbd-e99b-4a66-bf5f-6e238bfd0182')}}\n",
"{'event': 'on_chat_model_stream', 'name': 'model', 'run_id': '02b68bbd-e99b-4a66-bf5f-6e238bfd0182', 'tags': ['seq:step:1'], 'metadata': {}, 'data': {'chunk': AIMessageChunk(content='\"', id='run-02b68bbd-e99b-4a66-bf5f-6e238bfd0182')}}\n",
"{'event': 'on_chat_model_start', 'data': {'input': 'output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. Each country should have the key `name` and `population`'}, 'name': 'model', 'tags': ['seq:step:1'], 'run_id': 'db246792-2a91-4eb3-a14b-29658947065d', 'metadata': {}}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='{', id='run-db246792-2a91-4eb3-a14b-29658947065d')}, 'run_id': 'db246792-2a91-4eb3-a14b-29658947065d', 'name': 'model', 'tags': ['seq:step:1'], 'metadata': {}}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='\\n ', id='run-db246792-2a91-4eb3-a14b-29658947065d')}, 'run_id': 'db246792-2a91-4eb3-a14b-29658947065d', 'name': 'model', 'tags': ['seq:step:1'], 'metadata': {}}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='\"', id='run-db246792-2a91-4eb3-a14b-29658947065d')}, 'run_id': 'db246792-2a91-4eb3-a14b-29658947065d', 'name': 'model', 'tags': ['seq:step:1'], 'metadata': {}}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='countries', id='run-db246792-2a91-4eb3-a14b-29658947065d')}, 'run_id': 'db246792-2a91-4eb3-a14b-29658947065d', 'name': 'model', 'tags': ['seq:step:1'], 'metadata': {}}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='\":', id='run-db246792-2a91-4eb3-a14b-29658947065d')}, 'run_id': 'db246792-2a91-4eb3-a14b-29658947065d', 'name': 'model', 'tags': ['seq:step:1'], 'metadata': {}}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=' [', id='run-db246792-2a91-4eb3-a14b-29658947065d')}, 'run_id': 'db246792-2a91-4eb3-a14b-29658947065d', 'name': 'model', 'tags': ['seq:step:1'], 'metadata': {}}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='\\n ', id='run-db246792-2a91-4eb3-a14b-29658947065d')}, 'run_id': 'db246792-2a91-4eb3-a14b-29658947065d', 'name': 'model', 'tags': ['seq:step:1'], 'metadata': {}}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='{', id='run-db246792-2a91-4eb3-a14b-29658947065d')}, 'run_id': 'db246792-2a91-4eb3-a14b-29658947065d', 'name': 'model', 'tags': ['seq:step:1'], 'metadata': {}}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='\\n ', id='run-db246792-2a91-4eb3-a14b-29658947065d')}, 'run_id': 'db246792-2a91-4eb3-a14b-29658947065d', 'name': 'model', 'tags': ['seq:step:1'], 'metadata': {}}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='\"', id='run-db246792-2a91-4eb3-a14b-29658947065d')}, 'run_id': 'db246792-2a91-4eb3-a14b-29658947065d', 'name': 'model', 'tags': ['seq:step:1'], 'metadata': {}}\n",
"...\n"
]
}
@ -1050,7 +1103,7 @@
"max_events = 0\n",
"async for event in chain.astream_events(\n",
" 'output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. Each country should have the key `name` and `population`',\n",
" version=\"v1\",\n",
" version=\"v2\",\n",
" include_types=[\"chat_model\"],\n",
"):\n",
" print(event)\n",
@ -1078,7 +1131,7 @@
},
{
"cell_type": "code",
"execution_count": 21,
"execution_count": 22,
"id": "26bac0d2-76d9-446e-b346-82790236b88d",
"metadata": {},
"outputs": [
@ -1086,17 +1139,17 @@
"name": "stdout",
"output_type": "stream",
"text": [
"{'event': 'on_chain_start', 'run_id': '55ab7082-7200-4545-8f45-bb0997b0bce8', 'name': 'RunnableSequence', 'tags': ['my_chain'], 'metadata': {}, 'data': {'input': 'output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. Each country should have the key `name` and `population`'}}\n",
"{'event': 'on_chat_model_start', 'name': 'ChatAnthropic', 'run_id': 'd2efdbe8-77e4-4b29-ae68-be163239385e', 'tags': ['seq:step:1', 'my_chain'], 'metadata': {}, 'data': {'input': {'messages': [[HumanMessage(content='output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. Each country should have the key `name` and `population`')]]}}}\n",
"{'event': 'on_chat_model_stream', 'name': 'ChatAnthropic', 'run_id': 'd2efdbe8-77e4-4b29-ae68-be163239385e', 'tags': ['seq:step:1', 'my_chain'], 'metadata': {}, 'data': {'chunk': AIMessageChunk(content='{', id='run-d2efdbe8-77e4-4b29-ae68-be163239385e')}}\n",
"{'event': 'on_parser_start', 'name': 'JsonOutputParser', 'run_id': 'bc80bc6d-5ae5-4d3a-9bb6-006c0e9c67c5', 'tags': ['seq:step:2', 'my_chain'], 'metadata': {}, 'data': {}}\n",
"{'event': 'on_parser_stream', 'name': 'JsonOutputParser', 'run_id': 'bc80bc6d-5ae5-4d3a-9bb6-006c0e9c67c5', 'tags': ['seq:step:2', 'my_chain'], 'metadata': {}, 'data': {'chunk': {}}}\n",
"{'event': 'on_chain_stream', 'run_id': '55ab7082-7200-4545-8f45-bb0997b0bce8', 'tags': ['my_chain'], 'metadata': {}, 'name': 'RunnableSequence', 'data': {'chunk': {}}}\n",
"{'event': 'on_chat_model_stream', 'name': 'ChatAnthropic', 'run_id': 'd2efdbe8-77e4-4b29-ae68-be163239385e', 'tags': ['seq:step:1', 'my_chain'], 'metadata': {}, 'data': {'chunk': AIMessageChunk(content='\\n ', id='run-d2efdbe8-77e4-4b29-ae68-be163239385e')}}\n",
"{'event': 'on_chat_model_stream', 'name': 'ChatAnthropic', 'run_id': 'd2efdbe8-77e4-4b29-ae68-be163239385e', 'tags': ['seq:step:1', 'my_chain'], 'metadata': {}, 'data': {'chunk': AIMessageChunk(content='\"', id='run-d2efdbe8-77e4-4b29-ae68-be163239385e')}}\n",
"{'event': 'on_chat_model_stream', 'name': 'ChatAnthropic', 'run_id': 'd2efdbe8-77e4-4b29-ae68-be163239385e', 'tags': ['seq:step:1', 'my_chain'], 'metadata': {}, 'data': {'chunk': AIMessageChunk(content='countries', id='run-d2efdbe8-77e4-4b29-ae68-be163239385e')}}\n",
"{'event': 'on_chat_model_stream', 'name': 'ChatAnthropic', 'run_id': 'd2efdbe8-77e4-4b29-ae68-be163239385e', 'tags': ['seq:step:1', 'my_chain'], 'metadata': {}, 'data': {'chunk': AIMessageChunk(content='\":', id='run-d2efdbe8-77e4-4b29-ae68-be163239385e')}}\n",
"{'event': 'on_chat_model_stream', 'name': 'ChatAnthropic', 'run_id': 'd2efdbe8-77e4-4b29-ae68-be163239385e', 'tags': ['seq:step:1', 'my_chain'], 'metadata': {}, 'data': {'chunk': AIMessageChunk(content=' [', id='run-d2efdbe8-77e4-4b29-ae68-be163239385e')}}\n",
"{'event': 'on_chain_start', 'data': {'input': 'output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. Each country should have the key `name` and `population`'}, 'name': 'RunnableSequence', 'tags': ['my_chain'], 'run_id': 'fd68dd64-7a4d-4bdb-a0c2-ee592db0d024', 'metadata': {}}\n",
"{'event': 'on_chat_model_start', 'data': {'input': {'messages': [[HumanMessage(content='output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. Each country should have the key `name` and `population`')]]}}, 'name': 'ChatAnthropic', 'tags': ['seq:step:1', 'my_chain'], 'run_id': 'efd3c8af-4be5-4f6c-9327-e3f9865dd1cd', 'metadata': {}}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='{', id='run-efd3c8af-4be5-4f6c-9327-e3f9865dd1cd')}, 'run_id': 'efd3c8af-4be5-4f6c-9327-e3f9865dd1cd', 'name': 'ChatAnthropic', 'tags': ['seq:step:1', 'my_chain'], 'metadata': {}}\n",
"{'event': 'on_parser_start', 'data': {}, 'name': 'JsonOutputParser', 'tags': ['seq:step:2', 'my_chain'], 'run_id': 'afde30b9-beac-4b36-b4c7-dbbe423ddcdb', 'metadata': {}}\n",
"{'event': 'on_parser_stream', 'data': {'chunk': {}}, 'run_id': 'afde30b9-beac-4b36-b4c7-dbbe423ddcdb', 'name': 'JsonOutputParser', 'tags': ['seq:step:2', 'my_chain'], 'metadata': {}}\n",
"{'event': 'on_chain_stream', 'data': {'chunk': {}}, 'run_id': 'fd68dd64-7a4d-4bdb-a0c2-ee592db0d024', 'name': 'RunnableSequence', 'tags': ['my_chain'], 'metadata': {}}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='\\n ', id='run-efd3c8af-4be5-4f6c-9327-e3f9865dd1cd')}, 'run_id': 'efd3c8af-4be5-4f6c-9327-e3f9865dd1cd', 'name': 'ChatAnthropic', 'tags': ['seq:step:1', 'my_chain'], 'metadata': {}}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='\"', id='run-efd3c8af-4be5-4f6c-9327-e3f9865dd1cd')}, 'run_id': 'efd3c8af-4be5-4f6c-9327-e3f9865dd1cd', 'name': 'ChatAnthropic', 'tags': ['seq:step:1', 'my_chain'], 'metadata': {}}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='countries', id='run-efd3c8af-4be5-4f6c-9327-e3f9865dd1cd')}, 'run_id': 'efd3c8af-4be5-4f6c-9327-e3f9865dd1cd', 'name': 'ChatAnthropic', 'tags': ['seq:step:1', 'my_chain'], 'metadata': {}}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content='\":', id='run-efd3c8af-4be5-4f6c-9327-e3f9865dd1cd')}, 'run_id': 'efd3c8af-4be5-4f6c-9327-e3f9865dd1cd', 'name': 'ChatAnthropic', 'tags': ['seq:step:1', 'my_chain'], 'metadata': {}}\n",
"{'event': 'on_chat_model_stream', 'data': {'chunk': AIMessageChunk(content=' [', id='run-efd3c8af-4be5-4f6c-9327-e3f9865dd1cd')}, 'run_id': 'efd3c8af-4be5-4f6c-9327-e3f9865dd1cd', 'name': 'ChatAnthropic', 'tags': ['seq:step:1', 'my_chain'], 'metadata': {}}\n",
"...\n"
]
}
@ -1107,7 +1160,7 @@
"max_events = 0\n",
"async for event in chain.astream_events(\n",
" 'output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. Each country should have the key `name` and `population`',\n",
" version=\"v1\",\n",
" version=\"v2\",\n",
" include_tags=[\"my_chain\"],\n",
"):\n",
" print(event)\n",
@ -1132,7 +1185,7 @@
},
{
"cell_type": "code",
"execution_count": 22,
"execution_count": 23,
"id": "0e6451d3-3b11-4a71-ae19-998f4c10180f",
"metadata": {},
"outputs": [],
@ -1174,7 +1227,7 @@
},
{
"cell_type": "code",
"execution_count": 23,
"execution_count": 24,
"id": "f9a8fe35-faab-4970-b8c0-5c780845d98a",
"metadata": {},
"outputs": [
@ -1182,13 +1235,15 @@
"name": "stdout",
"output_type": "stream",
"text": [
"[None, '', 'France', 'France', 'France', 'France', 'France', None, 'France', '', 'France', 'Spain', 'France', 'Spain', 'France', 'Spain', 'France', 'Spain', 'France', 'Spain', None, 'France', 'Spain', '', 'France', 'Spain', 'Japan', 'France', 'Spain', 'Japan', 'France', 'Spain', 'Japan', 'France', 'Spain', 'Japan']\n"
"['France', 'Spain', 'Japan']\n"
]
}
],
"source": [
"async for chunk in chain.astream(\n",
" 'output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. Each country should have the key `name` and `population`',\n",
" \"output a list of the countries france, spain and japan and their populations in JSON format. \"\n",
" 'Use a dict with an outer key of \"countries\" which contains a list of countries. '\n",
" \"Each country should have the key `name` and `population`\",\n",
"):\n",
" print(chunk, flush=True)"
]
@ -1203,7 +1258,7 @@
},
{
"cell_type": "code",
"execution_count": 24,
"execution_count": 25,
"id": "b08215cd-bffa-4e76-aaf3-c52ee34f152c",
"metadata": {},
"outputs": [
@ -1246,8 +1301,10 @@
"num_events = 0\n",
"\n",
"async for event in chain.astream_events(\n",
" 'output a list of the countries france, spain and japan and their populations in JSON format. Use a dict with an outer key of \"countries\" which contains a list of countries. Each country should have the key `name` and `population`',\n",
" version=\"v1\",\n",
" \"output a list of the countries france, spain and japan and their populations in JSON format. \"\n",
" 'Use a dict with an outer key of \"countries\" which contains a list of countries. '\n",
" \"Each country should have the key `name` and `population`\",\n",
" version=\"v2\",\n",
"):\n",
" kind = event[\"event\"]\n",
" if kind == \"on_chat_model_stream\":\n",
@ -1282,7 +1339,7 @@
},
{
"cell_type": "code",
"execution_count": 25,
"execution_count": 26,
"id": "1854206d-b3a5-4f91-9e00-bccbaebac61f",
"metadata": {},
"outputs": [
@ -1290,9 +1347,10 @@
"name": "stdout",
"output_type": "stream",
"text": [
"{'event': 'on_tool_start', 'run_id': 'b5ffad93-6dcf-4c95-9dfa-a35675c6bbc3', 'name': 'bad_tool', 'tags': [], 'metadata': {}, 'data': {'input': 'hello'}}\n",
"{'event': 'on_tool_stream', 'run_id': 'b5ffad93-6dcf-4c95-9dfa-a35675c6bbc3', 'tags': [], 'metadata': {}, 'name': 'bad_tool', 'data': {'chunk': 'olleh'}}\n",
"{'event': 'on_tool_end', 'name': 'bad_tool', 'run_id': 'b5ffad93-6dcf-4c95-9dfa-a35675c6bbc3', 'tags': [], 'metadata': {}, 'data': {'output': 'olleh'}}\n"
"{'event': 'on_tool_start', 'data': {'input': 'hello'}, 'name': 'bad_tool', 'tags': [], 'run_id': 'ea900472-a8f7-425d-b627-facdef936ee8', 'metadata': {}}\n",
"{'event': 'on_chain_start', 'data': {'input': 'hello'}, 'name': 'reverse_word', 'tags': [], 'run_id': '77b01284-0515-48f4-8d7c-eb27c1882f86', 'metadata': {}}\n",
"{'event': 'on_chain_end', 'data': {'output': 'olleh', 'input': 'hello'}, 'run_id': '77b01284-0515-48f4-8d7c-eb27c1882f86', 'name': 'reverse_word', 'tags': [], 'metadata': {}}\n",
"{'event': 'on_tool_end', 'data': {'output': 'olleh'}, 'run_id': 'ea900472-a8f7-425d-b627-facdef936ee8', 'name': 'bad_tool', 'tags': [], 'metadata': {}}\n"
]
}
],
@ -1314,7 +1372,7 @@
" return reverse_word.invoke(word)\n",
"\n",
"\n",
"async for event in bad_tool.astream_events(\"hello\", version=\"v1\"):\n",
"async for event in bad_tool.astream_events(\"hello\", version=\"v2\"):\n",
" print(event)"
]
},
@ -1328,7 +1386,7 @@
},
{
"cell_type": "code",
"execution_count": 26,
"execution_count": 27,
"id": "a20a6cb3-bb43-465c-8cfc-0a7349d70968",
"metadata": {},
"outputs": [
@ -1336,11 +1394,10 @@
"name": "stdout",
"output_type": "stream",
"text": [
"{'event': 'on_tool_start', 'run_id': 'be7f9379-5340-433e-b1fc-84314353cd17', 'name': 'correct_tool', 'tags': [], 'metadata': {}, 'data': {'input': 'hello'}}\n",
"{'event': 'on_chain_start', 'name': 'reverse_word', 'run_id': '50bfe8a9-64c5-4ed8-8dae-03415b5b7c6e', 'tags': [], 'metadata': {}, 'data': {'input': 'hello'}}\n",
"{'event': 'on_chain_end', 'name': 'reverse_word', 'run_id': '50bfe8a9-64c5-4ed8-8dae-03415b5b7c6e', 'tags': [], 'metadata': {}, 'data': {'input': 'hello', 'output': 'olleh'}}\n",
"{'event': 'on_tool_stream', 'run_id': 'be7f9379-5340-433e-b1fc-84314353cd17', 'tags': [], 'metadata': {}, 'name': 'correct_tool', 'data': {'chunk': 'olleh'}}\n",
"{'event': 'on_tool_end', 'name': 'correct_tool', 'run_id': 'be7f9379-5340-433e-b1fc-84314353cd17', 'tags': [], 'metadata': {}, 'data': {'output': 'olleh'}}\n"
"{'event': 'on_tool_start', 'data': {'input': 'hello'}, 'name': 'correct_tool', 'tags': [], 'run_id': 'd5ea83b9-9278-49cc-9f1d-aa302d671040', 'metadata': {}}\n",
"{'event': 'on_chain_start', 'data': {'input': 'hello'}, 'name': 'reverse_word', 'tags': [], 'run_id': '44dafbf4-2f87-412b-ae0e-9f71713810df', 'metadata': {}}\n",
"{'event': 'on_chain_end', 'data': {'output': 'olleh', 'input': 'hello'}, 'run_id': '44dafbf4-2f87-412b-ae0e-9f71713810df', 'name': 'reverse_word', 'tags': [], 'metadata': {}}\n",
"{'event': 'on_tool_end', 'data': {'output': 'olleh'}, 'run_id': 'd5ea83b9-9278-49cc-9f1d-aa302d671040', 'name': 'correct_tool', 'tags': [], 'metadata': {}}\n"
]
}
],
@ -1351,7 +1408,7 @@
" return reverse_word.invoke(word, {\"callbacks\": callbacks})\n",
"\n",
"\n",
"async for event in correct_tool.astream_events(\"hello\", version=\"v1\"):\n",
"async for event in correct_tool.astream_events(\"hello\", version=\"v2\"):\n",
" print(event)"
]
},
@ -1365,7 +1422,7 @@
},
{
"cell_type": "code",
"execution_count": 27,
"execution_count": 28,
"id": "0ac0a3c1-f3a4-4157-b053-4fec8d2e698c",
"metadata": {},
"outputs": [
@ -1373,9 +1430,11 @@
"name": "stdout",
"output_type": "stream",
"text": [
"{'event': 'on_chain_start', 'run_id': 'a5d11046-93fa-4cd9-9854-d3afa3d686ef', 'name': 'reverse_and_double', 'tags': [], 'metadata': {}, 'data': {'input': '1234'}}\n",
"{'event': 'on_chain_stream', 'run_id': 'a5d11046-93fa-4cd9-9854-d3afa3d686ef', 'tags': [], 'metadata': {}, 'name': 'reverse_and_double', 'data': {'chunk': '43214321'}}\n",
"{'event': 'on_chain_end', 'name': 'reverse_and_double', 'run_id': 'a5d11046-93fa-4cd9-9854-d3afa3d686ef', 'tags': [], 'metadata': {}, 'data': {'output': '43214321'}}\n"
"{'event': 'on_chain_start', 'data': {'input': '1234'}, 'name': 'reverse_and_double', 'tags': [], 'run_id': '03b0e6a1-3e60-42fc-8373-1e7829198d80', 'metadata': {}}\n",
"{'event': 'on_chain_start', 'data': {'input': '1234'}, 'name': 'reverse_word', 'tags': [], 'run_id': '5cf26fc8-840b-4642-98ed-623dda28707a', 'metadata': {}}\n",
"{'event': 'on_chain_end', 'data': {'output': '4321', 'input': '1234'}, 'run_id': '5cf26fc8-840b-4642-98ed-623dda28707a', 'name': 'reverse_word', 'tags': [], 'metadata': {}}\n",
"{'event': 'on_chain_stream', 'data': {'chunk': '43214321'}, 'run_id': '03b0e6a1-3e60-42fc-8373-1e7829198d80', 'name': 'reverse_and_double', 'tags': [], 'metadata': {}}\n",
"{'event': 'on_chain_end', 'data': {'output': '43214321'}, 'run_id': '03b0e6a1-3e60-42fc-8373-1e7829198d80', 'name': 'reverse_and_double', 'tags': [], 'metadata': {}}\n"
]
}
],
@ -1391,7 +1450,7 @@
"\n",
"await reverse_and_double.ainvoke(\"1234\")\n",
"\n",
"async for event in reverse_and_double.astream_events(\"1234\", version=\"v1\"):\n",
"async for event in reverse_and_double.astream_events(\"1234\", version=\"v2\"):\n",
" print(event)"
]
},
@ -1405,7 +1464,7 @@
},
{
"cell_type": "code",
"execution_count": 28,
"execution_count": 29,
"id": "c896bb94-9d10-41ff-8fe2-d6b05b1ed74b",
"metadata": {},
"outputs": [
@ -1413,9 +1472,11 @@
"name": "stdout",
"output_type": "stream",
"text": [
"{'event': 'on_chain_start', 'run_id': 'b3eff5c2-8339-4e15-98b3-85148d9ae350', 'name': 'reverse_and_double', 'tags': [], 'metadata': {}, 'data': {'input': '1234'}}\n",
"{'event': 'on_chain_stream', 'run_id': 'b3eff5c2-8339-4e15-98b3-85148d9ae350', 'tags': [], 'metadata': {}, 'name': 'reverse_and_double', 'data': {'chunk': '43214321'}}\n",
"{'event': 'on_chain_end', 'name': 'reverse_and_double', 'run_id': 'b3eff5c2-8339-4e15-98b3-85148d9ae350', 'tags': [], 'metadata': {}, 'data': {'output': '43214321'}}\n"
"{'event': 'on_chain_start', 'data': {'input': '1234'}, 'name': 'reverse_and_double', 'tags': [], 'run_id': '1bfcaedc-f4aa-4d8e-beee-9bba6ef17008', 'metadata': {}}\n",
"{'event': 'on_chain_start', 'data': {'input': '1234'}, 'name': 'reverse_word', 'tags': [], 'run_id': '64fc99f0-5d7d-442b-b4f5-4537129f67d1', 'metadata': {}}\n",
"{'event': 'on_chain_end', 'data': {'output': '4321', 'input': '1234'}, 'run_id': '64fc99f0-5d7d-442b-b4f5-4537129f67d1', 'name': 'reverse_word', 'tags': [], 'metadata': {}}\n",
"{'event': 'on_chain_stream', 'data': {'chunk': '43214321'}, 'run_id': '1bfcaedc-f4aa-4d8e-beee-9bba6ef17008', 'name': 'reverse_and_double', 'tags': [], 'metadata': {}}\n",
"{'event': 'on_chain_end', 'data': {'output': '43214321'}, 'run_id': '1bfcaedc-f4aa-4d8e-beee-9bba6ef17008', 'name': 'reverse_and_double', 'tags': [], 'metadata': {}}\n"
]
}
],
@ -1430,7 +1491,7 @@
"\n",
"await reverse_and_double.ainvoke(\"1234\")\n",
"\n",
"async for event in reverse_and_double.astream_events(\"1234\", version=\"v1\"):\n",
"async for event in reverse_and_double.astream_events(\"1234\", version=\"v2\"):\n",
" print(event)"
]
},

@ -24,18 +24,21 @@
"- [Function/tool calling](/docs/concepts/#functiontool-calling)\n",
":::\n",
"\n",
"It is often useful to have a model return output that matches some specific schema. One common use-case is extracting data from arbitrary text to insert into a traditional database or use with some other downstrem system. This guide will show you a few different strategies you can use to do this.\n",
"\n",
"It is often useful to have a model return output that matches a specific schema. One common use-case is extracting data from text to insert into a database or use with some other downstream system. This guide covers a few strategies for getting structured outputs from a model.\n",
"\n",
"## The `.with_structured_output()` method\n",
"\n",
"There are several strategies that models can use under the hood. For some of the most popular model providers, including [OpenAI](/docs/integrations/platforms/openai/), [Anthropic](/docs/integrations/platforms/anthropic/), and [Mistral](/docs/integrations/providers/mistralai/), LangChain implements a common interface that abstracts away these strategies called `.with_structured_output`.\n",
":::info Supported models\n",
"\n",
"You can find a [list of models that support this method here](/docs/integrations/chat/).\n",
"\n",
"By invoking this method (and passing in [JSON schema](https://json-schema.org/) or a [Pydantic](https://docs.pydantic.dev/latest/) model) the model will add whatever model parameters + output parsers are necessary to get back structured output matching the requested schema. If the model supports more than one way to do this (e.g., function calling vs JSON mode) - you can configure which method to use by passing into that method.\n",
":::\n",
"\n",
"You can find the [current list of models that support this method here](/docs/integrations/chat/).\n",
"This is the easiest and most reliable way to get structured outputs. `with_structured_output()` is implemented for models that provide native APIs for structuring outputs, like tool/function calling or JSON mode, and makes use of these capabilities under the hood.\n",
"\n",
"Let's look at some examples of this in action! We'll use Pydantic to create a simple response schema.\n",
"This method takes a schema as input which specifies the names, types, and descriptions of the desired output attributes. The method returns a model-like Runnable, except that instead of outputting strings or Messages it outputs objects corresponding to the given schema. The schema can be specified as a [JSON Schema](https://json-schema.org/) or a Pydantic class. If JSON Schema is used then a dictionary will be returned by the Runnable, and if a Pydantic class is used then Pydantic objects will be returned.\n",
"\n",
"As an example, let's get a model to generate a joke and separate the setup from the punchline:\n",
"\n",
"```{=mdx}\n",
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
@ -58,25 +61,30 @@
"\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"llm = ChatOpenAI(\n",
" model=\"gpt-4-0125-preview\",\n",
" temperature=0,\n",
")"
"llm = ChatOpenAI(model=\"gpt-4-0125-preview\", temperature=0)"
]
},
{
"cell_type": "markdown",
"id": "a808a401-be1f-49f9-ad13-58dd68f7db5f",
"metadata": {},
"source": [
"If we want the model to return a Pydantic object, we just need to pass in desired the Pydantic class:"
]
},
{
"cell_type": "code",
"execution_count": 13,
"execution_count": 38,
"id": "070bf702",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Joke(setup='Why was the cat sitting on the computer?', punchline='Because it wanted to keep an eye on the mouse!', rating=None)"
"Joke(setup='Why was the cat sitting on the computer?', punchline='To keep an eye on the mouse!', rating=None)"
]
},
"execution_count": 13,
"execution_count": 38,
"metadata": {},
"output_type": "execute_result"
}
@ -88,6 +96,8 @@
"\n",
"\n",
"class Joke(BaseModel):\n",
" \"\"\"Joke to tell user.\"\"\"\n",
"\n",
" setup: str = Field(description=\"The setup of the joke\")\n",
" punchline: str = Field(description=\"The punchline to the joke\")\n",
" rating: Optional[int] = Field(description=\"How funny the joke is, from 1 to 10\")\n",
@ -98,25 +108,27 @@
"structured_llm.invoke(\"Tell me a joke about cats\")"
]
},
{
"cell_type": "markdown",
"id": "00890a47-3cdf-4805-b8f1-6d110f0633d3",
"metadata": {},
"source": [
":::tip\n",
"Beyond just the structure of the Pydantic class, the name of the Pydantic class, the docstring, and the names and provided descriptions of parameters are very important. Most of the time `with_structured_output` is using a model's function/tool calling API, and you can effectively think of all of this information as being added to the model prompt.\n",
":::"
]
},
{
"cell_type": "markdown",
"id": "deddb6d3",
"metadata": {},
"source": [
"The result is a Pydantic model. Note that name of the model and the names and provided descriptions of parameters are very important, as they help guide the model's output.\n",
"\n",
"We can also pass in an OpenAI-style JSON schema dict if you prefer not to use Pydantic. This dict should contain three properties:\n",
"\n",
"- `name`: The name of the schema to output.\n",
"- `description`: A high level description of the schema to output.\n",
"- `parameters`: The nested details of the schema you want to extract, formatted as a [JSON schema](https://json-schema.org/) dict.\n",
"\n",
"In this case, the response is also a dict:"
"We can also pass in a [JSON Schema](https://json-schema.org/) dict if you prefer not to use Pydantic. In this case, the response is also a dict:"
]
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 8,
"id": "6700994a",
"metadata": {},
"outputs": [
@ -124,30 +136,37 @@
"data": {
"text/plain": [
"{'setup': 'Why was the cat sitting on the computer?',\n",
" 'punchline': 'To keep an eye on the mouse!'}"
" 'punchline': 'Because it wanted to keep an eye on the mouse!',\n",
" 'rating': 8}"
]
},
"execution_count": 3,
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"structured_llm = llm.with_structured_output(\n",
" {\n",
" \"name\": \"joke\",\n",
" \"description\": \"Joke to tell user.\",\n",
" \"parameters\": {\n",
" \"title\": \"Joke\",\n",
" \"type\": \"object\",\n",
" \"properties\": {\n",
" \"setup\": {\"type\": \"string\", \"description\": \"The setup for the joke\"},\n",
" \"punchline\": {\"type\": \"string\", \"description\": \"The joke's punchline\"},\n",
" },\n",
" \"required\": [\"setup\", \"punchline\"],\n",
"json_schema = {\n",
" \"title\": \"joke\",\n",
" \"description\": \"Joke to tell user.\",\n",
" \"type\": \"object\",\n",
" \"properties\": {\n",
" \"setup\": {\n",
" \"type\": \"string\",\n",
" \"description\": \"The setup of the joke\",\n",
" },\n",
" }\n",
")\n",
" \"punchline\": {\n",
" \"type\": \"string\",\n",
" \"description\": \"The punchline to the joke\",\n",
" },\n",
" \"rating\": {\n",
" \"type\": \"integer\",\n",
" \"description\": \"How funny the joke is, from 1 to 10\",\n",
" },\n",
" },\n",
" \"required\": [\"setup\", \"punchline\"],\n",
"}\n",
"structured_llm = llm.with_structured_output(json_schema)\n",
"\n",
"structured_llm.invoke(\"Tell me a joke about cats\")"
]
@ -159,7 +178,7 @@
"source": [
"### Choosing between multiple schemas\n",
"\n",
"If you have multiple schemas that are valid outputs for the model, you can use Pydantic's `Union` type:"
"The simplest way to let the model choose from multiple schemas is to create a parent Pydantic class that has a Union-typed attribute:"
]
},
{
@ -171,7 +190,7 @@
{
"data": {
"text/plain": [
"Response(output=Joke(setup='Why was the cat sitting on the computer?', punchline='Because it wanted to keep an eye on the mouse!'))"
"Response(output=Joke(setup='Why was the cat sitting on the computer?', punchline='To keep an eye on the mouse!', rating=8))"
]
},
"execution_count": 4,
@ -182,15 +201,10 @@
"source": [
"from typing import Union\n",
"\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"\n",
"\n",
"class Joke(BaseModel):\n",
" setup: str = Field(description=\"The setup of the joke\")\n",
" punchline: str = Field(description=\"The punchline to the joke\")\n",
"\n",
"\n",
"class ConversationalResponse(BaseModel):\n",
" \"\"\"Respond in a conversational manner. Be kind and helpful.\"\"\"\n",
"\n",
" response: str = Field(description=\"A conversational response to the user's query\")\n",
"\n",
"\n",
@ -212,7 +226,7 @@
{
"data": {
"text/plain": [
"Response(output=ConversationalResponse(response=\"I'm just a collection of code, so I don't have feelings, but thanks for asking! How can I assist you today?\"))"
"Response(output=ConversationalResponse(response=\"I'm just a digital assistant, so I don't have feelings, but I'm here and ready to help you. How can I assist you today?\"))"
]
},
"execution_count": 5,
@ -229,9 +243,225 @@
"id": "e28c14d3",
"metadata": {},
"source": [
"If you are using JSON Schema, you can take advantage of other more complex schema descriptions to create a similar effect.\n",
"Alternatively, you can use tool calling directly to allow the model to choose between options, if your [chosen model supports it](/docs/integrations/chat/). This involves a bit more parsing and setup but in some instances leads to better performance because you don't have to use nested schemas. See [this how-to guide](/docs/how_to/tool_calling/) for more details."
]
},
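{
"cell_type": "markdown",
"id": "schema-choice-tool-calling-sketch",
"metadata": {},
"source": [
"As a rough, hypothetical sketch (assuming the `Joke` and `ConversationalResponse` classes and the `llm` defined above, and a model with tool-calling support), letting the model choose a schema via tool calling could look something like this:\n",
"\n",
"```python\n",
"# Hypothetical sketch: bind both schemas as tools; the model picks one per input.\n",
"llm_with_tools = llm.bind_tools([Joke, ConversationalResponse])\n",
"\n",
"ai_msg = llm_with_tools.invoke(\"Tell me a joke about cats\")\n",
"\n",
"# Each tool call records which schema was chosen and the parsed arguments.\n",
"for tool_call in ai_msg.tool_calls:\n",
"    print(tool_call[\"name\"], tool_call[\"args\"])\n",
"```"
]
},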
{
"cell_type": "markdown",
"id": "9a40f703-7fd2-4fe0-ab2a-fa2d711ba009",
"metadata": {},
"source": [
"### Streaming\n",
"\n",
"We can stream outputs from our structured model when the output type is a dict (i.e., when the schema is specified as a JSON Schema dict). \n",
"\n",
":::info\n",
"\n",
"Note that what's yielded is already aggregated chunks, not deltas.\n",
"\n",
":::"
]
},
{
"cell_type": "code",
"execution_count": 43,
"id": "aff89877-28a3-472f-a1aa-eff893fe7736",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{}\n",
"{'setup': ''}\n",
"{'setup': 'Why'}\n",
"{'setup': 'Why was'}\n",
"{'setup': 'Why was the'}\n",
"{'setup': 'Why was the cat'}\n",
"{'setup': 'Why was the cat sitting'}\n",
"{'setup': 'Why was the cat sitting on'}\n",
"{'setup': 'Why was the cat sitting on the'}\n",
"{'setup': 'Why was the cat sitting on the computer'}\n",
"{'setup': 'Why was the cat sitting on the computer?'}\n",
"{'setup': 'Why was the cat sitting on the computer?', 'punchline': ''}\n",
"{'setup': 'Why was the cat sitting on the computer?', 'punchline': 'Because'}\n",
"{'setup': 'Why was the cat sitting on the computer?', 'punchline': 'Because it'}\n",
"{'setup': 'Why was the cat sitting on the computer?', 'punchline': 'Because it wanted'}\n",
"{'setup': 'Why was the cat sitting on the computer?', 'punchline': 'Because it wanted to'}\n",
"{'setup': 'Why was the cat sitting on the computer?', 'punchline': 'Because it wanted to keep'}\n",
"{'setup': 'Why was the cat sitting on the computer?', 'punchline': 'Because it wanted to keep an'}\n",
"{'setup': 'Why was the cat sitting on the computer?', 'punchline': 'Because it wanted to keep an eye'}\n",
"{'setup': 'Why was the cat sitting on the computer?', 'punchline': 'Because it wanted to keep an eye on'}\n",
"{'setup': 'Why was the cat sitting on the computer?', 'punchline': 'Because it wanted to keep an eye on the'}\n",
"{'setup': 'Why was the cat sitting on the computer?', 'punchline': 'Because it wanted to keep an eye on the mouse'}\n",
"{'setup': 'Why was the cat sitting on the computer?', 'punchline': 'Because it wanted to keep an eye on the mouse!'}\n",
"{'setup': 'Why was the cat sitting on the computer?', 'punchline': 'Because it wanted to keep an eye on the mouse!', 'rating': 8}\n"
]
}
],
"source": [
"structured_llm = llm.with_structured_output(json_schema)\n",
"\n",
"for chunk in structured_llm.stream(\"Tell me a joke about cats\"):\n",
" print(chunk)"
]
},
{
"cell_type": "markdown",
"id": "0a526cdf-e736-451b-96be-22e8986d3863",
"metadata": {},
"source": [
"### Few-shot prompting\n",
"\n",
"For more complex schemas it's very useful to add few-shot examples to the prompt. This can be done in a few ways.\n",
"\n",
"The simplest and most universal way is to add examples to a system message in the prompt:"
]
},
{
"cell_type": "code",
"execution_count": 47,
"id": "283ba784-2072-47ee-9b2c-1119e3c69e8e",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'setup': 'Woodpecker',\n",
" 'punchline': \"Woodpecker goes 'knock knock', but don't worry, they never expect you to answer the door!\",\n",
" 'rating': 8}"
]
},
"execution_count": 47,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"system = \"\"\"You are a hilarious comedian. Your specialty is knock-knock jokes. \\\n",
"Return a joke which has the setup (the response to \"Who's there?\") and the final punchline (the response to \"<setup> who?\").\n",
"\n",
"Here are some examples of jokes:\n",
"\n",
"example_user: Tell me a joke about planes\n",
"example_assistant: {{\"setup\": \"Why don't planes ever get tired?\", \"punchline\": \"Because they have rest wings!\", \"rating\": 2}}\n",
"\n",
"You can also use tool calling directly to allow the model to choose between options, if your chosen model supports it. This involves a bit more parsing and setup. See [this how-to guide](/docs/how_to/tool_calling/) for more details."
"example_user: Tell me another joke about planes\n",
"example_assistant: {{\"setup\": \"Cargo\", \"punchline\": \"Cargo 'vroom vroom', but planes go 'zoom zoom'!\", \"rating\": 10}}\n",
"\n",
"example_user: Now about caterpillars\n",
"example_assistant: {{\"setup\": \"Caterpillar\", \"punchline\": \"Caterpillar really slow, but watch me turn into a butterfly and steal the show!\", \"rating\": 5}}\"\"\"\n",
"\n",
"prompt = ChatPromptTemplate.from_messages([(\"system\", system), (\"human\", \"{input}\")])\n",
"\n",
"few_shot_structured_llm = prompt | structured_llm\n",
"few_shot_structured_llm.invoke(\"what's something funny about woodpeckers\")"
]
},
{
"cell_type": "markdown",
"id": "3c12b389-153d-44d1-af34-37e5b926d3db",
"metadata": {},
"source": [
"When the underlying method for structuring outputs is tool calling, we can pass in our examples as explicit tool calls. You can check if the model you're using makes use of tool calling in its API reference."
]
},
{
"cell_type": "code",
"execution_count": 46,
"id": "d7381cb0-b2c3-4302-a319-ed72d0b9e43f",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'setup': 'Crocodile',\n",
" 'punchline': \"Crocodile 'see you later', but in a while, it becomes an alligator!\",\n",
" 'rating': 7}"
]
},
"execution_count": 46,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_core.messages import AIMessage, HumanMessage, ToolMessage\n",
"\n",
"examples = [\n",
" HumanMessage(\"Tell me a joke about planes\", name=\"example_user\"),\n",
" AIMessage(\n",
" \"\",\n",
" name=\"example_assistant\",\n",
" tool_calls=[\n",
" {\n",
" \"name\": \"joke\",\n",
" \"args\": {\n",
" \"setup\": \"Why don't planes ever get tired?\",\n",
" \"punchline\": \"Because they have rest wings!\",\n",
" \"rating\": 2,\n",
" },\n",
" \"id\": \"1\",\n",
" }\n",
" ],\n",
" ),\n",
" # Most tool-calling models expect a ToolMessage(s) to follow an AIMessage with tool calls.\n",
" ToolMessage(\"\", tool_call_id=\"1\"),\n",
" # Some models also expect an AIMessage to follow any ToolMessages,\n",
" # so you may need to add an AIMessage here.\n",
" HumanMessage(\"Tell me another joke about planes\", name=\"example_user\"),\n",
" AIMessage(\n",
" \"\",\n",
" name=\"example_assistant\",\n",
" tool_calls=[\n",
" {\n",
" \"name\": \"joke\",\n",
" \"args\": {\n",
" \"setup\": \"Cargo\",\n",
" \"punchline\": \"Cargo 'vroom vroom', but planes go 'zoom zoom'!\",\n",
" \"rating\": 10,\n",
" },\n",
" \"id\": \"2\",\n",
" }\n",
" ],\n",
" ),\n",
" ToolMessage(\"\", tool_call_id=\"2\"),\n",
" HumanMessage(\"Now about caterpillars\", name=\"example_user\"),\n",
" AIMessage(\n",
" \"\",\n",
" tool_calls=[\n",
" {\n",
" \"name\": \"joke\",\n",
" \"args\": {\n",
" \"setup\": \"Caterpillar\",\n",
" \"punchline\": \"Caterpillar really slow, but watch me turn into a butterfly and steal the show!\",\n",
" \"rating\": 5,\n",
" },\n",
" \"id\": \"3\",\n",
" }\n",
" ],\n",
" ),\n",
" ToolMessage(\"\", tool_call_id=\"3\"),\n",
"]\n",
"system = \"\"\"You are a hilarious comedian. Your specialty is knock-knock jokes. \\\n",
"Return a joke which has the setup (the response to \"Who's there?\") \\\n",
"and the final punchline (the response to \"<setup> who?\").\"\"\"\n",
"\n",
"prompt = ChatPromptTemplate.from_messages(\n",
" [(\"system\", system), (\"placeholder\", \"{examples}\"), (\"human\", \"{input}\")]\n",
")\n",
"few_shot_structured_llm = prompt | structured_llm\n",
"few_shot_structured_llm.invoke({\"input\": \"crocodiles\", \"examples\": examples})"
]
},
{
"cell_type": "markdown",
"id": "498d893b-ceaa-47ff-a9d8-4faa60702715",
"metadata": {},
"source": [
"For more on few shot prompting when using tool calling, see [here](/docs/how_to/function_calling/#Few-shot-prompting)."
]
},
{
@ -239,9 +469,17 @@
"id": "39d7a555",
"metadata": {},
"source": [
"### Specifying the output method (Advanced)\n",
"### (Advanced) Specifying the method for structuring outputs\n",
"\n",
"For models that support more than one means of outputting data, you can specify the preferred one like this:"
"For models that support more than one means of structuring outputs (i.e., they support both tool calling and JSON mode), you can specify which method to use with the `method=` argument.\n",
"\n",
":::info JSON mode\n",
"\n",
"If using JSON mode you'll have to still specify the desired schema in the model prompt. The schema you pass to `with_structured_output` will only be used for parsing the model outputs, it will not be passed to the model the way it is with tool calling.\n",
"\n",
"To see if the model you're using supports JSON mode, check its entry in the [API reference](https://api.python.langchain.com/en/latest/langchain_api_reference.html).\n",
"\n",
":::"
]
},
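{
"cell_type": "markdown",
"id": "json-mode-method-sketch",
"metadata": {},
"source": [
"As a rough sketch (assuming the `Joke` class and `llm` from above, and a provider that supports JSON mode), forcing JSON mode could look something like the following. Note that, per the caveat above, the desired keys are described in the prompt itself:\n",
"\n",
"```python\n",
"# Hypothetical sketch: request JSON mode instead of tool calling.\n",
"structured_llm = llm.with_structured_output(Joke, method=\"json_mode\")\n",
"\n",
"structured_llm.invoke(\n",
"    \"Tell me a joke about cats, respond in JSON with `setup` and `punchline` keys\"\n",
")\n",
"```"
]
},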
{
@ -253,7 +491,7 @@
{
"data": {
"text/plain": [
"Joke(setup='Why was the cat sitting on the computer?', punchline='Because it wanted to keep an eye on the mouse!')"
"Joke(setup='Why was the cat sitting on the computer?', punchline='Because it wanted to keep an eye on the mouse!', rating=None)"
]
},
"execution_count": 6,
@ -274,13 +512,9 @@
"id": "5e92a98a",
"metadata": {},
"source": [
"In the above example, we use OpenAI's alternate JSON mode capability along with a more specific prompt.\n",
"## Prompting and parsing model directly\n",
"\n",
"For specifics about the model you choose, peruse its entry in the [API reference pages](https://api.python.langchain.com/en/latest/langchain_api_reference.html).\n",
"\n",
"## Prompting techniques\n",
"\n",
"You can also prompt models to outputting information in a given format. This approach relies on designing good prompts and then parsing the output of the models. This is the only option for models that don't support `.with_structured_output()` or other built-in approaches.\n",
"Not all models support `.with_structured_output()`, since not all models have tool calling or JSON mode support. For such models you'll need to directly prompt the model to use a specific format, and use an output parser to extract the structured response from the raw model output.\n",
"\n",
"### Using `PydanticOutputParser`\n",
"\n",
@ -289,7 +523,7 @@
},
{
"cell_type": "code",
"execution_count": 7,
"execution_count": 31,
"id": "6e514455",
"metadata": {},
"outputs": [],
@ -341,7 +575,7 @@
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": 37,
"id": "3d73d33d",
"metadata": {},
"outputs": [
@ -366,7 +600,7 @@
"source": [
"query = \"Anna is 23 years old and she is 6 feet tall\"\n",
"\n",
"print(prompt.format_prompt(query=query).to_string())"
"print(prompt.invoke(query).to_string())"
]
},
{
@ -542,25 +776,13 @@
"\n",
"chain.invoke({\"query\": query})"
]
},
{
"cell_type": "markdown",
"id": "7a39221a",
"metadata": {},
"source": [
"## Next steps\n",
"\n",
"Now you've learned a few methods to make a model output structured data.\n",
"\n",
"To learn more, check out the other how-to guides in this section, or the conceptual guide on tool calling."
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"display_name": "poetry-venv-2",
"language": "python",
"name": "python3"
"name": "poetry-venv-2"
},
"language_info": {
"codemirror_mode": {

@ -4,7 +4,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"# How to use a chat model to call tools\n",
"# How to use a model to call tools\n",
"\n",
":::info Prerequisites\n",
"\n",
@ -705,7 +705,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.1"
"version": "3.11.4"
}
},
"nbformat": 4,

@ -1,450 +0,0 @@
{
"cells": [
{
"cell_type": "raw",
"id": "7f219241",
"metadata": {},
"source": [
"---\n",
"sidebar_position: 4\n",
"sidebar_class_name: hidden\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "15780a65",
"metadata": {},
"source": [
"# How to use LangChain tools\n",
"\n",
"Tools are interfaces that an agent, chain, or LLM can use to interact with the world.\n",
"They combine a few things:\n",
"\n",
"1. The name of the tool\n",
"2. A description of what the tool is\n",
"3. JSON schema of what the inputs to the tool are\n",
"4. The function to call \n",
"5. Whether the result of a tool should be returned directly to the user\n",
"\n",
"It is useful to have all this information because this information can be used to build action-taking systems! The name, description, and JSON schema can be used to prompt the LLM so it knows how to specify what action to take, and then the function to call is equivalent to taking that action.\n",
"\n",
"The simpler the input to a tool is, the easier it is for an LLM to be able to use it.\n",
"Many agents will only work with tools that have a single string input.\n",
"For a list of agent types and which ones work with more complicated inputs, please see [this documentation](https://python.langchain.com/v0.1/docs/modules/agents/agent_types/)\n",
"\n",
"Importantly, the name, description, and JSON schema (if used) are all used in the prompt. Therefore, it is really important that they are clear and describe exactly how the tool should be used. You may need to change the default name, description, or JSON schema if the LLM is not understanding how to use the tool.\n",
"\n",
"## Default Tools\n",
"\n",
"Let's take a look at how to work with tools. To do this, we'll work with a built in tool."
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "19297004",
"metadata": {},
"outputs": [],
"source": [
"from langchain_community.tools import WikipediaQueryRun\n",
"from langchain_community.utilities import WikipediaAPIWrapper"
]
},
{
"cell_type": "markdown",
"id": "1098e51a",
"metadata": {},
"source": [
"Now we initialize the tool. This is where we can configure it as we please"
]
},
{
"cell_type": "code",
"execution_count": 22,
"id": "27a48655",
"metadata": {},
"outputs": [],
"source": [
"api_wrapper = WikipediaAPIWrapper(top_k_results=1, doc_content_chars_max=100)\n",
"tool = WikipediaQueryRun(api_wrapper=api_wrapper)"
]
},
{
"cell_type": "markdown",
"id": "7db48439",
"metadata": {},
"source": [
"This is the default name"
]
},
{
"cell_type": "code",
"execution_count": 23,
"id": "50f1ece1",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'Wikipedia'"
]
},
"execution_count": 23,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"tool.name"
]
},
{
"cell_type": "markdown",
"id": "075499b1",
"metadata": {},
"source": [
"This is the default description"
]
},
{
"cell_type": "code",
"execution_count": 24,
"id": "e9be09e2",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'A wrapper around Wikipedia. Useful for when you need to answer general questions about people, places, companies, facts, historical events, or other subjects. Input should be a search query.'"
]
},
"execution_count": 24,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"tool.description"
]
},
{
"cell_type": "markdown",
"id": "89c86b00",
"metadata": {},
"source": [
"This is the default JSON schema of the inputs"
]
},
{
"cell_type": "code",
"execution_count": 20,
"id": "963a2e8c",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'query': {'title': 'Query', 'type': 'string'}}"
]
},
"execution_count": 20,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"tool.args"
]
},
{
"cell_type": "markdown",
"id": "5c467a35",
"metadata": {},
"source": [
"We can see if the tool should return directly to the user"
]
},
{
"cell_type": "code",
"execution_count": 33,
"id": "039334b3",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"False"
]
},
"execution_count": 33,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"tool.return_direct"
]
},
{
"cell_type": "markdown",
"id": "fc421b02",
"metadata": {},
"source": [
"We can call this tool with a dictionary input"
]
},
{
"cell_type": "code",
"execution_count": 25,
"id": "6669a13c",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'Page: LangChain\\nSummary: LangChain is a framework designed to simplify the creation of applications '"
]
},
"execution_count": 25,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"tool.run({\"query\": \"langchain\"})"
]
},
{
"cell_type": "markdown",
"id": "587d6a58",
"metadata": {},
"source": [
"We can also call this tool with a single string input. \n",
"We can do this because this tool expects only a single input.\n",
"If it required multiple inputs, we would not be able to do that."
]
},
{
"cell_type": "code",
"execution_count": 26,
"id": "8cb23935",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'Page: LangChain\\nSummary: LangChain is a framework designed to simplify the creation of applications '"
]
},
"execution_count": 26,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"tool.run(\"langchain\")"
]
},
{
"cell_type": "markdown",
"id": "19eee1d5",
"metadata": {},
"source": [
"## Customizing Default Tools\n",
"We can also modify the built in name, description, and JSON schema of the arguments.\n",
"\n",
"When defining the JSON schema of the arguments, it is important that the inputs remain the same as the function, so you shouldn't change that. But you can define custom descriptions for each input easily."
]
},
{
"cell_type": "code",
"execution_count": 27,
"id": "599c4da7",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"\n",
"\n",
"class WikiInputs(BaseModel):\n",
" \"\"\"Inputs to the wikipedia tool.\"\"\"\n",
"\n",
" query: str = Field(\n",
" description=\"query to look up in Wikipedia, should be 3 or less words\"\n",
" )"
]
},
{
"cell_type": "code",
"execution_count": 34,
"id": "6bde63e1",
"metadata": {},
"outputs": [],
"source": [
"tool = WikipediaQueryRun(\n",
" name=\"wiki-tool\",\n",
" description=\"look up things in wikipedia\",\n",
" args_schema=WikiInputs,\n",
" api_wrapper=api_wrapper,\n",
" return_direct=True,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 29,
"id": "eeaa1d9a",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'wiki-tool'"
]
},
"execution_count": 29,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"tool.name"
]
},
{
"cell_type": "code",
"execution_count": 30,
"id": "7599d88c",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'look up things in wikipedia'"
]
},
"execution_count": 30,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"tool.description"
]
},
{
"cell_type": "code",
"execution_count": 31,
"id": "80042cb1",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'query': {'title': 'Query',\n",
" 'description': 'query to look up in Wikipedia, should be 3 or less words',\n",
" 'type': 'string'}}"
]
},
"execution_count": 31,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"tool.args"
]
},
{
"cell_type": "code",
"execution_count": 35,
"id": "8455fb9e",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"True"
]
},
"execution_count": 35,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"tool.return_direct"
]
},
{
"cell_type": "code",
"execution_count": 32,
"id": "86f731a8",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'Page: LangChain\\nSummary: LangChain is a framework designed to simplify the creation of applications '"
]
},
"execution_count": 32,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"tool.run(\"langchain\")"
]
},
{
"cell_type": "markdown",
"id": "c5b8b6bc",
"metadata": {},
"source": [
"## More Topics\n",
"\n",
"This was a quick introduction to tools in LangChain, but there is a lot more to learn\n",
"\n",
"**[Built-In Tools](/docs/integrations/tools/)**: For a list of all built-in tools, see [this page](/docs/integrations/tools/)\n",
" \n",
"**[Custom Tools](/docs/how_to/custom_tools)**: Although built-in tools are useful, it's highly likely that you'll have to define your own tools. See [this guide](/docs/how_to/custom_tools) for instructions on how to do so.\n",
" \n",
"**[Toolkits](/docs/how_to/toolkits)**: Toolkits are collections of tools that work well together. For a more in depth description as well as a list of all built-in toolkits, see [this page](/docs/how_to/toolkits)\n",
"\n",
"**[Tools as OpenAI Functions](/docs/how_to/tools_as_openai_functions/)**: Tools are very similar to OpenAI Functions, and can easily be converted to that format. See [this notebook](/docs/how_to/tools_as_openai_functions) for instructions on how to do that.\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "78e2d0b3",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

@ -0,0 +1,236 @@
{
"cells": [
{
"cell_type": "raw",
"id": "7f219241",
"metadata": {},
"source": [
"---\n",
"sidebar_position: 4\n",
"sidebar_class_name: hidden\n",
"---"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "e8f68de0-7df7-4bfd-9207-3258431426ef",
"metadata": {},
"source": [
"# How to use built-in tools and toolkits\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"\n",
"- [LangChain Tools](/docs/concepts/#tools)\n",
"- [LangChain Toolkits](/docs/concepts/#tools)\n",
"\n",
":::\n",
"\n",
"## Tools\n",
"\n",
"LangChain has a large collection of 3rd party tools. Please visit [Tool Integrations](/docs/integrations/tools/) for a list of the available tools.\n",
"\n",
":::{.callout-important}\n",
"\n",
"When using 3rd party tools, make sure that you understand how the tool works, what permissions\n",
"it has. Read over its documentation and check if anything is required from you\n",
"from a security point of view. Please see our [security](https://python.langchain.com/v0.1/docs/security/) \n",
"guidelines for more information.\n",
"\n",
":::\n",
"\n",
"Let's try out the [Wikipedia integration](/docs/integrations/tools/wikipedia/)."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "84f70856-b865-4658-9930-7577fb4712ce",
"metadata": {},
"outputs": [],
"source": [
"!pip install -qU wikipedia"
]
},
{
"cell_type": "code",
"execution_count": 51,
"id": "b4eaed85-c5a6-4ba9-b401-40258b0131c2",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Page: LangChain\n",
"Summary: LangChain is a framework designed to simplify the creation of applications \n"
]
}
],
"source": [
"from langchain_community.tools import WikipediaQueryRun\n",
"from langchain_community.utilities import WikipediaAPIWrapper\n",
"\n",
"api_wrapper = WikipediaAPIWrapper(top_k_results=1, doc_content_chars_max=100)\n",
"tool = WikipediaQueryRun(api_wrapper=api_wrapper)\n",
"\n",
"print(tool.invoke({\"query\": \"langchain\"}))"
]
},
{
"cell_type": "markdown",
"id": "cb870984-52d5-4453-be35-7072a08c6c14",
"metadata": {},
"source": [
"The tool has the following defaults associated with it:"
]
},
{
"cell_type": "code",
"execution_count": 55,
"id": "7f094f01-2e98-4947-acc4-0846963a96e0",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Name: wiki-tool\n",
"Description: look up things in wikipedia\n",
"args schema: {'query': {'title': 'Query', 'description': 'query to look up in Wikipedia, should be 3 or less words', 'type': 'string'}}\n",
"returns directly?: True\n"
]
}
],
"source": [
"print(f\"Name: {tool.name}\")\n",
"print(f\"Description: {tool.description}\")\n",
"print(f\"args schema: {tool.args}\")\n",
"print(f\"returns directly?: {tool.return_direct}\")"
]
},
{
"cell_type": "markdown",
"id": "19eee1d5",
"metadata": {},
"source": [
"## Customizing Default Tools\n",
"We can also modify the built in name, description, and JSON schema of the arguments.\n",
"\n",
"When defining the JSON schema of the arguments, it is important that the inputs remain the same as the function, so you shouldn't change that. But you can define custom descriptions for each input easily."
]
},
{
"cell_type": "code",
"execution_count": 56,
"id": "1365784c-e666-41c8-a1bb-e50f822b5936",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Page: LangChain\n",
"Summary: LangChain is a framework designed to simplify the creation of applications \n"
]
}
],
"source": [
"from langchain_community.tools import WikipediaQueryRun\n",
"from langchain_community.utilities import WikipediaAPIWrapper\n",
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"\n",
"\n",
"class WikiInputs(BaseModel):\n",
" \"\"\"Inputs to the wikipedia tool.\"\"\"\n",
"\n",
" query: str = Field(\n",
" description=\"query to look up in Wikipedia, should be 3 or less words\"\n",
" )\n",
"\n",
"\n",
"tool = WikipediaQueryRun(\n",
" name=\"wiki-tool\",\n",
" description=\"look up things in wikipedia\",\n",
" args_schema=WikiInputs,\n",
" api_wrapper=api_wrapper,\n",
" return_direct=True,\n",
")\n",
"\n",
"print(tool.run(\"langchain\"))"
]
},
{
"cell_type": "code",
"execution_count": 57,
"id": "6e8850d6-6840-443e-a2be-adf64b30975c",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Name: wiki-tool\n",
"Description: look up things in wikipedia\n",
"args schema: {'query': {'title': 'Query', 'description': 'query to look up in Wikipedia, should be 3 or less words', 'type': 'string'}}\n",
"returns directly?: True\n"
]
}
],
"source": [
"print(f\"Name: {tool.name}\")\n",
"print(f\"Description: {tool.description}\")\n",
"print(f\"args schema: {tool.args}\")\n",
"print(f\"returns directly?: {tool.return_direct}\")"
]
},
{
"attachments": {},
"cell_type": "markdown",
"id": "acf0c2f7-ddc6-4633-8cef-59f234321e5c",
"metadata": {},
"source": [
"## How to use built-in toolkits\n",
"\n",
"Toolkits are collections of tools that are designed to be used together for specific tasks. They have convenient loading methods.\n",
"\n",
"For a complete list of available ready-made toolkits, visit [Integrations](/docs/integrations/toolkits/).\n",
"\n",
"All Toolkits expose a `get_tools` method which returns a list of tools.\n",
"\n",
"You're usually meant to use them this way:\n",
"\n",
"```python\n",
"# Initialize a toolkit\n",
"toolkit = ExampleTookit(...)\n",
"\n",
"# Get list of tools\n",
"tools = toolkit.get_tools()\n",
"```"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.4"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

@ -335,7 +335,7 @@
"id": "616f9714-5b18-4eed-b88a-d38e4cb1de99",
"metadata": {},
"source": [
"Agents are also great because they make it easy to use multiple tools. To learn how to build Chains that use multiple tools, check out the [Chains with multiple tools](/docs/how_to/tools_multiple) page."
"Agents are also great because they make it easy to use multiple tools."
]
},
{
@ -457,21 +457,6 @@
"source": [
"Check out the [LangSmith trace here](https://smith.langchain.com/public/eeeb27a4-a2f8-4f06-a3af-9c983f76146c/r)."
]
},
{
"cell_type": "markdown",
"id": "b0e4b7f4-58ce-4ca0-a986-d05a436a7ccf",
"metadata": {},
"source": [
"## Next steps\n",
"\n",
"Here we've gone over the basic ways to use Tools with Chains and Agents. We recommend the following sections to explore next:\n",
"\n",
"- [Agents](/docs/tutorials/agents): Everything related to Agents.\n",
"- [Choosing between multiple tools](/docs/how_to/tools_multiple): How to make tool chains that select from multiple tools.\n",
"- [Prompting for tool use](/docs/how_to/tools_prompting): How to make tool chains that prompt models directly, without using function-calling APIs.\n",
"- [Parallel tool use](/docs/how_to/tools_parallel): How to make tool chains that invoke multiple tools at once."
]
}
],
"metadata": {
@ -490,7 +475,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.4"
"version": "3.11.4"
}
},
"nbformat": 4,

@ -7,7 +7,16 @@
"source": [
"# How to add a human-in-the-loop for tools\n",
"\n",
"There are certain tools that we don't trust a model to execute on its own. One thing we can do in such situations is require human approval before the tool is invoked."
"There are certain tools that we don't trust a model to execute on its own. One thing we can do in such situations is require human approval before the tool is invoked.\n",
"\n",
":::{.callout-info}\n",
"\n",
"This how-to guide shows a simple way to add human-in-the-loop for code running in a jupyter notebook or in a terminal.\n",
"\n",
"To build a production application, you will need to do more work to keep track of application state appropriately.\n",
"\n",
"We recommend using `langgraph` for powering such a capability. For more details, please see this [guide](https://langchain-ai.github.io/langgraph/how-tos/human-in-the-loop/).\n",
":::\n"
]
},
{
@ -40,7 +49,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 8,
"id": "2bed0ccf-20cc-4fd3-9947-55471dd8c4da",
"metadata": {},
"outputs": [],
@ -55,13 +64,19 @@
},
{
"cell_type": "markdown",
"id": "43721981-4595-4721-bea0-5c67696426d3",
"id": "7ecd5d7e-7c3c-4180-8958-7db2c1e43564",
"metadata": {},
"source": [
"## Chain\n",
"\n",
"Suppose we have the following (dummy) tools and tool-calling chain:\n",
"\n",
"Let's create a few simple (dummy) tools and a tool-calling chain:"
]
},
{
"cell_type": "markdown",
"id": "43721981-4595-4721-bea0-5c67696426d3",
"metadata": {},
"source": [
"```{=mdx}\n",
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
"\n",
@ -71,13 +86,13 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 1,
"id": "e0ff02ac-e750-493b-9b09-4578711a6726",
"metadata": {},
"outputs": [],
"source": [
"# | output: false\n",
"# | echo: false\n",
"# | outout: false\n",
"\n",
"from langchain_anthropic import ChatAnthropic\n",
"\n",
@ -86,7 +101,7 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 3,
"id": "0221fdfd-2a18-4449-a123-e6b0b15bb3d9",
"metadata": {},
"outputs": [
@ -95,17 +110,16 @@
"text/plain": [
"[{'name': 'count_emails',\n",
" 'args': {'last_n_days': 5},\n",
" 'id': 'toolu_012VHuh7vk5dVNct5SgZj3gh',\n",
" 'id': 'toolu_01QYZdJ4yPiqsdeENWHqioFW',\n",
" 'output': 10}]"
]
},
"execution_count": 4,
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from operator import itemgetter\n",
"from typing import Dict, List\n",
"\n",
"from langchain_core.messages import AIMessage\n",
@ -149,12 +163,14 @@
"source": [
"## Adding human approval\n",
"\n",
"We can add a simple human approval step to our tool_chain function:"
"Let's add a step in the chain that will ask a person to approve or reject the tall call request.\n",
"\n",
"On rejection, the step will raise an exception which will stop execution of the rest of the chain."
]
},
{
"cell_type": "code",
"execution_count": 9,
"execution_count": 12,
"id": "341fb055-0315-47bc-8f72-ed6103d2981f",
"metadata": {},
"outputs": [],
@ -162,23 +178,35 @@
"import json\n",
"\n",
"\n",
"def human_approval(msg: AIMessage) -> Runnable:\n",
"class NotApproved(Exception):\n",
" \"\"\"Custom exception.\"\"\"\n",
"\n",
"\n",
"def human_approval(msg: AIMessage) -> AIMessage:\n",
" \"\"\"Responsible for passing through its input or raising an exception.\n",
"\n",
" Args:\n",
" msg: output from the chat model\n",
"\n",
" Returns:\n",
" msg: original output from the msg\n",
" \"\"\"\n",
" tool_strs = \"\\n\\n\".join(\n",
" json.dumps(tool_call, indent=2) for tool_call in msg.tool_calls\n",
" )\n",
" input_msg = (\n",
" f\"Do you approve of the following tool invocations\\n\\n{tool_strs}\\n\\n\"\n",
" \"Anything except 'Y'/'Yes' (case-insensitive) will be treated as a no.\"\n",
" \"Anything except 'Y'/'Yes' (case-insensitive) will be treated as a no.\\n >>>\"\n",
" )\n",
" resp = input(input_msg)\n",
" if resp.lower() not in (\"yes\", \"y\"):\n",
" raise ValueError(f\"Tool invocations not approved:\\n\\n{tool_strs}\")\n",
" raise NotApproved(f\"Tool invocations not approved:\\n\\n{tool_strs}\")\n",
" return msg"
]
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": 13,
"id": "25dca07b-56ca-4b94-9955-d4f3e9895e03",
"metadata": {},
"outputs": [
@ -193,10 +221,11 @@
" \"args\": {\n",
" \"last_n_days\": 5\n",
" },\n",
" \"id\": \"toolu_01LCpjpFxrRspygDscnHYyPm\"\n",
" \"id\": \"toolu_01WbD8XeMoQaRFtsZezfsHor\"\n",
"}\n",
"\n",
"Anything except 'Y'/'Yes' (case-insensitive) will be treated as a no. yes\n"
"Anything except 'Y'/'Yes' (case-insensitive) will be treated as a no.\n",
" >>> yes\n"
]
},
{
@ -204,11 +233,11 @@
"text/plain": [
"[{'name': 'count_emails',\n",
" 'args': {'last_n_days': 5},\n",
" 'id': 'toolu_01LCpjpFxrRspygDscnHYyPm',\n",
" 'id': 'toolu_01WbD8XeMoQaRFtsZezfsHor',\n",
" 'output': 10}]"
]
},
"execution_count": 10,
"execution_count": 13,
"metadata": {},
"output_type": "execute_result"
}
@ -220,7 +249,7 @@
},
{
"cell_type": "code",
"execution_count": 11,
"execution_count": 14,
"id": "f558f2cd-847b-4ef9-a770-3961082b540c",
"metadata": {},
"outputs": [
@ -233,45 +262,41 @@
"{\n",
" \"name\": \"send_email\",\n",
" \"args\": {\n",
" \"message\": \"What's up homie\",\n",
" \"recipient\": \"sally@gmail.com\"\n",
" \"recipient\": \"sally@gmail.com\",\n",
" \"message\": \"What's up homie\"\n",
" },\n",
" \"id\": \"toolu_0158qJVd1AL32Y1xxYUAtNEy\"\n",
" \"id\": \"toolu_014XccHFzBiVcc9GV1harV9U\"\n",
"}\n",
"\n",
"Anything except 'Y'/'Yes' (case-insensitive) will be treated as a no. no\n"
"Anything except 'Y'/'Yes' (case-insensitive) will be treated as a no.\n",
" >>> no\n"
]
},
{
"ename": "ValueError",
"evalue": "Tool invocations not approved:\n\n{\n \"name\": \"send_email\",\n \"args\": {\n \"message\": \"What's up homie\",\n \"recipient\": \"sally@gmail.com\"\n },\n \"id\": \"toolu_0158qJVd1AL32Y1xxYUAtNEy\"\n}",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mValueError\u001b[0m Traceback (most recent call last)",
"Cell \u001b[0;32mIn[11], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[43mchain\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minvoke\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mSend sally@gmail.com an email saying \u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43mWhat\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43ms up homie\u001b[39;49m\u001b[38;5;124;43m'\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/langchain/libs/core/langchain_core/runnables/base.py:2499\u001b[0m, in \u001b[0;36mRunnableSequence.invoke\u001b[0;34m(self, input, config)\u001b[0m\n\u001b[1;32m 2497\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m 2498\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m i, step \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28menumerate\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39msteps):\n\u001b[0;32m-> 2499\u001b[0m \u001b[38;5;28minput\u001b[39m \u001b[38;5;241m=\u001b[39m \u001b[43mstep\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minvoke\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 2500\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2501\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;66;43;03m# mark each step as a child run\u001b[39;49;00m\n\u001b[1;32m 2502\u001b[0m \u001b[43m \u001b[49m\u001b[43mpatch_config\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 2503\u001b[0m \u001b[43m \u001b[49m\u001b[43mconfig\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcallbacks\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mrun_manager\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mget_child\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43mf\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mseq:step:\u001b[39;49m\u001b[38;5;132;43;01m{\u001b[39;49;00m\u001b[43mi\u001b[49m\u001b[38;5;241;43m+\u001b[39;49m\u001b[38;5;241;43m1\u001b[39;49m\u001b[38;5;132;43;01m}\u001b[39;49;00m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[1;32m 2504\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 2505\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 2506\u001b[0m \u001b[38;5;66;03m# finish the root run\u001b[39;00m\n\u001b[1;32m 2507\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mBaseException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m e:\n",
"File \u001b[0;32m~/langchain/libs/core/langchain_core/runnables/base.py:3961\u001b[0m, in \u001b[0;36mRunnableLambda.invoke\u001b[0;34m(self, input, config, **kwargs)\u001b[0m\n\u001b[1;32m 3959\u001b[0m \u001b[38;5;250m\u001b[39m\u001b[38;5;124;03m\"\"\"Invoke this runnable synchronously.\"\"\"\u001b[39;00m\n\u001b[1;32m 3960\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mhasattr\u001b[39m(\u001b[38;5;28mself\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mfunc\u001b[39m\u001b[38;5;124m\"\u001b[39m):\n\u001b[0;32m-> 3961\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_call_with_config\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 3962\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_invoke\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 3963\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\n\u001b[1;32m 3964\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_config\u001b[49m\u001b[43m(\u001b[49m\u001b[43mconfig\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfunc\u001b[49m\u001b[43m)\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 3965\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 3966\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 3967\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[1;32m 3968\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mTypeError\u001b[39;00m(\n\u001b[1;32m 3969\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mCannot invoke a coroutine function synchronously.\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 3970\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mUse `ainvoke` instead.\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 3971\u001b[0m )\n",
"File \u001b[0;32m~/langchain/libs/core/langchain_core/runnables/base.py:1625\u001b[0m, in \u001b[0;36mRunnable._call_with_config\u001b[0;34m(self, func, input, config, run_type, **kwargs)\u001b[0m\n\u001b[1;32m 1621\u001b[0m context \u001b[38;5;241m=\u001b[39m copy_context()\n\u001b[1;32m 1622\u001b[0m context\u001b[38;5;241m.\u001b[39mrun(var_child_runnable_config\u001b[38;5;241m.\u001b[39mset, child_config)\n\u001b[1;32m 1623\u001b[0m output \u001b[38;5;241m=\u001b[39m cast(\n\u001b[1;32m 1624\u001b[0m Output,\n\u001b[0;32m-> 1625\u001b[0m \u001b[43mcontext\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrun\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 1626\u001b[0m \u001b[43m \u001b[49m\u001b[43mcall_func_with_variable_args\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;66;43;03m# type: ignore[arg-type]\u001b[39;49;00m\n\u001b[1;32m 1627\u001b[0m \u001b[43m \u001b[49m\u001b[43mfunc\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;66;43;03m# type: ignore[arg-type]\u001b[39;49;00m\n\u001b[1;32m 1628\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;66;43;03m# type: ignore[arg-type]\u001b[39;49;00m\n\u001b[1;32m 1629\u001b[0m \u001b[43m \u001b[49m\u001b[43mconfig\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1630\u001b[0m \u001b[43m \u001b[49m\u001b[43mrun_manager\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1631\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 1632\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m,\n\u001b[1;32m 1633\u001b[0m )\n\u001b[1;32m 1634\u001b[0m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mBaseException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[1;32m 1635\u001b[0m run_manager\u001b[38;5;241m.\u001b[39mon_chain_error(e)\n",
"File \u001b[0;32m~/langchain/libs/core/langchain_core/runnables/config.py:347\u001b[0m, in \u001b[0;36mcall_func_with_variable_args\u001b[0;34m(func, input, config, run_manager, **kwargs)\u001b[0m\n\u001b[1;32m 345\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m run_manager \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m accepts_run_manager(func):\n\u001b[1;32m 346\u001b[0m kwargs[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mrun_manager\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m run_manager\n\u001b[0;32m--> 347\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mfunc\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/langchain/libs/core/langchain_core/runnables/base.py:3835\u001b[0m, in \u001b[0;36mRunnableLambda._invoke\u001b[0;34m(self, input, run_manager, config, **kwargs)\u001b[0m\n\u001b[1;32m 3833\u001b[0m output \u001b[38;5;241m=\u001b[39m chunk\n\u001b[1;32m 3834\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m-> 3835\u001b[0m output \u001b[38;5;241m=\u001b[39m \u001b[43mcall_func_with_variable_args\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 3836\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfunc\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mconfig\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mrun_manager\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\n\u001b[1;32m 3837\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 3838\u001b[0m \u001b[38;5;66;03m# If the output is a runnable, invoke it\u001b[39;00m\n\u001b[1;32m 3839\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(output, Runnable):\n",
"File \u001b[0;32m~/langchain/libs/core/langchain_core/runnables/config.py:347\u001b[0m, in \u001b[0;36mcall_func_with_variable_args\u001b[0;34m(func, input, config, run_manager, **kwargs)\u001b[0m\n\u001b[1;32m 345\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m run_manager \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;129;01mand\u001b[39;00m accepts_run_manager(func):\n\u001b[1;32m 346\u001b[0m kwargs[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mrun_manager\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;241m=\u001b[39m run_manager\n\u001b[0;32m--> 347\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mfunc\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
"Cell \u001b[0;32mIn[9], line 14\u001b[0m, in \u001b[0;36mhuman_approval\u001b[0;34m(msg)\u001b[0m\n\u001b[1;32m 12\u001b[0m resp \u001b[38;5;241m=\u001b[39m \u001b[38;5;28minput\u001b[39m(input_msg)\n\u001b[1;32m 13\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m resp\u001b[38;5;241m.\u001b[39mlower() \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;129;01min\u001b[39;00m (\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124myes\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124my\u001b[39m\u001b[38;5;124m\"\u001b[39m):\n\u001b[0;32m---> 14\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mTool invocations not approved:\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[38;5;132;01m{\u001b[39;00mtool_strs\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m 15\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m msg\n",
"\u001b[0;31mValueError\u001b[0m: Tool invocations not approved:\n\n{\n \"name\": \"send_email\",\n \"args\": {\n \"message\": \"What's up homie\",\n \"recipient\": \"sally@gmail.com\"\n },\n \"id\": \"toolu_0158qJVd1AL32Y1xxYUAtNEy\"\n}"
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"Tool invocations not approved:\n",
"\n",
"{\n",
" \"name\": \"send_email\",\n",
" \"args\": {\n",
" \"recipient\": \"sally@gmail.com\",\n",
" \"message\": \"What's up homie\"\n",
" },\n",
" \"id\": \"toolu_014XccHFzBiVcc9GV1harV9U\"\n",
"}\n"
]
}
],
"source": [
"chain.invoke(\"Send sally@gmail.com an email saying 'What's up homie'\")"
"try:\n",
" chain.invoke(\"Send sally@gmail.com an email saying 'What's up homie'\")\n",
"except NotApproved as e:\n",
" print()\n",
" print(e)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e938d8f1-df93-4726-a465-78e596312246",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
@ -290,7 +315,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.1"
"version": "3.11.4"
}
},
"nbformat": 4,

@ -1,273 +0,0 @@
{
"cells": [
{
"cell_type": "raw",
"id": "1ea1fe24-fe1e-463b-a52c-79f0ef02328e",
"metadata": {},
"source": [
"---\n",
"sidebar_position: 2\n",
"---"
]
},
{
"cell_type": "markdown",
"id": "95982bf1-7d9d-4dd6-a4ad-9de0719fe17f",
"metadata": {},
"source": [
"# How to use an LLM to choose between multiple tools\n",
"\n",
"In our [Quickstart](/docs/how_to/tool_calling) we went over how to build a Chain that calls a single `multiply` tool. Now let's take a look at how we might augment this chain so that it can pick from a number of tools to call. We'll focus on Chains since [Agents](/docs/tutorials/agents) can route between multiple tools by default."
]
},
{
"cell_type": "markdown",
"id": "3fafec38-443a-42ad-a913-5be7667e3734",
"metadata": {},
"source": [
"## Setup\n",
"\n",
"We'll need to install the following packages for this guide:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "78411bf1-0117-4f33-a3d7-f3d77a97bb78",
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain-core"
]
},
{
"cell_type": "markdown",
"id": "59d08fd0-ddd9-4c74-bcea-a5ca3a86e542",
"metadata": {},
"source": [
"If you'd like to trace your runs in [LangSmith](/docs/langsmith/) uncomment and set the following environment variables:"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "4185e74b-0500-4cad-ace0-bac37de466ac",
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"# os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
"# os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass()"
]
},
{
"cell_type": "markdown",
"id": "d28159f5-b7d0-4385-aa44-4cd1b64507bb",
"metadata": {},
"source": [
"## Tools\n",
"\n",
"Recall we already had a `multiply` tool:"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "e13ec98c-8521-4d63-b521-caf92da87b70",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.tools import tool\n",
"\n",
"\n",
"@tool\n",
"def multiply(first_int: int, second_int: int) -> int:\n",
" \"\"\"Multiply two integers together.\"\"\"\n",
" return first_int * second_int"
]
},
{
"cell_type": "markdown",
"id": "3de233af-b3bd-4f0c-8b1a-83527143a8db",
"metadata": {},
"source": [
"And now we can add to it an `exponentiate` and `add` tool:"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "e93661cd-a2ba-4ada-91ad-baf1b60879ec",
"metadata": {},
"outputs": [],
"source": [
"@tool\n",
"def add(first_int: int, second_int: int) -> int:\n",
" \"Add two integers.\"\n",
" return first_int + second_int\n",
"\n",
"\n",
"@tool\n",
"def exponentiate(base: int, exponent: int) -> int:\n",
" \"Exponentiate the base to the exponent power.\"\n",
" return base**exponent"
]
},
{
"cell_type": "markdown",
"id": "bbea4555-ed10-4a18-b802-e9a3071f132b",
"metadata": {},
"source": [
"The main difference between using one Tool and many is that we can't be sure which Tool the model will invoke upfront, so we cannot hardcode, like we did in the [Quickstart](/docs/how_to/tool_calling), a specific tool into our chain. Instead we'll add `call_tools`, a `RunnableLambda` that takes the output AI message with tools calls and routes to the correct tools.\n",
"\n",
"```{=mdx}\n",
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
"\n",
"<ChatModelTabs customVarName=\"llm\"/>\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "f00f0f3f-8530-4c1d-a26c-d20824e31faf",
"metadata": {},
"outputs": [],
"source": [
"from langchain_anthropic import ChatAnthropic\n",
"\n",
"llm = ChatAnthropic(model=\"claude-3-sonnet-20240229\", temperature=0)"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "c35359ae-a740-48c5-b5e7-1a377fb25aa2",
"metadata": {},
"outputs": [],
"source": [
"from operator import itemgetter\n",
"from typing import Dict, List, Union\n",
"\n",
"from langchain_core.messages import AIMessage\n",
"from langchain_core.runnables import (\n",
" Runnable,\n",
" RunnableLambda,\n",
" RunnableMap,\n",
" RunnablePassthrough,\n",
")\n",
"\n",
"tools = [multiply, exponentiate, add]\n",
"llm_with_tools = llm.bind_tools(tools)\n",
"tool_map = {tool.name: tool for tool in tools}\n",
"\n",
"\n",
"def call_tools(msg: AIMessage) -> Runnable:\n",
" \"\"\"Simple sequential tool calling helper.\"\"\"\n",
" tool_map = {tool.name: tool for tool in tools}\n",
" tool_calls = msg.tool_calls.copy()\n",
" for tool_call in tool_calls:\n",
" tool_call[\"output\"] = tool_map[tool_call[\"name\"]].invoke(tool_call[\"args\"])\n",
" return tool_calls\n",
"\n",
"\n",
"chain = llm_with_tools | call_tools"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "ea6dbb32-ec9b-4c70-a90f-a2db93978cf1",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[{'name': 'multiply',\n",
" 'args': {'first_int': 23, 'second_int': 7},\n",
" 'id': 'toolu_01Wf8kUs36kxRKLDL8vs7G8q',\n",
" 'output': 161}]"
]
},
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain.invoke(\"What's 23 times 7\")"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "b1c6c0f8-6d04-40d4-a40e-8719ca7b27c2",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[{'name': 'add',\n",
" 'args': {'first_int': 1000000, 'second_int': 1000000000},\n",
" 'id': 'toolu_012aK4xZBQg2sXARsFZnqxHh',\n",
" 'output': 1001000000}]"
]
},
"execution_count": 13,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain.invoke(\"add a million plus a billion\")"
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "ce76f299-1a4d-421c-afa4-a6346e34285c",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[{'name': 'exponentiate',\n",
" 'args': {'base': 37, 'exponent': 3},\n",
" 'id': 'toolu_01VDU6X3ugDb9cpnnmCZFPbC',\n",
" 'output': 50653}]"
]
},
"execution_count": 14,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain.invoke(\"cube thirty-seven\")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

@ -1,215 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "95982bf1-7d9d-4dd6-a4ad-9de0719fe17f",
"metadata": {},
"source": [
"# How to call tools in parallel\n",
"\n",
"In the [Chains with multiple tools](/docs/how_to/tools_multiple) guide we saw how to build function-calling chains that select between multiple tools. Some models, like the OpenAI models released in Fall 2023, also support parallel function calling, which allows you to invoke multiple functions (or the same function multiple times) in a single model call. Our previous chain from the multiple tools guides actually already supports this."
]
},
{
"cell_type": "markdown",
"id": "3fafec38-443a-42ad-a913-5be7667e3734",
"metadata": {},
"source": [
"## Setup\n",
"\n",
"We'll need to install the following packages for this guide:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "78411bf1-0117-4f33-a3d7-f3d77a97bb78",
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain-core"
]
},
{
"cell_type": "markdown",
"id": "59d08fd0-ddd9-4c74-bcea-a5ca3a86e542",
"metadata": {},
"source": [
"If you'd like to trace your runs in [LangSmith](/docs/langsmith/) uncomment and set the following environment variables:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "4185e74b-0500-4cad-ace0-bac37de466ac",
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"# os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
"# os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass()"
]
},
{
"cell_type": "markdown",
"id": "d28159f5-b7d0-4385-aa44-4cd1b64507bb",
"metadata": {},
"source": [
"## Tools"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "e13ec98c-8521-4d63-b521-caf92da87b70",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.tools import tool\n",
"\n",
"\n",
"@tool\n",
"def multiply(first_int: int, second_int: int) -> int:\n",
" \"\"\"Multiply two integers together.\"\"\"\n",
" return first_int * second_int\n",
"\n",
"\n",
"@tool\n",
"def add(first_int: int, second_int: int) -> int:\n",
" \"Add two integers.\"\n",
" return first_int + second_int\n",
"\n",
"\n",
"@tool\n",
"def exponentiate(base: int, exponent: int) -> int:\n",
" \"Exponentiate the base to the exponent power.\"\n",
" return base**exponent"
]
},
{
"cell_type": "markdown",
"id": "119d419c-1c61-4e0d-834a-5dabb72f5514",
"metadata": {},
"source": [
"# Chain\n",
"\n",
"```{=mdx}\n",
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
"\n",
"<ChatModelTabs customVarName=\"llm\" hideGoogle=\"true\"/>\n",
"```"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "f67d91d8-cc38-4065-8f80-901e079954dd",
"metadata": {},
"outputs": [],
"source": [
"# | echo: false\n",
"# | output: false\n",
"\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo-0125\", temperature=0)"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "c35359ae-a740-48c5-b5e7-1a377fb25aa2",
"metadata": {},
"outputs": [],
"source": [
"from operator import itemgetter\n",
"from typing import Dict, List, Union\n",
"\n",
"from langchain_core.messages import AIMessage\n",
"from langchain_core.runnables import (\n",
" Runnable,\n",
" RunnableLambda,\n",
" RunnableMap,\n",
" RunnablePassthrough,\n",
")\n",
"\n",
"tools = [multiply, exponentiate, add]\n",
"llm_with_tools = llm.bind_tools(tools)\n",
"tool_map = {tool.name: tool for tool in tools}\n",
"\n",
"\n",
"def call_tools(msg: AIMessage) -> Runnable:\n",
" \"\"\"Simple sequential tool calling helper.\"\"\"\n",
" tool_map = {tool.name: tool for tool in tools}\n",
" tool_calls = msg.tool_calls.copy()\n",
" for tool_call in tool_calls:\n",
" tool_call[\"output\"] = tool_map[tool_call[\"name\"]].invoke(tool_call[\"args\"])\n",
" return tool_calls\n",
"\n",
"\n",
"chain = llm_with_tools | call_tools"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "ea6dbb32-ec9b-4c70-a90f-a2db93978cf1",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[{'name': 'multiply',\n",
" 'args': {'first_int': 23, 'second_int': 7},\n",
" 'id': 'call_22tgOrsVLyLMsl2RLbUhtycw',\n",
" 'output': 161},\n",
" {'name': 'multiply',\n",
" 'args': {'first_int': 5, 'second_int': 18},\n",
" 'id': 'call_EbKHEG3TjqBhEwb7aoxUtgzf',\n",
" 'output': 90},\n",
" {'name': 'add',\n",
" 'args': {'first_int': 1000000, 'second_int': 1000000000},\n",
" 'id': 'call_LUhu2IT3vINxlTc5fCVY6Nhi',\n",
" 'output': 1001000000},\n",
" {'name': 'exponentiate',\n",
" 'args': {'base': 37, 'exponent': 3},\n",
" 'id': 'call_bnCZIXelOKkmcyd4uGXId9Ct',\n",
" 'output': 50653}]"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"chain.invoke(\n",
" \"What's 23 times 7, and what's five times 18 and add a million plus a billion and cube thirty-seven\"\n",
")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.1"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

@ -15,9 +15,30 @@
"id": "14b94240",
"metadata": {},
"source": [
"# How to use tools without function calling\n",
"# How to add ad-hoc tool calling capability to LLMs and Chat Models\n",
"\n",
"In this guide we'll build a Chain that does not rely on any special model APIs (like tool calling, which we showed in the [Quickstart](/docs/how_to/tool_calling)) and instead just prompts the model directly to invoke tools."
":::{.callout-caution}\n",
"\n",
"Some models have been fine-tuned for tool calling and provide a dedicated API for tool calling. Generally, such models are better at tool calling than non-fine-tuned models, and are recommended for use cases that require tool calling. Please see the [how to use a chat model to call tools](/docs/how_to/tool_calling/) guide for more information.\n",
"\n",
":::\n",
"\n",
":::info Prerequisites\n",
"\n",
"This guide assumes familiarity with the following concepts:\n",
"\n",
"- [LangChain Tools](/docs/concepts/#tools)\n",
"- [Function/tool calling](https://python.langchain.com/v0.2/docs/concepts/#functiontool-calling)\n",
"- [Chat models](/docs/concepts/#chat-models)\n",
"- [LLMs](/docs/concepts/#llms)\n",
"\n",
":::\n",
"\n",
"In this guide, we'll see how to add **ad-hoc** tool calling support to a chat model. This is an alternative method to invoke tools if you're using a model that does not natively support [tool calling](/docs/how_to/tool_calling/).\n",
"\n",
"We'll do this by simply writing a prompt that will get the model to invoke the appropriate tools. Here's a diagram of the logic:\n",
"\n",
"![chain](../../static/img/tool_chain.svg)"
]
},
{
@ -37,101 +58,134 @@
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain langchain-openai"
"%pip install --upgrade --quiet langchain langchain-community"
]
},
{
"cell_type": "markdown",
"id": "5e727d22-f861-4eee-882a-688f8efc885e",
"id": "897bc01e-cc2b-4400-8a64-db4aa56085d3",
"metadata": {},
"source": [
"And set these environment variables:"
"If you'd like to use LangSmith, uncomment the below:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "527ef906-0104-4872-b4e5-f371cf73feba",
"execution_count": 26,
"id": "5efb4170-b95b-4d29-8f57-09509f3ba6df",
"metadata": {},
"outputs": [],
"source": [
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"OPENAI_API_KEY\"] = getpass.getpass()\n",
"\n",
"# If you'd like to use LangSmith, uncomment the below:\n",
"# os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
"# os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass()"
]
},
{
"cell_type": "markdown",
"id": "68946881",
"id": "7ec6409b-21e5-4d0a-8a46-c4ef0b055dd3",
"metadata": {},
"source": [
"## Create a tool\n",
"You can select any of the given models for this how-to guide. Keep in mind that most of these models already [support native tool calling](/docs/integrations/chat/), so using the prompting strategy shown here doesn't make sense for these models, and instead you should follow the [how to use a chat model to call tools](/docs/how_to/tool_calling/) guide.\n",
"\n",
"```{=mdx}\n",
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
"\n",
"First, we need to create a tool to call. For this example, we will create a custom tool from a function. For more information on all details related to creating custom tools, please see [this guide](/docs/how_to/custom_tools)."
"<ChatModelTabs openaiParams={`model=\"gpt-4\"`} />\n",
"```\n",
"\n",
"To illustrate the idea, we'll use `phi3` via Ollama, which does **NOT** have native support for tool calling. If you'd like to use `Ollama` as well follow [these instructions](/docs/integrations/chat/ollama/)."
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "90187d07",
"execution_count": 24,
"id": "424be968-2806-4d1a-a6aa-5499ae20fac5",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.tools import tool\n",
"from langchain_community.llms import Ollama\n",
"\n",
"model = Ollama(model=\"phi3\")"
]
},
{
"cell_type": "markdown",
"id": "68946881",
"metadata": {},
"source": [
"## Create a tool\n",
"\n",
"@tool\n",
"def multiply(first_int: int, second_int: int) -> int:\n",
" \"\"\"Multiply two integers together.\"\"\"\n",
" return first_int * second_int"
"First, let's create an `add` and `multiply` tools. For more information on creating custom tools, please see [this guide](/docs/how_to/custom_tools)."
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "d7009e1a",
"execution_count": 4,
"id": "4548e6fa-0f9b-4d7a-8fa5-66cec0350e5f",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"--\n",
"multiply\n",
"multiply(first_int: int, second_int: int) -> int - Multiply two integers together.\n",
"{'first_int': {'title': 'First Int', 'type': 'integer'}, 'second_int': {'title': 'Second Int', 'type': 'integer'}}\n"
"Multiply two numbers together.\n",
"{'x': {'title': 'X', 'type': 'number'}, 'y': {'title': 'Y', 'type': 'number'}}\n",
"--\n",
"add\n",
"Add two numbers.\n",
"{'x': {'title': 'X', 'type': 'integer'}, 'y': {'title': 'Y', 'type': 'integer'}}\n"
]
}
],
"source": [
"print(multiply.name)\n",
"print(multiply.description)\n",
"print(multiply.args)"
"from langchain_core.tools import tool\n",
"\n",
"\n",
"@tool\n",
"def multiply(x: float, y: float) -> float:\n",
" \"\"\"Multiply two numbers together.\"\"\"\n",
" return x * y\n",
"\n",
"\n",
"@tool\n",
"def add(x: int, y: int) -> int:\n",
" \"Add two numbers.\"\n",
" return x + y\n",
"\n",
"\n",
"tools = [multiply, add]\n",
"\n",
"# Let's inspect the tools\n",
"for t in tools:\n",
" print(\"--\")\n",
" print(t.name)\n",
" print(t.description)\n",
" print(t.args)"
]
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 5,
"id": "be77e780",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"20"
"20.0"
]
},
"execution_count": 3,
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"multiply.invoke({\"first_int\": 4, \"second_int\": 5})"
"multiply.invoke({\"x\": 4, \"y\": 5})"
]
},
{
@ -146,48 +200,85 @@
},
{
"cell_type": "code",
"execution_count": 4,
"id": "c64818f0-9364-423c-922e-bdfb8f01e726",
"execution_count": 6,
"id": "2063b564-25ca-4729-a45f-ba4633175b04",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"'multiply: multiply(first_int: int, second_int: int) -> int - Multiply two integers together.'"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
"name": "stdout",
"output_type": "stream",
"text": [
"multiply(x: float, y: float) -> float - Multiply two numbers together.\n",
"add(x: int, y: int) -> int - Add two numbers.\n"
]
}
],
"source": [
"from langchain.tools.render import render_text_description\n",
"from langchain_core.output_parsers import JsonOutputParser\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.tools import render_text_description\n",
"\n",
"rendered_tools = render_text_description([multiply])\n",
"rendered_tools"
"rendered_tools = render_text_description(tools)\n",
"print(rendered_tools)"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "63552d4d-8bd6-4aca-8805-56e236f6552d",
"execution_count": 17,
"id": "f02f1dce-76e7-4ca9-9bac-5af496131fe1",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.prompts import ChatPromptTemplate\n",
"\n",
"system_prompt = f\"\"\"You are an assistant that has access to the following set of tools. Here are the names and descriptions for each tool:\n",
"system_prompt = f\"\"\"\\\n",
"You are an assistant that has access to the following set of tools. \n",
"Here are the names and descriptions for each tool:\n",
"\n",
"{rendered_tools}\n",
"\n",
"Given the user input, return the name and input of the tool to use. Return your response as a JSON blob with 'name' and 'arguments' keys.\"\"\"\n",
"Given the user input, return the name and input of the tool to use. \n",
"Return your response as a JSON blob with 'name' and 'arguments' keys.\n",
"\n",
"The `arguments` should be a dictionary, with keys corresponding \n",
"to the argument names and the values corresponding to the requested values.\n",
"\"\"\"\n",
"\n",
"prompt = ChatPromptTemplate.from_messages(\n",
" [(\"system\", system_prompt), (\"user\", \"{input}\")]\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 18,
"id": "f8623e03-60eb-4439-b57b-ecbcebc61b58",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{\n",
" \"name\": \"add\",\n",
" \"arguments\": {\n",
" \"x\": 3,\n",
" \"y\": 1132\n",
" }\n",
"}\n"
]
}
],
"source": [
"chain = prompt | model\n",
"message = chain.invoke({\"input\": \"what's 3 plus 1132\"})\n",
"\n",
"# Let's take a look at the output from the model\n",
"# if the model is an LLM (not a chat model), the output will be a string.\n",
"if isinstance(message, str):\n",
" print(message)\n",
"else: # Otherwise it's a chat model\n",
" print(message.content)"
]
},
{
"cell_type": "markdown",
"id": "14df2cd5-b6fa-4b10-892d-e8692c7931e5",
@ -200,156 +291,153 @@
},
{
"cell_type": "code",
"execution_count": 7,
"execution_count": 19,
"id": "f129f5bd-127c-4c95-8f34-8f437da7ca8f",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'name': 'multiply', 'arguments': {'first_int': 13, 'second_int': 4}}"
"{'name': 'multiply', 'arguments': {'x': 13.0, 'y': 4.0}}"
]
},
"execution_count": 7,
"execution_count": 19,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from langchain_core.output_parsers import JsonOutputParser\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"model = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)\n",
"chain = prompt | model | JsonOutputParser()\n",
"chain.invoke({\"input\": \"what's thirteen times 4\"})"
]
},
{
"cell_type": "markdown",
"id": "8e29dd4c-8eb5-457f-92d1-8add076404dc",
"id": "e1f08255-f146-4f4a-be43-5c21c1d3ae83",
"metadata": {},
"source": [
"## Invoking the tool\n",
":::{.callout-important}\n",
"\n",
"We can invoke the tool as part of the chain by passing along the model-generated \"arguments\" to it:"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "0555b384-fde6-4404-86e0-7ea199003d58",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"52"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from operator import itemgetter\n",
"🎉 Amazing! 🎉 We now instructed our model on how to **request** that a tool be invoked.\n",
"\n",
"chain = prompt | model | JsonOutputParser() | itemgetter(\"arguments\") | multiply\n",
"chain.invoke({\"input\": \"what's thirteen times 4\"})"
"Now, let's create some logic to actually run the tool!\n",
":::"
]
},
{
"cell_type": "markdown",
"id": "8d60b2cb-6ce0-48fc-8d18-d2337161a53d",
"id": "8e29dd4c-8eb5-457f-92d1-8add076404dc",
"metadata": {},
"source": [
"## Choosing from multiple tools\n",
"## Invoking the tool 🏃\n",
"\n",
"Now that the model can request that a tool be invoked, we need to write a function that can actually invoke \n",
"the tool.\n",
"\n",
"Suppose we have multiple tools we want the chain to be able to choose from:"
"The function will select the appropriate tool by name, and pass to it the arguments chosen by the model."
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "95c86d32-ee45-4c87-a28c-14eff19b49e9",
"execution_count": 20,
"id": "faee95e0-4095-4310-991f-9e9465c6738e",
"metadata": {},
"outputs": [],
"source": [
"@tool\n",
"def add(first_int: int, second_int: int) -> int:\n",
" \"Add two integers.\"\n",
" return first_int + second_int\n",
"from typing import Any, Dict, Optional, TypedDict\n",
"\n",
"from langchain_core.runnables import RunnableConfig\n",
"\n",
"@tool\n",
"def exponentiate(base: int, exponent: int) -> int:\n",
" \"Exponentiate the base to the exponent power.\"\n",
" return base**exponent"
"\n",
"class ToolCallRequest(TypedDict):\n",
" \"\"\"A typed dict that shows the inputs into the invoke_tool function.\"\"\"\n",
"\n",
" name: str\n",
" arguments: Dict[str, Any]\n",
"\n",
"\n",
"def invoke_tool(\n",
" tool_call_request: ToolCallRequest, config: Optional[RunnableConfig] = None\n",
"):\n",
" \"\"\"A function that we can use the perform a tool invocation.\n",
"\n",
" Args:\n",
" tool_call_request: a dict that contains the keys name and arguments.\n",
" The name must match the name of a tool that exists.\n",
" The arguments are the arguments to that tool.\n",
" config: This is configuration information that LangChain uses that contains\n",
" things like callbacks, metadata, etc.See LCEL documentation about RunnableConfig.\n",
"\n",
" Returns:\n",
" output from the requested tool\n",
" \"\"\"\n",
" tool_name_to_tool = {tool.name: tool for tool in tools}\n",
" name = tool_call_request[\"name\"]\n",
" requested_tool = tool_name_to_tool[name]\n",
" return requested_tool.invoke(tool_call_request[\"arguments\"], config=config)"
]
},
{
"cell_type": "markdown",
"id": "748405ff-4c85-4bd7-82e1-30458b5a4106",
"id": "f4957532-9e0c-47f6-bb62-0fd789ac1d3e",
"metadata": {},
"source": [
"With function calling, we can do this like so:"
"Let's test this out 🧪!"
]
},
{
"cell_type": "markdown",
"id": "eb3aa89e-40e1-45ec-b1f3-ab28cfc8e42d",
"cell_type": "code",
"execution_count": 21,
"id": "d0ea3b2a-8fb2-4016-83c8-a5d3e78fedbc",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"15.0"
]
},
"execution_count": 21,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"If we want to run the model selected tool, we can do so using a function that returns the tool based on the model output. Specifically, our function will action return it's own subchain that gets the \"arguments\" part of the model output and passes it to the chosen tool:"
"invoke_tool({\"name\": \"multiply\", \"arguments\": {\"x\": 3, \"y\": 5}})"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "db254773-5b8e-43d0-aabe-c21566c154cd",
"cell_type": "markdown",
"id": "715af6e1-935d-4bc0-a3d2-646ecf8a329b",
"metadata": {},
"outputs": [],
"source": [
"tools = [add, exponentiate, multiply]\n",
"## Let's put it together\n",
"\n",
"\n",
"def tool_chain(model_output):\n",
" tool_map = {tool.name: tool for tool in tools}\n",
" chosen_tool = tool_map[model_output[\"name\"]]\n",
" return itemgetter(\"arguments\") | chosen_tool"
"Let's put it together into a chain that creates a calculator with add and multiplication capabilities."
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "ad9f5cff-b86a-45fc-9ce4-b0aa9025a378",
"execution_count": 22,
"id": "0555b384-fde6-4404-86e0-7ea199003d58",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"1135"
"53.83784653"
]
},
"execution_count": 14,
"execution_count": 22,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"rendered_tools = render_text_description(tools)\n",
"system_prompt = f\"\"\"You are an assistant that has access to the following set of tools. Here are the names and descriptions for each tool:\n",
"\n",
"{rendered_tools}\n",
"\n",
"Given the user input, return the name and input of the tool to use. Return your response as a JSON blob with 'name' and 'arguments' keys.\"\"\"\n",
"\n",
"prompt = ChatPromptTemplate.from_messages(\n",
" [(\"system\", system_prompt), (\"user\", \"{input}\")]\n",
")\n",
"\n",
"chain = prompt | model | JsonOutputParser() | tool_chain\n",
"chain.invoke({\"input\": \"what's 3 plus 1132\"})"
"chain = prompt | model | JsonOutputParser() | invoke_tool\n",
"chain.invoke({\"input\": \"what's thirteen times 4.14137281\"})"
]
},
{
@ -364,19 +452,19 @@
},
{
"cell_type": "code",
"execution_count": 15,
"execution_count": 23,
"id": "45404406-859d-4caa-8b9d-5838162c80a0",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'name': 'add',\n",
" 'arguments': {'first_int': 3, 'second_int': 1132},\n",
" 'output': 1135}"
"{'name': 'multiply',\n",
" 'arguments': {'x': 13, 'y': 4.14137281},\n",
" 'output': 53.83784653}"
]
},
"execution_count": 15,
"execution_count": 23,
"metadata": {},
"output_type": "execute_result"
}
@ -385,9 +473,26 @@
"from langchain_core.runnables import RunnablePassthrough\n",
"\n",
"chain = (\n",
" prompt | model | JsonOutputParser() | RunnablePassthrough.assign(output=tool_chain)\n",
" prompt | model | JsonOutputParser() | RunnablePassthrough.assign(output=invoke_tool)\n",
")\n",
"chain.invoke({\"input\": \"what's 3 plus 1132\"})"
"chain.invoke({\"input\": \"what's thirteen times 4.14137281\"})"
]
},
{
"cell_type": "markdown",
"id": "1797fe82-ea35-4cba-834a-1caf9740d184",
"metadata": {},
"source": [
"## What's next?\n",
"\n",
"This how-to guide shows the \"happy path\" when the model correctly outputs all the required tool information.\n",
"\n",
"In reality, if you're using more complex tools, you will start encountering errors from the model, especially for models that have not been fine tuned for tool calling and for less capable models.\n",
"\n",
"You will need to be prepared to add strategies to improve the output from the model; e.g.,\n",
"\n",
"1. Provide few shot examples.\n",
"2. Add error handling (e.g., catch the exception and feed it back to the LLM to ask it to correct its previous output)."
]
  },
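  {
   "cell_type": "markdown",
   "id": "error-handling-sketch-note",
   "metadata": {},
   "source": [
    "As a rough sketch of point 2 above (not part of the original guide), the hypothetical `invoke_with_retry` helper below catches parsing and tool-lookup failures and feeds the error text back into the prompt so the model can try again. Treat it as an illustration of the idea rather than a recommended implementation:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "error-handling-sketch-code",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hypothetical sketch: retry the chain, feeding errors back to the model.\n",
    "from langchain_core.exceptions import OutputParserException\n",
    "\n",
    "\n",
    "def invoke_with_retry(user_input: str, max_attempts: int = 2):\n",
    "    \"\"\"Retry on parse/lookup errors, appending the error text to the input.\"\"\"\n",
    "    error_note = \"\"\n",
    "    for _ in range(max_attempts):\n",
    "        try:\n",
    "            request = (prompt | model | JsonOutputParser()).invoke(\n",
    "                {\"input\": user_input + error_note}\n",
    "            )\n",
    "            return invoke_tool(request)\n",
    "        except (OutputParserException, KeyError) as e:\n",
    "            # Let the model see what went wrong and correct its previous output.\n",
    "            error_note = f\"\\n\\nYour previous reply failed with: {e!r}. Return valid JSON.\"\n",
    "    raise ValueError(\"Model did not produce a usable tool call.\")\n",
    "\n",
    "\n",
    "# invoke_with_retry(\"what's thirteen times 4.14137281\")"
   ]
  }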
],
@ -407,7 +512,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.4"
"version": "3.11.4"
}
},
"nbformat": 4,

@ -80,7 +80,7 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 1,
"id": "238bdbaa-526a-4130-89e9-523aa44bb196",
"metadata": {},
"outputs": [],
@ -250,16 +250,7 @@
"execution_count": 3,
"id": "42f87466-cb8e-490d-a9f8-aa0f8e9b4217",
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/Users/bagatur/langchain/libs/core/langchain_core/_api/beta_decorator.py:87: LangChainBetaWarning: The function `bind_tools` is in beta. It is actively being worked on, so the API may change.\n",
" warn_beta(\n"
]
}
],
"outputs": [],
"source": [
"from langchain_core.pydantic_v1 import BaseModel, Field\n",
"\n",
@ -369,13 +360,49 @@
"id": "90e015e0-c6e5-4ff5-8fb9-be0cd3c86395",
"metadata": {},
"source": [
"::: {.callout-tip}\n",
":::tip\n",
"\n",
"ChatAnthropic model outputs are always a single AI message that can have either a single string or a list of content blocks. The content blocks can be text blocks or tool-duse blocks. There can be multiple of each and they can be interspersed.\n",
"\n",
":::"
]
},
{
"cell_type": "markdown",
"id": "b5145dea-0183-4cab-b9e2-0e35fb8370cf",
"metadata": {},
"source": [
"### Forcing tool calls\n",
"\n",
"By default the model can choose whether to call any tools. To force the model to call at least one tool we can specify `bind_tools(..., tool_choice=\"any\")` and to force the model to call a specific tool we can pass in that tool name `bind_tools(..., tool_choice=\"GetWeather\")`"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "05993626-060c-449f-8069-e52d31442977",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[{'name': 'GetWeather',\n",
" 'args': {'location': '<UNKNOWN>'},\n",
" 'id': 'toolu_01DwWjKzHPs6EHCUPxsGm9bN'}]"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"llm_with_force_tools = llm.bind_tools([GetWeather], tool_choice=\"GetWeather\")\n",
"# Notice the model will still return tool calls despite a message that\n",
"# doesn't have anything to do with the tools.\n",
"llm_with_force_tools.invoke(\"this doesn't really require tool use\").tool_calls"
]
},
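  {
   "cell_type": "markdown",
   "id": "force-any-tool-note",
   "metadata": {},
   "source": [
    "As a complementary sketch (not in the original notebook), `tool_choice=\"any\"` leaves the choice of tool to the model while still forcing at least one call:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "force-any-tool-code",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hedged sketch: force at least one tool call without pinning a specific tool.\n",
    "llm_with_any_tool = llm.bind_tools([GetWeather], tool_choice=\"any\")\n",
    "llm_with_any_tool.invoke(\"this doesn't really require tool use\").tool_calls"
   ]
  },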
{
"cell_type": "markdown",
"id": "8652ee98-814c-4ed6-9def-275eeaa9651e",
@ -656,9 +683,9 @@
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"display_name": "poetry-venv-2",
"language": "python",
"name": "python3"
"name": "poetry-venv-2"
},
"language_info": {
"codemirror_mode": {
@ -670,7 +697,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.4"
"version": "3.9.1"
}
},
"nbformat": 4,

@ -54,18 +54,9 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": null,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Note: you may need to restart the kernel to use updated packages.\n",
"Note: you may need to restart the kernel to use updated packages.\n"
]
}
],
"outputs": [],
"source": [
"# Install Langchain community and core packages\n",
"%pip install --upgrade --quiet langchain-core langchain-community\n",
@ -123,126 +114,37 @@
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": 2,
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>username</th>\n",
" <th>name</th>\n",
" <th>sex</th>\n",
" <th>address</th>\n",
" <th>mail</th>\n",
" <th>birthdate</th>\n",
" </tr>\n",
" <tr>\n",
" <th>id</th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" <th></th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>eduardo69</td>\n",
" <td>Haley Beck</td>\n",
" <td>F</td>\n",
" <td>59836 Carla Causeway Suite 939\\nPort Eugene, I...</td>\n",
" <td>meltondenise@yahoo.com</td>\n",
" <td>1997-11-23</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1</th>\n",
" <td>lbarrera</td>\n",
" <td>Joshua Stephens</td>\n",
" <td>M</td>\n",
" <td>3108 Christina Forges\\nPort Timothychester, KY...</td>\n",
" <td>erica80@hotmail.com</td>\n",
" <td>1924-07-19</td>\n",
" </tr>\n",
" <tr>\n",
" <th>2</th>\n",
" <td>bburton</td>\n",
" <td>Paula Kaiser</td>\n",
" <td>F</td>\n",
" <td>Unit 7405 Box 3052\\nDPO AE 09858</td>\n",
" <td>timothypotts@gmail.com</td>\n",
" <td>1933-11-20</td>\n",
" </tr>\n",
" <tr>\n",
" <th>3</th>\n",
" <td>melissa49</td>\n",
" <td>Wendy Reese</td>\n",
" <td>F</td>\n",
" <td>6408 Christopher Hill Apt. 459\\nNew Benjamin, ...</td>\n",
" <td>dadams@gmail.com</td>\n",
" <td>1988-10-11</td>\n",
" </tr>\n",
" <tr>\n",
" <th>4</th>\n",
" <td>melissacarter</td>\n",
" <td>Manuel Rios</td>\n",
" <td>M</td>\n",
" <td>2241 Bell Gardens Suite 723\\nScottside, CA 38463</td>\n",
" <td>williamayala@gmail.com</td>\n",
" <td>1931-03-04</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" username name sex \\\n",
"id \n",
"0 eduardo69 Haley Beck F \n",
"1 lbarrera Joshua Stephens M \n",
"2 bburton Paula Kaiser F \n",
"3 melissa49 Wendy Reese F \n",
"4 melissacarter Manuel Rios M \n",
"\n",
" address mail \\\n",
"id \n",
"0 59836 Carla Causeway Suite 939\\nPort Eugene, I... meltondenise@yahoo.com \n",
"1 3108 Christina Forges\\nPort Timothychester, KY... erica80@hotmail.com \n",
"2 Unit 7405 Box 3052\\nDPO AE 09858 timothypotts@gmail.com \n",
"3 6408 Christopher Hill Apt. 459\\nNew Benjamin, ... dadams@gmail.com \n",
"4 2241 Bell Gardens Suite 723\\nScottside, CA 38463 williamayala@gmail.com \n",
"\n",
" birthdate \n",
"id \n",
"0 1997-11-23 \n",
"1 1924-07-19 \n",
"2 1933-11-20 \n",
"3 1988-10-11 \n",
"4 1931-03-04 "
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
"name": "stdout",
"output_type": "stream",
"text": [
" username name sex \\\n",
"id \n",
"0 eduardo69 Haley Beck F \n",
"1 lbarrera Joshua Stephens M \n",
"2 bburton Paula Kaiser F \n",
"3 melissa49 Wendy Reese F \n",
"4 melissacarter Manuel Rios M \n",
"\n",
" address mail \\\n",
"id \n",
"0 59836 Carla Causeway Suite 939\\nPort Eugene, I... meltondenise@yahoo.com \n",
"1 3108 Christina Forges\\nPort Timothychester, KY... erica80@hotmail.com \n",
"2 Unit 7405 Box 3052\\nDPO AE 09858 timothypotts@gmail.com \n",
"3 6408 Christopher Hill Apt. 459\\nNew Benjamin, ... dadams@gmail.com \n",
"4 2241 Bell Gardens Suite 723\\nScottside, CA 38463 williamayala@gmail.com \n",
"\n",
" birthdate \n",
"id \n",
"0 1997-12-01 \n",
"1 1924-07-27 \n",
"2 1933-11-28 \n",
"3 1988-10-19 \n",
"4 1931-03-12 \n"
]
}
],
"source": [
@ -263,7 +165,7 @@
"\n",
"\n",
"load_df = pd.DataFrame.from_records(data=profile_gen(100), index=\"id\")\n",
"load_df.head()"
"print(load_df.head())"
]
},
{
@ -279,85 +181,17 @@
"metadata": {},
"outputs": [
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>name</th>\n",
" <th>type</th>\n",
" <th>properties</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>username</td>\n",
" <td>string</td>\n",
" <td>[char32]</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1</th>\n",
" <td>name</td>\n",
" <td>string</td>\n",
" <td>[char32]</td>\n",
" </tr>\n",
" <tr>\n",
" <th>2</th>\n",
" <td>sex</td>\n",
" <td>string</td>\n",
" <td>[char1]</td>\n",
" </tr>\n",
" <tr>\n",
" <th>3</th>\n",
" <td>address</td>\n",
" <td>string</td>\n",
" <td>[char64]</td>\n",
" </tr>\n",
" <tr>\n",
" <th>4</th>\n",
" <td>mail</td>\n",
" <td>string</td>\n",
" <td>[char32]</td>\n",
" </tr>\n",
" <tr>\n",
" <th>5</th>\n",
" <td>birthdate</td>\n",
" <td>long</td>\n",
" <td>[timestamp]</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" name type properties\n",
"0 username string [char32]\n",
"1 name string [char32]\n",
"2 sex string [char1]\n",
"3 address string [char64]\n",
"4 mail string [char32]\n",
"5 birthdate long [timestamp]"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
"name": "stdout",
"output_type": "stream",
"text": [
" name type properties\n",
"0 username string [char32]\n",
"1 name string [char32]\n",
"2 sex string [char2]\n",
"3 address string [char64]\n",
"4 mail string [char32]\n",
"5 birthdate long [timestamp]\n"
]
}
],
"source": [
@ -372,7 +206,7 @@
")\n",
"\n",
"# See the Kinetica column types\n",
"gpudb_table.type_as_df()"
"print(gpudb_table.type_as_df())"
]
},
{
@ -394,10 +228,7 @@
{
"data": {
"text/plain": [
"{'status': 'OK',\n",
" 'message': '',\n",
" 'data_type': 'execute_sql_response',\n",
" 'response_time': 0.0148}"
"1"
]
},
"execution_count": 4,
@ -408,34 +239,23 @@
"source": [
"# create an LLM context for the table.\n",
"\n",
"from gpudb import GPUdbException\n",
"\n",
"sql = f\"\"\"\n",
"CREATE OR REPLACE CONTEXT {kinetica_ctx}\n",
"(\n",
" TABLE = demo.test_profiles\n",
" TABLE = {table_name}\n",
" COMMENT = 'Contains user profiles.'\n",
"),\n",
"(\n",
" SAMPLES = (\n",
" 'How many male users are there?' = \n",
" 'select count(1) as num_users\n",
" from demo.test_profiles\n",
" from {table_name}\n",
" where sex = ''M'';')\n",
")\n",
"\"\"\"\n",
"\n",
"\n",
"def _check_error(response: dict) -> None:\n",
" status = response[\"status_info\"][\"status\"]\n",
" if status != \"OK\":\n",
" message = response[\"status_info\"][\"message\"]\n",
" raise GPUdbException(\"[%s]: %s\" % (status, message))\n",
"\n",
"\n",
"response = kinetica_llm.kdbc.execute_sql(sql)\n",
"_check_error(response)\n",
"response[\"status_info\"]"
"count_affected = kinetica_llm.kdbc.execute(sql)\n",
"count_affected"
]
},
{
@ -462,16 +282,16 @@
"text": [
"================================\u001b[1m System Message \u001b[0m================================\n",
"\n",
"CREATE TABLE demo.test_profiles AS\n",
"CREATE TABLE demo.user_profiles AS\n",
"(\n",
" username VARCHAR (32) NOT NULL,\n",
" name VARCHAR (32) NOT NULL,\n",
" sex VARCHAR (1) NOT NULL,\n",
" sex VARCHAR (2) NOT NULL,\n",
" address VARCHAR (64) NOT NULL,\n",
" mail VARCHAR (32) NOT NULL,\n",
" birthdate TIMESTAMP NOT NULL\n",
");\n",
"COMMENT ON TABLE demo.test_profiles IS 'Contains user profiles.';\n",
"COMMENT ON TABLE demo.user_profiles IS 'Contains user profiles.';\n",
"\n",
"================================\u001b[1m Human Message \u001b[0m=================================\n",
"\n",
@ -480,7 +300,7 @@
"==================================\u001b[1m Ai Message \u001b[0m==================================\n",
"\n",
"select count(1) as num_users\n",
" from demo.test_profiles\n",
" from demo.user_profiles\n",
" where sex = 'M';\n",
"\n",
"================================\u001b[1m Human Message \u001b[0m=================================\n",
@ -545,78 +365,16 @@
"output_type": "stream",
"text": [
"SQL: SELECT username, name\n",
" FROM demo.test_profiles\n",
" FROM demo.user_profiles\n",
" WHERE sex = 'F'\n",
" ORDER BY username;\n"
" ORDER BY username;\n",
" username name\n",
"0 alexander40 Tina Ramirez\n",
"1 bburton Paula Kaiser\n",
"2 brian12 Stefanie Williams\n",
"3 brownanna Jennifer Rowe\n",
"4 carl19 Amanda Potts\n"
]
},
{
"data": {
"text/html": [
"<div>\n",
"<style scoped>\n",
" .dataframe tbody tr th:only-of-type {\n",
" vertical-align: middle;\n",
" }\n",
"\n",
" .dataframe tbody tr th {\n",
" vertical-align: top;\n",
" }\n",
"\n",
" .dataframe thead th {\n",
" text-align: right;\n",
" }\n",
"</style>\n",
"<table border=\"1\" class=\"dataframe\">\n",
" <thead>\n",
" <tr style=\"text-align: right;\">\n",
" <th></th>\n",
" <th>username</th>\n",
" <th>name</th>\n",
" </tr>\n",
" </thead>\n",
" <tbody>\n",
" <tr>\n",
" <th>0</th>\n",
" <td>alexander40</td>\n",
" <td>Tina Ramirez</td>\n",
" </tr>\n",
" <tr>\n",
" <th>1</th>\n",
" <td>bburton</td>\n",
" <td>Paula Kaiser</td>\n",
" </tr>\n",
" <tr>\n",
" <th>2</th>\n",
" <td>brian12</td>\n",
" <td>Stefanie Williams</td>\n",
" </tr>\n",
" <tr>\n",
" <th>3</th>\n",
" <td>brownanna</td>\n",
" <td>Jennifer Rowe</td>\n",
" </tr>\n",
" <tr>\n",
" <th>4</th>\n",
" <td>carl19</td>\n",
" <td>Amanda Potts</td>\n",
" </tr>\n",
" </tbody>\n",
"</table>\n",
"</div>"
],
"text/plain": [
" username name\n",
"0 alexander40 Tina Ramirez\n",
"1 bburton Paula Kaiser\n",
"2 brian12 Stefanie Williams\n",
"3 brownanna Jennifer Rowe\n",
"4 carl19 Amanda Potts"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
@ -626,7 +384,7 @@
")\n",
"\n",
"print(f\"SQL: {response.sql}\")\n",
"response.dataframe.head()"
"print(response.dataframe.head())"
]
}
],
@ -646,7 +404,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.4"
"version": "3.8.19"
}
},
"nbformat": 4,

@ -147,7 +147,7 @@
"\n",
"### ChatOpenAI.bind_tools()\n",
"\n",
"With `ChatAnthropic.bind_tools`, we can easily pass in Pydantic classes, dict schemas, LangChain tools, or even functions as tools to the model. Under the hood these are converted to an Anthropic tool schemas, which looks like:\n",
"With `ChatOpenAI.bind_tools`, we can easily pass in Pydantic classes, dict schemas, LangChain tools, or even functions as tools to the model. Under the hood these are converted to an OpenAI tool schemas, which looks like:\n",
"```\n",
"{\n",
" \"name\": \"...\",\n",

@ -26,14 +26,22 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 1,
"metadata": {
"collapsed": false,
"jupyter": {
"outputs_hidden": false
}
},
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Note: you may need to restart the kernel to use updated packages.\n"
]
}
],
"source": [
"# Install the package\n",
"%pip install --upgrade --quiet dashscope"
@ -48,15 +56,7 @@
"outputs_hidden": false
}
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
" ········\n"
]
}
],
"outputs": [],
"source": [
"# Get a new token: https://help.aliyun.com/document_detail/611472.html?spm=a2c4g.2399481.0.0\n",
"from getpass import getpass\n",
@ -94,8 +94,12 @@
"name": "stdout",
"output_type": "stream",
"text": [
"chat resp: content='Hello! How' additional_kwargs={} example=False\n",
"chat resp: content=' can I assist you today?' additional_kwargs={} example=False\n"
"chat resp: content='Hello' id='run-f2301962-6d46-423c-8afa-1e667bd11e2b'\n",
"chat resp: content='!' id='run-f2301962-6d46-423c-8afa-1e667bd11e2b'\n",
"chat resp: content=' How' id='run-f2301962-6d46-423c-8afa-1e667bd11e2b'\n",
"chat resp: content=' can I assist you today' id='run-f2301962-6d46-423c-8afa-1e667bd11e2b'\n",
"chat resp: content='?' id='run-f2301962-6d46-423c-8afa-1e667bd11e2b'\n",
"chat resp: content='' response_metadata={'finish_reason': 'stop', 'request_id': '921db2c5-4d53-9a89-8e87-e4ad6a671237', 'token_usage': {'input_tokens': 20, 'output_tokens': 9, 'total_tokens': 29}} id='run-f2301962-6d46-423c-8afa-1e667bd11e2b'\n"
]
}
],
@ -116,10 +120,18 @@
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/Users/cheese/PARA/Projects/langchain-contribution/langchain/libs/core/langchain_core/_api/deprecation.py:119: LangChainDeprecationWarning: The method `BaseChatModel.__call__` was deprecated in langchain-core 0.1.7 and will be removed in 0.2.0. Use invoke instead.\n",
" warn_deprecated(\n"
]
},
{
"data": {
"text/plain": [
"AIMessageChunk(content=\"J'aime programmer.\", additional_kwargs={}, example=False)"
"AIMessage(content=\"J'adore programmer.\", response_metadata={'model_name': 'qwen-turbo', 'finish_reason': 'stop', 'request_id': 'ae725086-0ffa-9728-8c72-b204c7bc7eeb', 'token_usage': {'input_tokens': 36, 'output_tokens': 6, 'total_tokens': 42}}, id='run-060cc103-ef5f-4c8a-af40-792ac7f40c26-0')"
]
},
"execution_count": 5,
@ -149,18 +161,65 @@
"ChatTongyi supports tool calling API that lets you describe tools and their arguments, and have the model return a JSON object with a tool to invoke and the inputs to that tool."
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Use with `bind_tools`"
]
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"content='' additional_kwargs={'tool_calls': [{'function': {'name': 'multiply', 'arguments': '{\"first_int\": 5, \"second_int\": 42}'}, 'id': '', 'type': 'function'}]} response_metadata={'model_name': 'qwen-turbo', 'finish_reason': 'tool_calls', 'request_id': '4acf0e36-44af-987a-a0c0-8b5c5eaa1a8b', 'token_usage': {'input_tokens': 200, 'output_tokens': 25, 'total_tokens': 225}} id='run-0ecd0f09-1d20-4e55-a4f3-f14d1f710ae7-0' tool_calls=[{'name': 'multiply', 'args': {'first_int': 5, 'second_int': 42}, 'id': ''}]\n"
]
}
],
"source": [
"from langchain_community.chat_models.tongyi import ChatTongyi\n",
"from langchain_core.tools import tool\n",
"\n",
"\n",
"@tool\n",
"def multiply(first_int: int, second_int: int) -> int:\n",
" \"\"\"Multiply two integers together.\"\"\"\n",
" return first_int * second_int\n",
"\n",
"\n",
"llm = ChatTongyi(model=\"qwen-turbo\")\n",
"\n",
"llm_with_tools = llm.bind_tools([multiply])\n",
"\n",
"msg = llm_with_tools.invoke(\"What's 5 times forty two\")\n",
"\n",
"print(msg)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Construct args manually"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"AIMessage(content='', additional_kwargs={'tool_calls': [{'function': {'name': 'get_current_weather', 'arguments': '{\"location\": \"San Francisco\"}'}, 'id': '', 'type': 'function'}]}, response_metadata={'model_name': 'qwen-turbo', 'finish_reason': 'tool_calls', 'request_id': 'dae79197-8780-9b7e-8c15-6a83e2a53534', 'token_usage': {'input_tokens': 229, 'output_tokens': 19, 'total_tokens': 248}}, id='run-9e06f837-582b-473b-bb1f-5e99a68ecc10-0', tool_calls=[{'name': 'get_current_weather', 'args': {'location': 'San Francisco'}, 'id': ''}])"
"AIMessage(content='', additional_kwargs={'tool_calls': [{'function': {'name': 'get_current_weather', 'arguments': '{\"location\": \"San Francisco\"}'}, 'id': '', 'type': 'function'}]}, response_metadata={'model_name': 'qwen-turbo', 'finish_reason': 'tool_calls', 'request_id': '87ef33d2-5c6b-9457-91e2-39faad7120eb', 'token_usage': {'input_tokens': 229, 'output_tokens': 19, 'total_tokens': 248}}, id='run-7939ba7f-e3f7-46f8-980b-30499b52723c-0', tool_calls=[{'name': 'get_current_weather', 'args': {'location': 'San Francisco'}, 'id': ''}])"
]
},
"execution_count": 5,
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
@ -224,7 +283,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.12"
"version": "3.12.2"
}
},
"nbformat": 4,

@ -6,11 +6,17 @@
"source": [
"# Browserbase\n",
"\n",
"[Browserbase](https://browserbase.com) is a serverless platform for running headless browsers, it offers advanced debugging, session recordings, stealth mode, integrated proxies and captcha solving.\n",
"[Browserbase](https://browserbase.com) is a developer platform to reliably run, manage, and monitor headless browsers.\n",
"\n",
"## Installation\n",
"Power your AI data retrievals with:\n",
"- [Serverless Infrastructure](https://docs.browserbase.com/under-the-hood) providing reliable browsers to extract data from complex UIs\n",
"- [Stealth Mode](https://docs.browserbase.com/features/stealth-mode) with included fingerprinting tactics and automatic captcha solving\n",
"- [Session Debugger](https://docs.browserbase.com/features/sessions) to inspect your Browser Session with networks timeline and logs\n",
"- [Live Debug](https://docs.browserbase.com/guides/session-debug-connection/browser-remote-control) to quickly debug your automation\n",
"\n",
"- Get an API key from [browserbase.com](https://browserbase.com) and set it in environment variables (`BROWSERBASE_API_KEY`).\n",
"## Installation and Setup\n",
"\n",
"- Get an API key and Project ID from [browserbase.com](https://browserbase.com) and set it in environment variables (`BROWSERBASE_API_KEY`, `BROWSERBASE_PROJECT_ID`).\n",
"- Install the [Browserbase SDK](http://github.com/browserbase/python-sdk):"
]
},
@ -64,6 +70,20 @@
"print(docs[0].page_content[:61])"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Loader Options\n",
"\n",
"- `urls` Required. A list of URLs to fetch.\n",
"- `text_content` Retrieve only text content. Default is `False`.\n",
"- `api_key` Optional. Browserbase API key. Default is `BROWSERBASE_API_KEY` env variable.\n",
"- `project_id` Optional. Browserbase Project ID. Default is `BROWSERBASE_PROJECT_ID` env variable.\n",
"- `session_id` Optional. Provide an existing Session ID.\n",
"- `proxy` Optional. Enable/Disable Proxies."
]
},
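  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Below is a minimal, hedged sketch of how these options can be combined; the values are illustrative and not from the original guide:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain_community.document_loaders import BrowserbaseLoader\n",
    "\n",
    "# Illustrative values only. The API key and Project ID fall back to the\n",
    "# BROWSERBASE_API_KEY / BROWSERBASE_PROJECT_ID environment variables.\n",
    "loader = BrowserbaseLoader(\n",
    "    urls=[\"https://example.com\"],\n",
    "    text_content=True,  # retrieve only text content\n",
    ")\n",
    "docs = loader.load()"
   ]
  },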
{
"cell_type": "markdown",
"metadata": {},

@ -916,9 +916,29 @@
"source": [
"## `Cassandra` caches\n",
"\n",
"You can use Cassandra / Astra DB through CQL for caching LLM responses, choosing from the exact-match `CassandraCache` or the (vector-similarity-based) `CassandraSemanticCache`.\n",
"> [Apache Cassandra®](https://cassandra.apache.org/) is a NoSQL, row-oriented, highly scalable and highly available database. Starting with version 5.0, the database ships with [vector search capabilities](https://cassandra.apache.org/doc/trunk/cassandra/vector-search/overview.html).\n",
"\n",
"Let's see both in action in the following cells."
"You can use Cassandra for caching LLM responses, choosing from the exact-match `CassandraCache` or the (vector-similarity-based) `CassandraSemanticCache`.\n",
"\n",
"Let's see both in action. The next cells guide you through the (little) required setup, and the following cells showcase the two available cache classes."
]
},
{
"cell_type": "markdown",
"id": "6cf6acb4-1bc4-4c4b-9325-2420c17e5e2b",
"metadata": {},
"source": [
"### Required dependency"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "fe842b0d-fd3d-47dd-bc6a-975997c9707f",
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet \"cassio>=0.1.4\""
]
},
{
@ -926,54 +946,125 @@
"id": "a4a6725d",
"metadata": {},
"source": [
"#### Connect to the DB\n",
"### Connect to the DB\n",
"\n",
"The Cassandra caches shown in this page can be used with Cassandra as well as other derived databases, such as Astra DB, which use the CQL (Cassandra Query Language) protocol.\n",
"\n",
"> DataStax [Astra DB](https://docs.datastax.com/en/astra-serverless/docs/vector-search/quickstart.html) is a managed serverless database built on Cassandra, offering the same interface and strengths.\n",
"\n",
"Depending on whether you connect to a Cassandra cluster or to Astra DB through CQL, you will provide different parameters when instantiating the cache (through initialization of a CassIO connection)."
]
},
{
"cell_type": "markdown",
"id": "15735abe-2567-43ce-aa91-f253b33b5a88",
"metadata": {},
"source": [
"#### Connecting to a Cassandra cluster\n",
"\n",
"First you need to establish a `Session` to the DB and to specify a _keyspace_ for the cache table(s). The following gets you connected to Astra DB through CQL (see e.g. [here](https://cassio.org/start_here/#vector-database) for more backends and connection options)."
"You first need to create a `cassandra.cluster.Session` object, as described in the [Cassandra driver documentation](https://docs.datastax.com/en/developer/python-driver/latest/api/cassandra/cluster/#module-cassandra.cluster). The details vary (e.g. with network settings and authentication), but this might be something like:"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "cc53ce1b",
"id": "e4b898a5-fe0e-4f11-a87b-7979652322a7",
"metadata": {},
"outputs": [],
"source": [
"from cassandra.cluster import Cluster\n",
"\n",
"cluster = Cluster([\"127.0.0.1\"])\n",
"session = cluster.connect()"
]
},
{
"cell_type": "markdown",
"id": "6435198e-8713-4045-906b-879613bf5083",
"metadata": {},
"source": [
"You can now set the session, along with your desired keyspace name, as a global CassIO parameter:"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "992267dc-0d19-45e0-9a13-ccbb6348d804",
"metadata": {},
"outputs": [
{
"name": "stdout",
"name": "stdin",
"output_type": "stream",
"text": [
"\n",
"Keyspace name? my_keyspace\n",
"\n",
"Astra DB Token (\"AstraCS:...\") ········\n",
"Full path to your Secure Connect Bundle? /path/to/secure-connect-databasename.zip\n"
"CASSANDRA_KEYSPACE = demo_keyspace\n"
]
}
],
"source": [
"import cassio\n",
"\n",
"CASSANDRA_KEYSPACE = input(\"CASSANDRA_KEYSPACE = \")\n",
"\n",
"cassio.init(session=session, keyspace=CASSANDRA_KEYSPACE)"
]
},
{
"cell_type": "markdown",
"id": "2cc7ba29-8f84-4fbf-aaf7-3daa1be7e7b0",
"metadata": {},
"source": [
"#### Connecting to Astra DB through CQL\n",
"\n",
"In this case you initialize CassIO with the following connection parameters:\n",
"\n",
"- the Database ID, e.g. `01234567-89ab-cdef-0123-456789abcdef`\n",
"- the Token, e.g. `AstraCS:6gBhNmsk135....` (it must be a \"Database Administrator\" token)\n",
"- Optionally a Keyspace name (if omitted, the default one for the database will be used)"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "ead97077-cc79-4f5c-940c-91eb21650466",
"metadata": {},
"outputs": [
{
"name": "stdin",
"output_type": "stream",
"text": [
"ASTRA_DB_ID = 01234567-89ab-cdef-0123-456789abcdef\n",
"ASTRA_DB_APPLICATION_TOKEN = ········\n",
"ASTRA_DB_KEYSPACE (optional, can be left empty) = my_keyspace\n"
]
}
],
"source": [
"import getpass\n",
"\n",
"keyspace = input(\"\\nKeyspace name? \")\n",
"ASTRA_DB_APPLICATION_TOKEN = getpass.getpass('\\nAstra DB Token (\"AstraCS:...\") ')\n",
"ASTRA_DB_SECURE_BUNDLE_PATH = input(\"Full path to your Secure Connect Bundle? \")"
"ASTRA_DB_ID = input(\"ASTRA_DB_ID = \")\n",
"ASTRA_DB_APPLICATION_TOKEN = getpass.getpass(\"ASTRA_DB_APPLICATION_TOKEN = \")\n",
"\n",
"desired_keyspace = input(\"ASTRA_DB_KEYSPACE (optional, can be left empty) = \")\n",
"if desired_keyspace:\n",
" ASTRA_DB_KEYSPACE = desired_keyspace\n",
"else:\n",
" ASTRA_DB_KEYSPACE = None"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "4617f485",
"execution_count": 13,
"id": "cc53ce1b",
"metadata": {},
"outputs": [],
"source": [
"from cassandra.auth import PlainTextAuthProvider\n",
"from cassandra.cluster import Cluster\n",
"import cassio\n",
"\n",
"cluster = Cluster(\n",
" cloud={\n",
" \"secure_connect_bundle\": ASTRA_DB_SECURE_BUNDLE_PATH,\n",
" },\n",
" auth_provider=PlainTextAuthProvider(\"token\", ASTRA_DB_APPLICATION_TOKEN),\n",
")\n",
"session = cluster.connect()"
"cassio.init(\n",
" database_id=ASTRA_DB_ID,\n",
" token=ASTRA_DB_APPLICATION_TOKEN,\n",
" keyspace=ASTRA_DB_KEYSPACE,\n",
")"
]
},
{
@ -981,27 +1072,27 @@
"id": "8665664a",
"metadata": {},
"source": [
"### Exact cache\n",
"### Cassandra: Exact cache\n",
"\n",
"This will avoid invoking the LLM when the supplied prompt is _exactly_ the same as one encountered already:"
]
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 3,
"id": "00a5e66f",
"metadata": {},
"outputs": [],
"source": [
"from langchain.cache import CassandraCache\n",
"from langchain.globals import set_llm_cache\n",
"from langchain_community.cache import CassandraCache\n",
"from langchain_core.globals import set_llm_cache\n",
"\n",
"set_llm_cache(CassandraCache(session=session, keyspace=keyspace))"
"set_llm_cache(CassandraCache())"
]
},
{
"cell_type": "code",
"execution_count": 11,
"execution_count": 9,
"id": "956a5145",
"metadata": {},
"outputs": [
@ -1011,9 +1102,9 @@
"text": [
"\n",
"\n",
"The Moon always shows the same side because it is tidally locked to Earth.\n",
"CPU times: user 41.7 ms, sys: 153 µs, total: 41.8 ms\n",
"Wall time: 1.96 s\n"
"The Moon is tidally locked with the Earth, which means that its rotation on its own axis is synchronized with its orbit around the Earth. This results in the Moon always showing the same side to the Earth. This is because the gravitational forces between the Earth and the Moon have caused the Moon's rotation to slow down over time, until it reached a point where it takes the same amount of time for the Moon to rotate on its axis as it does to orbit around the Earth. This phenomenon is common among satellites in close orbits around their parent planets and is known as tidal locking.\n",
"CPU times: user 92.5 ms, sys: 8.89 ms, total: 101 ms\n",
"Wall time: 1.98 s\n"
]
}
],
@ -1025,7 +1116,7 @@
},
{
"cell_type": "code",
"execution_count": 12,
"execution_count": 10,
"id": "158f0151",
"metadata": {},
"outputs": [
@ -1035,9 +1126,9 @@
"text": [
"\n",
"\n",
"The Moon always shows the same side because it is tidally locked to Earth.\n",
"CPU times: user 4.09 ms, sys: 0 ns, total: 4.09 ms\n",
"Wall time: 119 ms\n"
"The Moon is tidally locked with the Earth, which means that its rotation on its own axis is synchronized with its orbit around the Earth. This results in the Moon always showing the same side to the Earth. This is because the gravitational forces between the Earth and the Moon have caused the Moon's rotation to slow down over time, until it reached a point where it takes the same amount of time for the Moon to rotate on its axis as it does to orbit around the Earth. This phenomenon is common among satellites in close orbits around their parent planets and is known as tidal locking.\n",
"CPU times: user 5.51 ms, sys: 0 ns, total: 5.51 ms\n",
"Wall time: 5.78 ms\n"
]
}
],
@ -1052,14 +1143,14 @@
"id": "8fc4d017",
"metadata": {},
"source": [
"### Semantic cache\n",
"### Cassandra: Semantic cache\n",
"\n",
"This cache will do a semantic similarity search and return a hit if it finds a cached entry that is similar enough, For this, you need to provide an `Embeddings` instance of your choice."
]
},
{
"cell_type": "code",
"execution_count": 13,
"execution_count": 14,
"id": "b9ad3f54",
"metadata": {},
"outputs": [],
@ -1071,26 +1162,25 @@
},
{
"cell_type": "code",
"execution_count": 14,
"execution_count": 17,
"id": "4623f95e",
"metadata": {},
"outputs": [],
"source": [
"from langchain.cache import CassandraSemanticCache\n",
"from langchain_community.cache import CassandraSemanticCache\n",
"from langchain_core.globals import set_llm_cache\n",
"\n",
"set_llm_cache(\n",
" CassandraSemanticCache(\n",
" session=session,\n",
" keyspace=keyspace,\n",
" embedding=embedding,\n",
" table_name=\"cass_sem_cache\",\n",
" table_name=\"my_semantic_cache\",\n",
" )\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 15,
"execution_count": 19,
"id": "1a8e577b",
"metadata": {},
"outputs": [
@ -1100,9 +1190,9 @@
"text": [
"\n",
"\n",
"The Moon always shows the same side because it is tidally locked with Earth. This means that the same side of the Moon always faces Earth.\n",
"CPU times: user 21.3 ms, sys: 177 µs, total: 21.4 ms\n",
"Wall time: 3.09 s\n"
"The Moon is always showing the same side because of a phenomenon called synchronous rotation. This means that the Moon rotates on its axis at the same rate that it orbits around the Earth, which takes approximately 27.3 days. This results in the same side of the Moon always facing the Earth. This is due to the gravitational forces between the Earth and the Moon, which have caused the Moon's rotation to gradually slow down and become synchronized with its orbit. This is a common occurrence among many moons in our solar system.\n",
"CPU times: user 49.5 ms, sys: 7.38 ms, total: 56.9 ms\n",
"Wall time: 2.55 s\n"
]
}
],
@ -1114,7 +1204,7 @@
},
{
"cell_type": "code",
"execution_count": 16,
"execution_count": 20,
"id": "f7abddfd",
"metadata": {},
"outputs": [
@ -1124,9 +1214,9 @@
"text": [
"\n",
"\n",
"The Moon always shows the same side because it is tidally locked with Earth. This means that the same side of the Moon always faces Earth.\n",
"CPU times: user 10.9 ms, sys: 17 µs, total: 10.9 ms\n",
"Wall time: 461 ms\n"
"The Moon is always showing the same side because of a phenomenon called synchronous rotation. This means that the Moon rotates on its axis at the same rate that it orbits around the Earth, which takes approximately 27.3 days. This results in the same side of the Moon always facing the Earth. This is due to the gravitational forces between the Earth and the Moon, which have caused the Moon's rotation to gradually slow down and become synchronized with its orbit. This is a common occurrence among many moons in our solar system.\n",
"CPU times: user 21.2 ms, sys: 3.38 ms, total: 24.6 ms\n",
"Wall time: 532 ms\n"
]
}
],

@ -216,6 +216,42 @@
"}"
]
},
{
"cell_type": "markdown",
"id": "7574c6f0",
"metadata": {},
"source": [
"### Streaming\n",
"\n",
"To get streaming of LLM output, you can create a Huggingface `TextIteratorStreamer` for `_forward_params`."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "548c9489",
"metadata": {},
"outputs": [],
"source": [
"from threading import Thread\n",
"\n",
"from transformers import TextIteratorStreamer\n",
"\n",
"streamer = TextIteratorStreamer(\n",
" ov_llm.pipeline.tokenizer,\n",
" timeout=30.0,\n",
" skip_prompt=True,\n",
" skip_special_tokens=True,\n",
")\n",
"ov_llm.pipeline._forward_params = {\"streamer\": streamer, \"max_new_tokens\": 100}\n",
"\n",
"t1 = Thread(target=chain.invoke, args=({\"question\": question},))\n",
"t1.start()\n",
"\n",
"for new_text in streamer:\n",
" print(new_text, end=\"\", flush=True)"
]
},
{
"cell_type": "markdown",
"id": "da9a9239",

@ -22,7 +22,8 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"**Sambaverse** allows you to interact with multiple open-source models. You can view the list of available models and interact with them in the [playground](https://sambaverse.sambanova.ai/playground).\n **Please note that Sambaverse's free offering is performance-limited.** Companies that are ready to evaluate the production tokens-per-second performance, volume throughput, and 10x lower total cost of ownership (TCO) of SambaNova should [contact us](https://sambaverse.sambanova.ai/contact-us) for a non-limited evaluation instance."
"**Sambaverse** allows you to interact with multiple open-source models. You can view the list of available models and interact with them in the [playground](https://sambaverse.sambanova.ai/playground).\n",
" **Please note that Sambaverse's free offering is performance-limited.** Companies that are ready to evaluate the production tokens-per-second performance, volume throughput, and 10x lower total cost of ownership (TCO) of SambaNova should [contact us](https://sambaverse.sambanova.ai/contact-us) for a non-limited evaluation instance."
]
},
{
@ -88,9 +89,10 @@
" \"temperature\": 0.01,\n",
" \"process_prompt\": True,\n",
" \"select_expert\": \"llama-2-7b-chat-hf\",\n",
" # \"repetition_penalty\": {\"type\": \"float\", \"value\": \"1\"},\n",
" # \"top_k\": {\"type\": \"int\", \"value\": \"50\"},\n",
" # \"top_p\": {\"type\": \"float\", \"value\": \"1\"}\n",
" # \"stop_sequences\": '\\\"sequence1\\\",\\\"sequence2\\\"',\n",
" # \"repetition_penalty\": 1.0,\n",
" # \"top_k\": 50,\n",
" # \"top_p\": 1.0\n",
" },\n",
")\n",
"\n",
@ -177,10 +179,10 @@
" \"do_sample\": True,\n",
" \"max_tokens_to_generate\": 1000,\n",
" \"temperature\": 0.01,\n",
" # \"repetition_penalty\": {\"type\": \"float\", \"value\": \"1\"},\n",
" # \"top_k\": {\"type\": \"int\", \"value\": \"50\"},\n",
" # \"top_logprobs\": {\"type\": \"int\", \"value\": \"0\"},\n",
" # \"top_p\": {\"type\": \"float\", \"value\": \"1\"}\n",
" # \"repetition_penalty\": 1.0,\n",
" # \"top_k\": 50,\n",
" # \"top_logprobs\": 0,\n",
" # \"top_p\": 1.0\n",
" },\n",
")\n",
"\n",

@ -1,10 +1,16 @@
# Browserbase
>[Browserbase](https://browserbase.com) is a serverless platform for running headless browsers, it offers advanced debugging, session recordings, stealth mode, integrated proxies and captcha solving.
[Browserbase](https://browserbase.com) is a developer platform to reliably run, manage, and monitor headless browsers.
Power your AI data retrievals with:
- [Serverless Infrastructure](https://docs.browserbase.com/under-the-hood) providing reliable browsers to extract data from complex UIs
- [Stealth Mode](https://docs.browserbase.com/features/stealth-mode) with included fingerprinting tactics and automatic captcha solving
- [Session Debugger](https://docs.browserbase.com/features/sessions) to inspect your Browser Session with networks timeline and logs
- [Live Debug](https://docs.browserbase.com/guides/session-debug-connection/browser-remote-control) to quickly debug your automation
## Installation and Setup
- Get an API key from [browserbase.com](https://browserbase.com) and set it in environment variables (`BROWSERBASE_API_KEY`).
- Get an API key and Project ID from [browserbase.com](https://browserbase.com) and set it in environment variables (`BROWSERBASE_API_KEY`, `BROWSERBASE_PROJECT_ID`).
- Install the [Browserbase SDK](http://github.com/browserbase/python-sdk):
```python

@ -61,4 +61,4 @@ FROM
{'input': 'Return the sql for this question: How many employees are in the company?', 'output': "SELECT \n COUNT(*)\nFROM \n employees"}
```
For more information on tools, see [this page](/docs/how_to/tools).
For more information on tools, see [this page](/docs/how_to/tools_builtin).

@ -31,4 +31,4 @@ from langchain.agents import load_tools
tools = load_tools(["golden-query"])
```
For more information on tools, see [this page](/docs/how_to/tools).
For more information on tools, see [this page](/docs/how_to/tools_builtin).

@ -71,4 +71,4 @@ from langchain.agents import load_tools
tools = load_tools(["google-serper"])
```
For more information on tools, see [this page](/docs/how_to/tools).
For more information on tools, see [this page](/docs/how_to/tools_builtin).

@ -41,4 +41,4 @@ from langchain.agents import load_tools
tools = load_tools(["openweathermap-api"])
```
For more information on tools, see [this page](/docs/how_to/tools).
For more information on tools, see [this page](/docs/how_to/tools_builtin).

@ -77,4 +77,4 @@ from langchain.agents import load_tools
tools = load_tools(["searchapi"])
```
For more information on tools, see [this page](/docs/how_to/tools).
For more information on tools, see [this page](/docs/how_to/tools_builtin).

@ -87,4 +87,4 @@ arxiv_tool = SearxSearchResults(name="Arxiv", wrapper=wrapper,
})
```
For more information on tools, see [this page](/docs/how_to/tools).
For more information on tools, see [this page](/docs/how_to/tools_builtin).

@ -28,4 +28,4 @@ from langchain.agents import load_tools
tools = load_tools(["serpapi"])
```
For more information on this, see [this page](/docs/how_to/tools)
For more information on this, see [this page](/docs/how_to/tools_builtin)

@ -33,4 +33,4 @@ from langchain.agents import load_tools
tools = load_tools(["stackexchange"])
```
For more information on tools, see [this page](/docs/how_to/tools).
For more information on tools, see [this page](/docs/how_to/tools_builtin).

@ -36,4 +36,4 @@ from langchain.agents import load_tools
tools = load_tools(["wolfram-alpha"])
```
For more information on tools, see [this page](/docs/how_to/tools).
For more information on tools, see [this page](/docs/how_to/tools_builtin).

@ -33,7 +33,7 @@
"- `voyage-code-2`\n",
"- `voyage-2`\n",
"- `voyage-law-2`\n",
"- `voyage-lite-02-instruct`"
"- `voyage-large-2-instruct`"
]
},
{

File diff suppressed because one or more lines are too long

@ -15,8 +15,6 @@
"source": [
"This notebook goes over how to use the bing search component.\n",
"\n",
"First, you need to set up the proper API keys and environment variables. To set it up, follow the instructions found [here](https://levelup.gitconnected.com/api-tutorial-how-to-use-bing-web-search-api-in-python-4165d5592a7e).\n",
"\n",
"Then we will need to set some environment variables."
]
},

@ -161,7 +161,7 @@
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": 3,
"id": "67ab8afa-f7c6-4fbf-b596-cb512da949da",
"metadata": {
"id": "67ab8afa-f7c6-4fbf-b596-cb512da949da",
@ -194,7 +194,7 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 4,
"id": "aac9563e",
"metadata": {
"id": "aac9563e",
@ -208,7 +208,7 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 5,
"id": "a3c3999a",
"metadata": {
"id": "a3c3999a",
@ -229,7 +229,7 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 6,
"id": "12eb86d8",
"metadata": {
"id": "12eb86d8",
@ -271,7 +271,7 @@
},
{
"cell_type": "code",
"execution_count": 5,
"execution_count": 7,
"id": "5d076412",
"metadata": {},
"outputs": [
@ -313,7 +313,7 @@
},
{
"cell_type": "code",
"execution_count": 6,
"execution_count": 8,
"id": "b2a4bd1b",
"metadata": {},
"outputs": [
@ -345,7 +345,7 @@
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": 9,
"id": "f3d294ff",
"metadata": {},
"outputs": [
@ -375,7 +375,7 @@
},
{
"cell_type": "code",
"execution_count": 59,
"execution_count": 10,
"id": "55b63a61",
"metadata": {},
"outputs": [
@ -405,7 +405,7 @@
},
{
"cell_type": "code",
"execution_count": 60,
"execution_count": 11,
"id": "9b831b3d",
"metadata": {},
"outputs": [
@ -435,7 +435,7 @@
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 12,
"id": "fb1482e7",
"metadata": {},
"outputs": [],
@ -504,27 +504,29 @@
"metadata": {},
"source": [
"# Retrieval Strategies\n",
"Elasticsearch has big advantages over other vector only databases from its ability to support a wide range of retrieval strategies. In this notebook we will configure `ElasticsearchStore` to support some of the most common retrieval strategies. \n",
"Elasticsearch has big advantages over other vector only databases from its ability to support a wide range of retrieval strategies. In this notebook we will configure `ElasticsearchStore` to support some of the most common retrieval strategies. \n",
"\n",
"By default, `ElasticsearchStore` uses the `ApproxRetrievalStrategy`.\n",
"By default, `ElasticsearchStore` uses the `DenseVectorStrategy` (was called `ApproxRetrievalStrategy` prior to version 0.2.0).\n",
"\n",
"## ApproxRetrievalStrategy\n",
"This will return the top `k` most similar vectors to the query vector. The `k` parameter is set when the `ElasticsearchStore` is initialized. The default value is `10`."
"## DenseVectorStrategy\n",
"This will return the top `k` most similar vectors to the query vector. The `k` parameter is set when the `ElasticsearchStore` is initialized. The default value is `10`."
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 13,
"id": "999b5ef5",
"metadata": {},
"outputs": [],
"source": [
"from langchain_elasticsearch import DenseVectorStrategy\n",
"\n",
"db = ElasticsearchStore.from_documents(\n",
" docs,\n",
" embeddings,\n",
" es_url=\"http://localhost:9200\",\n",
" index_name=\"test\",\n",
" strategy=ElasticsearchStore.ApproxRetrievalStrategy(),\n",
" strategy=DenseVectorStrategy(),\n",
")\n",
"\n",
"docs = db.similarity_search(\n",
@ -537,12 +539,12 @@
"id": "9b651be5",
"metadata": {},
"source": [
"### Example: Approx with hybrid\n",
"### Example: Hybrid retrieval with dense vector and keyword search\n",
"This example will show how to configure `ElasticsearchStore` to perform a hybrid retrieval, using a combination of approximate semantic search and keyword based search. \n",
"\n",
"We use RRF to balance the two scores from different retrieval methods.\n",
"\n",
"To enable hybrid retrieval, we need to set `hybrid=True` in `ElasticsearchStore` `ApproxRetrievalStrategy` constructor.\n",
"To enable hybrid retrieval, we need to set `hybrid=True` in the `DenseVectorStrategy` constructor.\n",
"\n",
"```python\n",
"\n",
@ -551,9 +553,7 @@
" embeddings, \n",
" es_url=\"http://localhost:9200\", \n",
" index_name=\"test\",\n",
" strategy=ElasticsearchStore.ApproxRetrievalStrategy(\n",
" hybrid=True,\n",
" )\n",
" strategy=DenseVectorStrategy(hybrid=True)\n",
")\n",
"```\n",
"\n",
@ -582,22 +582,22 @@
"}\n",
"```\n",
"\n",
"### Example: Approx with Embedding Model in Elasticsearch\n",
"This example will show how to configure `ElasticsearchStore` to use the embedding model deployed in Elasticsearch for approximate retrieval. \n",
"### Example: Dense vector search with Embedding Model in Elasticsearch\n",
"This example will show how to configure `ElasticsearchStore` to use the embedding model deployed in Elasticsearch for dense vector retrieval.\n",
"\n",
"To use this, specify the model_id in `ElasticsearchStore` `ApproxRetrievalStrategy` constructor via the `query_model_id` argument.\n",
"To use this, specify the model_id in `DenseVectorStrategy` constructor via the `query_model_id` argument.\n",
"\n",
"**NOTE** This requires the model to be deployed and running in Elasticsearch ml node. See [notebook example](https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/integrations/hugging-face/loading-model-from-hugging-face.ipynb) on how to deploy the model with eland.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"execution_count": 14,
"id": "0a0c85e7",
"metadata": {},
"outputs": [],
"source": [
"APPROX_SELF_DEPLOYED_INDEX_NAME = \"test-approx-self-deployed\"\n",
"DENSE_SELF_DEPLOYED_INDEX_NAME = \"test-dense-self-deployed\"\n",
"\n",
"# Note: This does not have an embedding function specified\n",
"# Instead, we will use the embedding model deployed in Elasticsearch\n",
@ -605,12 +605,10 @@
" es_cloud_id=\"<your cloud id>\",\n",
" es_user=\"elastic\",\n",
" es_password=\"<your password>\",\n",
" index_name=APPROX_SELF_DEPLOYED_INDEX_NAME,\n",
" index_name=DENSE_SELF_DEPLOYED_INDEX_NAME,\n",
" query_field=\"text_field\",\n",
" vector_query_field=\"vector_query_field.predicted_value\",\n",
" strategy=ElasticsearchStore.ApproxRetrievalStrategy(\n",
" query_model_id=\"sentence-transformers__all-minilm-l6-v2\"\n",
" ),\n",
" strategy=DenseVectorStrategy(model_id=\"sentence-transformers__all-minilm-l6-v2\"),\n",
")\n",
"\n",
"# Setup a Ingest Pipeline to perform the embedding\n",
@ -631,7 +629,7 @@
"# creating a new index with the pipeline,\n",
"# not relying on langchain to create the index\n",
"db.client.indices.create(\n",
" index=APPROX_SELF_DEPLOYED_INDEX_NAME,\n",
" index=DENSE_SELF_DEPLOYED_INDEX_NAME,\n",
" mappings={\n",
" \"properties\": {\n",
" \"text_field\": {\"type\": \"text\"},\n",
@ -655,12 +653,10 @@
" es_cloud_id=\"<cloud id>\",\n",
" es_user=\"elastic\",\n",
" es_password=\"<cloud password>\",\n",
" index_name=APPROX_SELF_DEPLOYED_INDEX_NAME,\n",
" index_name=DENSE_SELF_DEPLOYED_INDEX_NAME,\n",
" query_field=\"text_field\",\n",
" vector_query_field=\"vector_query_field.predicted_value\",\n",
" strategy=ElasticsearchStore.ApproxRetrievalStrategy(\n",
" query_model_id=\"sentence-transformers__all-minilm-l6-v2\"\n",
" ),\n",
" strategy=DenseVectorStrategy(model_id=\"sentence-transformers__all-minilm-l6-v2\"),\n",
")\n",
"\n",
"# Perform search\n",
@ -672,12 +668,12 @@
"id": "53959de6",
"metadata": {},
"source": [
"## SparseVectorRetrievalStrategy (ELSER)\n",
"## SparseVectorStrategy (ELSER)\n",
"This strategy uses Elasticsearch's sparse vector retrieval to retrieve the top-k results. We only support our own \"ELSER\" embedding model for now.\n",
"\n",
"**NOTE** This requires the ELSER model to be deployed and running in Elasticsearch ml node. \n",
"\n",
"To use this, specify `SparseVectorRetrievalStrategy` in `ElasticsearchStore` constructor."
"To use this, specify `SparseVectorStrategy` (was called `SparseVectorRetrievalStrategy` prior to version 0.2.0) in the `ElasticsearchStore` constructor. You will need to provide a model ID."
]
},
{
@ -695,15 +691,17 @@
}
],
"source": [
"from langchain_elasticsearch import SparseVectorStrategy\n",
"\n",
"# Note that this example doesn't have an embedding function. This is because we infer the tokens at index time and at query time within Elasticsearch.\n",
"# This requires the ELSER model to be loaded and running in Elasticsearch.\n",
"db = ElasticsearchStore.from_documents(\n",
" docs,\n",
" es_cloud_id=\"My_deployment:dXMtY2VudHJhbDEuZ2NwLmNsb3VkLmVzLmlvOjQ0MyQ2OGJhMjhmNDc1M2Y0MWVjYTk2NzI2ZWNkMmE5YzRkNyQ3NWI4ODRjNWQ2OTU0MTYzODFjOTkxNmQ1YzYxMGI1Mw==\",\n",
" es_cloud_id=\"<cloud id>\",\n",
" es_user=\"elastic\",\n",
" es_password=\"GgUPiWKwEzgHIYdHdgPk1Lwi\",\n",
" es_password=\"<cloud password>\",\n",
" index_name=\"test-elser\",\n",
" strategy=ElasticsearchStore.SparseVectorRetrievalStrategy(),\n",
" strategy=SparseVectorStrategy(model_id=\".elser_model_2\"),\n",
")\n",
"\n",
"db.client.indices.refresh(index=\"test-elser\")\n",
@ -719,19 +717,42 @@
"id": "edf3a093",
"metadata": {},
"source": [
"## ExactRetrievalStrategy\n",
"This strategy uses Elasticsearch's exact retrieval (also known as brute force) to retrieve the top-k results.\n",
"## DenseVectorScriptScoreStrategy\n",
"This strategy uses Elasticsearch's script score query to perform exact vector retrieval (also known as brute force) to retrieve the top-k results. (This strategy was called `ExactRetrievalStrategy` prior to version 0.2.0.)\n",
"\n",
"To use this, specify `ExactRetrievalStrategy` in `ElasticsearchStore` constructor.\n",
"To use this, specify `DenseVectorScriptScoreStrategy` in `ElasticsearchStore` constructor.\n",
"\n",
"```python\n",
"from langchain_elasticsearch import SparseVectorStrategy\n",
"\n",
"db = ElasticsearchStore.from_documents(\n",
" docs, \n",
" embeddings, \n",
" es_url=\"http://localhost:9200\", \n",
" index_name=\"test\",\n",
" strategy=ElasticsearchStore.ExactRetrievalStrategy()\n",
" strategy=DenseVectorScriptScoreStrategy(),\n",
")\n",
"```"
]
},
{
"cell_type": "markdown",
"id": "11b51c47",
"metadata": {},
"source": [
"## BM25Strategy\n",
"Finally, you can use full-text keyword search.\n",
"\n",
"To use this, specify `BM25Strategy` in `ElasticsearchStore` constructor.\n",
"\n",
"```python\n",
"from langchain_elasticsearch import BM25Strategy\n",
"\n",
"db = ElasticsearchStore.from_documents(\n",
" docs, \n",
" es_url=\"http://localhost:9200\", \n",
" index_name=\"test\",\n",
" strategy=BM25Strategy(),\n",
")\n",
"```"
]
@ -924,9 +945,9 @@
"\n",
"## What's new?\n",
"\n",
"The new implementation is now one class called `ElasticsearchStore` which can be used for approx, exact, and ELSER search retrieval, via strategies.\n",
"The new implementation is now one class called `ElasticsearchStore` which can be used for approximate dense vector, exact dense vector, sparse vector (ELSER), BM25 retrieval and hybrid retrieval, via strategies.\n",
"\n",
"## Im using ElasticKNNSearch\n",
"## I am using ElasticKNNSearch\n",
"\n",
"Old implementation:\n",
"\n",
@ -946,21 +967,21 @@
"\n",
"```python\n",
"\n",
"from langchain_elasticsearch import ElasticsearchStore\n",
"from langchain_elasticsearch import ElasticsearchStore, DenseVectorStrategy\n",
"\n",
"db = ElasticsearchStore(\n",
" es_url=\"http://localhost:9200\",\n",
" index_name=\"test_index\",\n",
" embedding=embedding,\n",
" # if you use the model_id\n",
" # strategy=ElasticsearchStore.ApproxRetrievalStrategy( query_model_id=\"test_model\" )\n",
" # strategy=DenseVectorStrategy(model_id=\"test_model\")\n",
" # if you use hybrid search\n",
" # strategy=ElasticsearchStore.ApproxRetrievalStrategy( hybrid=True )\n",
" # strategy=DenseVectorStrategy(hybrid=True)\n",
")\n",
"\n",
"```\n",
"\n",
"## Im using ElasticVectorSearch\n",
"## I am using ElasticVectorSearch\n",
"\n",
"Old implementation:\n",
"\n",
@ -980,13 +1001,13 @@
"\n",
"```python\n",
"\n",
"from langchain_elasticsearch import ElasticsearchStore\n",
"from langchain_elasticsearch import ElasticsearchStore, DenseVectorScriptScoreStrategy\n",
"\n",
"db = ElasticsearchStore(\n",
" es_url=\"http://localhost:9200\",\n",
" index_name=\"test_index\",\n",
" embedding=embedding,\n",
" strategy=ElasticsearchStore.ExactRetrievalStrategy()\n",
" strategy=DenseVectorScriptScoreStrategy()\n",
")\n",
"\n",
"```"

@ -12,16 +12,7 @@
"\n",
"This notebook shows how to use functionality related to the `Pinecone` vector database.\n",
"\n",
"To use Pinecone, you must have an API key. \n",
"Here are the [installation instructions](https://docs.pinecone.io/docs/quickstart).\n",
"\n",
"Set the following environment variables to make using the `Pinecone` integration easier:\n",
"\n",
"- `PINECONE_API_KEY`: Your Pinecone API key.\n",
"- `PINECONE_INDEX_NAME`: The name of the index you want to use.\n",
"\n",
"And to follow along in this doc, you should also set\n",
"\n",
"Set the following environment variables to follow along in this doc:\n",
"- `OPENAI_API_KEY`: Your OpenAI API key, for using `OpenAIEmbeddings`"
]
},
@ -34,7 +25,11 @@
},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langchain-pinecone langchain-openai langchain"
"%pip install --upgrade --quiet \\\n",
" langchain-pinecone \\\n",
" langchain-openai \\\n",
" langchain \\\n",
" pinecone-notebooks"
]
},
{
@ -74,12 +69,91 @@
},
{
"cell_type": "markdown",
"id": "3a4d377f",
"id": "ef6dc4de",
"metadata": {},
"source": [
"Now let's create a new Pinecone account, or sign into your existing one, and create an API key to use in this notebook."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1fdc3c36",
"metadata": {},
"outputs": [],
"source": [
"Now let's assume you have your Pinecone index set up with `dimension=1536`.\n",
"from pinecone_notebooks.colab import Authenticate\n",
"\n",
"We can connect to our Pinecone index and insert those chunked docs as contents with `PineconeVectorStore.from_documents`."
"Authenticate()"
]
},
{
"cell_type": "markdown",
"id": "54da1a39",
"metadata": {},
"source": [
"The newly created API key has been stored in the `PINECONE_API_KEY` environment variable. We will use it to setup the Pinecone client."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "eb554814",
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"\n",
"pinecone_api_key = os.environ.get(\"PINECONE_API_KEY\")\n",
"pinecone_api_key\n",
"\n",
"import time\n",
"\n",
"from pinecone import Pinecone, ServerlessSpec\n",
"\n",
"pc = Pinecone(api_key=pinecone_api_key)"
]
},
{
"cell_type": "markdown",
"id": "658706a3",
"metadata": {},
"source": [
"Next, let's connect to your Pinecone index. If one named `index_name` doesn't exist, it will be created."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "276a06dd",
"metadata": {},
"outputs": [],
"source": [
"import time\n",
"\n",
"index_name = \"langchain-index\" # change if desired\n",
"\n",
"existing_indexes = [index_info[\"name\"] for index_info in pc.list_indexes()]\n",
"\n",
"if index_name not in existing_indexes:\n",
" pc.create_index(\n",
" name=index_name,\n",
" dimension=1536,\n",
" metric=\"cosine\",\n",
" spec=ServerlessSpec(cloud=\"aws\", region=\"us-east-1\"),\n",
" )\n",
" while not pc.describe_index(index_name).status[\"ready\"]:\n",
" time.sleep(1)\n",
"\n",
"index = pc.Index(index_name)"
]
},
{
"cell_type": "markdown",
"id": "3a4d377f",
"metadata": {},
"source": [
"Now that our Pinecone index is setup, we can upsert those chunked docs as contents with `PineconeVectorStore.from_documents`."
]
},
{
@ -91,8 +165,6 @@
"source": [
"from langchain_pinecone import PineconeVectorStore\n",
"\n",
"index_name = \"langchain-test-index\"\n",
"\n",
"docsearch = PineconeVectorStore.from_documents(docs, embeddings, index_name=index_name)"
]
},
@ -315,14 +387,6 @@
"for i, doc in enumerate(found_docs):\n",
" print(f\"{i + 1}.\", doc.page_content, \"\\n\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "b0fd750b",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
@ -341,7 +405,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.4"
"version": "3.11.6"
}
},
"nbformat": 4,

@ -81,7 +81,7 @@ Deploy LangChain runnables and chains as REST APIs.
## Additional resources
## [Security](/docs/security)
### [Security](/docs/security)
Read up on our [Security](/docs/security) best practices to make sure you're developing safely with LangChain.
### [Integrations](/docs/integrations/providers/)

@ -63,7 +63,7 @@
"```\n",
"\n",
"\n",
"For more details, see our [Installation guide](/docs/installation).\n",
"For more details, see our [Installation guide](/docs/how_to/installation).\n",
"\n",
"### LangSmith\n",
"\n",

@ -75,7 +75,7 @@
"```\n",
"\n",
"\n",
"For more details, see our [Installation guide](/docs/installation).\n",
"For more details, see our [Installation guide](/docs/how_to/installation).\n",
"\n",
"### LangSmith\n",
"\n",

@ -65,7 +65,7 @@
"```\n",
"\n",
"\n",
"For more details, see our [Installation guide](/docs/installation).\n",
"For more details, see our [Installation guide](/docs/how_to/installation).\n",
"\n",
"### LangSmith\n",
"\n",

@ -9,6 +9,7 @@ New to LangChain or to LLM app development in general? Read this material to qui
### Basics
- [Build a Simple LLM Application](/docs/tutorials/llm_chain)
- [Build a Chatbot](/docs/tutorials/chatbot)
- [Build vector stores and retrievers](/docs/tutorials/retrievers)
- [Build an Agent](/docs/tutorials/agents)
### Working with external knowledge

@ -64,7 +64,7 @@
"```\n",
"\n",
"\n",
"For more details, see our [Installation guide](/docs/installation).\n",
"For more details, see our [Installation guide](/docs/how_to/installation).\n",
"\n",
"### LangSmith\n",
"\n",

@ -78,7 +78,7 @@
"```\n",
"\n",
"\n",
"For more details, see our [Installation guide](/docs/installation).\n",
"For more details, see our [Installation guide](/docs/how_to/installation).\n",
"\n",
"### LangSmith\n",
"\n",

@ -0,0 +1,502 @@
{
"cells": [
{
"cell_type": "markdown",
"id": "bf37a837-7a6a-447b-8779-38f26c585887",
"metadata": {},
"source": [
"# Vector stores and retrievers\n",
"\n",
"This tutorial will familiarize you with LangChain's vector store and retriever abstractions. These abstractions are designed to support retrieval of data-- from (vector) databases and other sources-- for integration with LLM workflows. They are important for applications that fetch data to be reasoned over as part of model inference, as in the case of retrieval-augmented generation, or RAG (see our RAG tutorial [here](/docs/tutorials/rag)).\n",
"\n",
"## Concepts\n",
"\n",
"This guide focuses on retrieval of text data. We will cover the following concepts:\n",
"\n",
"- Documents;\n",
"- Vector stores;\n",
"- Retrievers.\n",
"\n",
"## Setup\n",
"\n",
"### Jupyter Notebook\n",
"\n",
"This and other tutorials are perhaps most conveniently run in a Jupyter notebook. See [here](https://jupyter.org/install) for instructions on how to install.\n",
"\n",
"### Installation\n",
"\n",
"This tutorial requires the `langchain`, `langchain-chroma`, and `langchain-openai` packages:\n",
"\n",
"```{=mdx}\n",
"import Tabs from '@theme/Tabs';\n",
"import TabItem from '@theme/TabItem';\n",
"import CodeBlock from \"@theme/CodeBlock\";\n",
"\n",
"<Tabs>\n",
" <TabItem value=\"pip\" label=\"Pip\" default>\n",
" <CodeBlock language=\"bash\">pip install langchain langchain-chroma langchain-openai</CodeBlock>\n",
" </TabItem>\n",
" <TabItem value=\"conda\" label=\"Conda\">\n",
" <CodeBlock language=\"bash\">conda install langchain langchain-chroma langchain-openai -c conda-forge</CodeBlock>\n",
" </TabItem>\n",
"</Tabs>\n",
"\n",
"```\n",
"\n",
"For more details, see our [Installation guide](/docs/how_to/installation).\n",
"\n",
"### LangSmith\n",
"\n",
"Many of the applications you build with LangChain will contain multiple steps with multiple invocations of LLM calls.\n",
"As these applications get more and more complex, it becomes crucial to be able to inspect what exactly is going on inside your chain or agent.\n",
"The best way to do this is with [LangSmith](https://smith.langchain.com).\n",
"\n",
"After you sign up at the link above, make sure to set your environment variables to start logging traces:\n",
"\n",
"```shell\n",
"export LANGCHAIN_TRACING_V2=\"true\"\n",
"export LANGCHAIN_API_KEY=\"...\"\n",
"```\n",
"\n",
"Or, if in a notebook, you can set them with:\n",
"\n",
"```python\n",
"import getpass\n",
"import os\n",
"\n",
"os.environ[\"LANGCHAIN_TRACING_V2\"] = \"true\"\n",
"os.environ[\"LANGCHAIN_API_KEY\"] = getpass.getpass()\n",
"```\n",
"\n",
"\n",
"## Documents\n",
"\n",
"LangChain implements a [Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html) abstraction, which is intended to represent a unit of text and associated metadata. It has two attributes:\n",
"\n",
"- `page_content`: a string representing the content;\n",
"- `metadata`: a dict containing arbitrary metadata.\n",
"\n",
"The `metadata` attribute can capture information about the source of the document, its relationship to other documents, and other information. Note that an individual `Document` object often represents a chunk of a larger document.\n",
"\n",
"Let's generate some sample documents:"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "9f3dc151-7b2f-4d94-9558-7a84f7eab100",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.documents import Document\n",
"\n",
"documents = [\n",
" Document(\n",
" page_content=\"Dogs are great companions, known for their loyalty and friendliness.\",\n",
" metadata={\"source\": \"mammal-pets-doc\"},\n",
" ),\n",
" Document(\n",
" page_content=\"Cats are independent pets that often enjoy their own space.\",\n",
" metadata={\"source\": \"mammal-pets-doc\"},\n",
" ),\n",
" Document(\n",
" page_content=\"Goldfish are popular pets for beginners, requiring relatively simple care.\",\n",
" metadata={\"source\": \"fish-pets-doc\"},\n",
" ),\n",
" Document(\n",
" page_content=\"Parrots are intelligent birds capable of mimicking human speech.\",\n",
" metadata={\"source\": \"bird-pets-doc\"},\n",
" ),\n",
" Document(\n",
" page_content=\"Rabbits are social animals that need plenty of space to hop around.\",\n",
" metadata={\"source\": \"mammal-pets-doc\"},\n",
" ),\n",
"]"
]
},
{
"cell_type": "markdown",
"id": "1cac19bd-27d1-40f1-9c27-7a586b685b4e",
"metadata": {},
"source": [
"Here we've generated five documents, containing metadata indicating three distinct \"sources\".\n",
"\n",
"## Vector stores\n",
"\n",
"Vector search is a common way to store and search over unstructured data (such as unstructured text). The idea is to store numeric vectors that are associated with the text. Given a query, we can [embed](/docs/concepts#embedding-models) it as a vector of the same dimension and use vector similarity metrics to identify related data in the store.\n",
"\n",
"LangChain [VectorStore](https://api.python.langchain.com/en/latest/vectorstores/langchain_core.vectorstores.VectorStore.html) objects contain methods for adding text and `Document` objects to the store, and querying them using various similarity metrics. They are often initialized with [embedding](/docs/how_to/embed_text) models, which determine how text data is translated to numeric vectors.\n",
"\n",
"LangChain includes a suite of [integrations](/docs/integrations/vectorstores) with different vector store technologies. Some vector stores are hosted by a provider (e.g., various cloud providers) and require specific credentials to use; some (such as [Postgres](/docs/integrations/vectorstores/pgvector)) run in separate infrastructure that can be run locally or via a third-party; others can run in-memory for lightweight workloads. Here we will demonstrate usage of LangChain VectorStores using [Chroma](/docs/integrations/vectorstores/chroma), which includes an in-memory implementation.\n",
"\n",
"To instantiate a vector store, we often need to provide an [embedding](/docs/how_to/embed_text) model to specify how text should be converted into a numeric vector. Here we will use [OpenAI embeddings](/docs/integrations/text_embedding/openai/)."
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "d48acc28-1a34-414b-8e08-fbdef3a2a60b",
"metadata": {},
"outputs": [],
"source": [
"from langchain_chroma import Chroma\n",
"from langchain_openai import OpenAIEmbeddings\n",
"\n",
"vectorstore = Chroma.from_documents(\n",
" documents,\n",
" embedding=OpenAIEmbeddings(),\n",
")"
]
},
{
"cell_type": "markdown",
"id": "ff0f0b43-e5b8-4c79-b782-a02f17345487",
"metadata": {},
"source": [
"Calling `.from_documents` here will add the documents to the vector store. [VectorStore](https://api.python.langchain.com/en/latest/vectorstores/langchain_core.vectorstores.VectorStore.html) implements methods for adding documents that can also be called after the object is instantiated. Most implementations will allow you to connect to an existing vector store-- e.g., by providing a client, index name, or other information. See the documentation for a specific [integration](/docs/integrations/vectorstores) for more detail.\n",
"\n",
"Once we've instantiated a `VectorStore` that contains documents, we can query it. [VectorStore](https://api.python.langchain.com/en/latest/vectorstores/langchain_core.vectorstores.VectorStore.html) includes methods for querying:\n",
"- Synchronously and asynchronously;\n",
"- By string query and by vector;\n",
"- With and without returning similarity scores;\n",
"- By similarity and [maximum marginal relevance](https://api.python.langchain.com/en/latest/vectorstores/langchain_core.vectorstores.VectorStore.html#langchain_core.vectorstores.VectorStore.max_marginal_relevance_search) (to balance similarity with query to diversity in retrieved results).\n",
"\n",
"The methods will generally include a list of [Document](https://api.python.langchain.com/en/latest/documents/langchain_core.documents.base.Document.html#langchain_core.documents.base.Document) objects in their outputs.\n",
"\n",
"### Examples\n",
"\n",
"Return documents based on similarity to a string query:"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "7e01ed91-1a98-4221-960a-bd7a2541a548",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[Document(page_content='Cats are independent pets that often enjoy their own space.', metadata={'source': 'mammal-pets-doc'}),\n",
" Document(page_content='Dogs are great companions, known for their loyalty and friendliness.', metadata={'source': 'mammal-pets-doc'}),\n",
" Document(page_content='Rabbits are social animals that need plenty of space to hop around.', metadata={'source': 'mammal-pets-doc'}),\n",
" Document(page_content='Parrots are intelligent birds capable of mimicking human speech.', metadata={'source': 'bird-pets-doc'})]"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"vectorstore.similarity_search(\"cat\")"
]
},
{
"cell_type": "markdown",
"id": "4d4f9857-5a7d-4b5f-82b8-ff76539143c2",
"metadata": {},
"source": [
"Async query:"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "618af196-6182-4a7d-8b09-07493fcdc868",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[Document(page_content='Cats are independent pets that often enjoy their own space.', metadata={'source': 'mammal-pets-doc'}),\n",
" Document(page_content='Dogs are great companions, known for their loyalty and friendliness.', metadata={'source': 'mammal-pets-doc'}),\n",
" Document(page_content='Rabbits are social animals that need plenty of space to hop around.', metadata={'source': 'mammal-pets-doc'}),\n",
" Document(page_content='Parrots are intelligent birds capable of mimicking human speech.', metadata={'source': 'bird-pets-doc'})]"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"await vectorstore.asimilarity_search(\"cat\")"
]
},
{
"cell_type": "markdown",
"id": "d4172698-9ad7-4422-99b2-bdc268e99c75",
"metadata": {},
"source": [
"Return scores:"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "4ed24af2-0d82-478c-949b-b389348d4e9f",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[(Document(page_content='Cats are independent pets that often enjoy their own space.', metadata={'source': 'mammal-pets-doc'}),\n",
" 0.3751849830150604),\n",
" (Document(page_content='Dogs are great companions, known for their loyalty and friendliness.', metadata={'source': 'mammal-pets-doc'}),\n",
" 0.48316916823387146),\n",
" (Document(page_content='Rabbits are social animals that need plenty of space to hop around.', metadata={'source': 'mammal-pets-doc'}),\n",
" 0.49601367115974426),\n",
" (Document(page_content='Parrots are intelligent birds capable of mimicking human speech.', metadata={'source': 'bird-pets-doc'}),\n",
" 0.4972994923591614)]"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Note that providers implement different scores; Chroma here\n",
"# returns a distance metric that should vary inversely with\n",
"# similarity.\n",
"\n",
"vectorstore.similarity_search_with_score(\"cat\")"
]
},
{
"cell_type": "markdown",
"id": "b4991642-7275-40a9-b11a-e3beccbf2614",
"metadata": {},
"source": [
"Return documents based on similarity to a embedded query:"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "b1a5eabb-a821-48cc-917e-cc27f03e4bcc",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[Document(page_content='Cats are independent pets that often enjoy their own space.', metadata={'source': 'mammal-pets-doc'}),\n",
" Document(page_content='Dogs are great companions, known for their loyalty and friendliness.', metadata={'source': 'mammal-pets-doc'}),\n",
" Document(page_content='Rabbits are social animals that need plenty of space to hop around.', metadata={'source': 'mammal-pets-doc'}),\n",
" Document(page_content='Parrots are intelligent birds capable of mimicking human speech.', metadata={'source': 'bird-pets-doc'})]"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"embedding = OpenAIEmbeddings().embed_query(\"cat\")\n",
"\n",
"vectorstore.similarity_search_by_vector(embedding)"
]
},
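The maximum marginal relevance search mentioned earlier works the same way; a quick sketch (the parameter values are arbitrary):

```python
# fetch_k candidates are retrieved first, then k of them are selected
# to balance similarity to the query against diversity among the results.
vectorstore.max_marginal_relevance_search("cat", k=2, fetch_k=4)
```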
{
"cell_type": "markdown",
"id": "168dbbec-ea97-4cc9-bb1a-75519c2d08af",
"metadata": {},
"source": [
"Learn more:\n",
"\n",
"- [API reference](https://api.python.langchain.com/en/latest/vectorstores/langchain_core.vectorstores.VectorStore.html)\n",
"- [How-to guide](/docs/how_to/vectorstores)\n",
"- [Integration-specific docs](/docs/integrations/vectorstores)\n",
"\n",
"## Retrievers\n",
"\n",
"LangChain `VectorStore` objects do not subclass [Runnable](https://api.python.langchain.com/en/latest/core_api_reference.html#module-langchain_core.runnables), and so cannot immediately be integrated into LangChain Expression Language [chains](/docs/concepts/#langchain-expression-language-lcel).\n",
"\n",
"LangChain [Retrievers](https://api.python.langchain.com/en/latest/core_api_reference.html#module-langchain_core.retrievers) are Runnables, so they implement a standard set of methods (e.g., synchronous and asynchronous `invoke` and `batch` operations) and are designed to be incorporated in LCEL chains.\n",
"\n",
"We can create a simple version of this ourselves, without subclassing `Retriever`. If we choose what method we wish to use to retrieve documents, we can create a runnable easily. Below we will build one around the `similarity_search` method:"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "f1461582-e569-4326-bd95-510f72edf019",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[[Document(page_content='Cats are independent pets that often enjoy their own space.', metadata={'source': 'mammal-pets-doc'})],\n",
" [Document(page_content='Goldfish are popular pets for beginners, requiring relatively simple care.', metadata={'source': 'fish-pets-doc'})]]"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"from typing import List\n",
"\n",
"from langchain_core.documents import Document\n",
"from langchain_core.runnables import RunnableLambda\n",
"\n",
"retriever = RunnableLambda(vectorstore.similarity_search).bind(k=1) # select top result\n",
"\n",
"retriever.batch([\"cat\", \"shark\"])"
]
},
{
"cell_type": "markdown",
"id": "a36d3f64-a8bc-4baa-b2ea-07e324a0143e",
"metadata": {},
"source": [
"Vectorstores implement an `as_retriever` method that will generate a Retriever, specifically a [VectorStoreRetriever](https://api.python.langchain.com/en/latest/vectorstores/langchain_core.vectorstores.VectorStoreRetriever.html). These retrievers include specific `search_type` and `search_kwargs` attributes that identify what methods of the underlying vector store to call, and how to parameterize them. For instance, we can replicate the above with the following:"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "4989fe5e-ac58-4751-bc35-f53ff885860c",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[[Document(page_content='Cats are independent pets that often enjoy their own space.', metadata={'source': 'mammal-pets-doc'})],\n",
" [Document(page_content='Goldfish are popular pets for beginners, requiring relatively simple care.', metadata={'source': 'fish-pets-doc'})]]"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"retriever = vectorstore.as_retriever(\n",
" search_type=\"similarity\",\n",
" search_kwargs={\"k\": 1},\n",
")\n",
"\n",
"retriever.batch([\"cat\", \"shark\"])"
]
},
{
"cell_type": "markdown",
"id": "6b79ded3-39ed-4aeb-8b70-cd36795ae239",
"metadata": {},
"source": [
"`VectorStoreRetriever` supports search types of `\"similarity\"` (default), `\"mmr\"` (maximum marginal relevance, described above), and `\"similarity_score_threshold\"`. We can use the latter to threshold documents output by the retriever by similarity score.\n",
"\n",
"Retrievers can easily be incorporated into more complex applications, such as retrieval-augmented generation (RAG) applications that combine a given question with retrieved context into a prompt for a LLM. Below we show a minimal example.\n",
"\n",
"```{=mdx}\n",
"import ChatModelTabs from \"@theme/ChatModelTabs\";\n",
"\n",
"<ChatModelTabs customVarName=\"llm\" />\n",
"```"
]
},
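As a side note before the RAG example, a score-threshold retriever might be configured as in this sketch (the threshold value is arbitrary and depends on the embedding model and distance metric; a new variable name is used so the `retriever` defined above is left untouched):

```python
# Only return documents whose relevance score is at least 0.5.
retriever_with_threshold = vectorstore.as_retriever(
    search_type="similarity_score_threshold",
    search_kwargs={"k": 4, "score_threshold": 0.5},
)
retriever_with_threshold.invoke("cat")
```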
{
"cell_type": "code",
"execution_count": 9,
"id": "c77b68bf-59f3-4416-9877-960f934c374d",
"metadata": {},
"outputs": [],
"source": [
"# | output: false\n",
"# | echo: false\n",
"\n",
"from langchain_openai import ChatOpenAI\n",
"\n",
"llm = ChatOpenAI(model=\"gpt-3.5-turbo\", temperature=0)"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "6f1ae0d0-0b4b-4da0-80ce-f82913052a83",
"metadata": {},
"outputs": [],
"source": [
"from langchain_core.prompts import ChatPromptTemplate\n",
"from langchain_core.runnables import RunnablePassthrough\n",
"\n",
"message = \"\"\"\n",
"Answer this question using the provided context only.\n",
"\n",
"{question}\n",
"\n",
"Context:\n",
"{context}\n",
"\"\"\"\n",
"\n",
"prompt = ChatPromptTemplate.from_messages([(\"human\", message)])\n",
"\n",
"rag_chain = {\"context\": retriever, \"question\": RunnablePassthrough()} | prompt | llm"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "b3c0d625-61e0-492e-b3a6-c40d383fca03",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Cats are independent pets that often enjoy their own space.\n"
]
}
],
"source": [
"response = rag_chain.invoke(\"tell me about cats\")\n",
"\n",
"print(response.content)"
]
},
{
"cell_type": "markdown",
"id": "3d9be7cb-2081-48a4-b6e4-d5e2d562ffd4",
"metadata": {},
"source": [
"## Learn more:\n",
"\n",
"Retrieval strategies can be rich and complex. For example:\n",
"\n",
"- We can [infer hard rules and filters](/docs/how_to/self_query/) from a query (e.g., \"using documents published after 2020\");\n",
"- We can [return documents that are linked](/docs/how_to/parent_document_retriever/) to the retrieved context in some way (e.g., via some document taxonomy);\n",
"- We can generate [multiple embeddings](/docs/how_to/multi_vector) for each unit of context;\n",
"- We can [ensemble results](/docs/how_to/ensemble_retriever) from multiple retrievers;\n",
"- We can assign weights to documents, e.g., to weigh [recent documents](/docs/how_to/time_weighted_vectorstore/) higher.\n",
"\n",
"The [retrievers](/docs/how_to#retrievers) section of the how-to guides covers these and other built-in retrieval strategies.\n",
"\n",
"It is also straightforward to extend the [BaseRetriever](https://api.python.langchain.com/en/latest/retrievers/langchain_core.retrievers.BaseRetriever.html) class in order to implement custom retrievers. See our how-to guide [here](/docs/how_to/custom_retriever)."
]
}
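As a quick illustration of that extension point, the sketch below defines a toy retriever that returns documents whose text contains the query string (the class name and matching logic are made up for illustration):

```python
from typing import List

from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever


class KeywordRetriever(BaseRetriever):
    """Toy retriever returning documents whose text contains the query."""

    documents: List[Document]
    k: int = 1

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> List[Document]:
        # Naive substring match over the in-memory documents defined earlier.
        matches = [
            doc
            for doc in self.documents
            if query.lower() in doc.page_content.lower()
        ]
        return matches[: self.k]


keyword_retriever = KeywordRetriever(documents=documents, k=1)
keyword_retriever.invoke("cat")
```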
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.4"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

@ -458,7 +458,22 @@
"metadata": {},
"source": [
"### Initializing agent\n",
"\n",
"First, get required package **LangGraph**"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%pip install --upgrade --quiet langgraph"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"We will use a prebuilt [LangGraph](/docs/concepts/#langgraph) agent to build our agent"
]
},
@ -472,7 +487,7 @@
"from langgraph.prebuilt import chat_agent_executor\n",
"\n",
"agent_executor = chat_agent_executor.create_tool_calling_executor(\n",
" llm, tools, system_message=system_message\n",
" llm, tools, messages_modifier=system_message\n",
")"
]
},
@ -711,7 +726,7 @@
"system_message = SystemMessage(content=system)\n",
"\n",
"agent = chat_agent_executor.create_tool_calling_executor(\n",
" llm, tools, system_message=system_message\n",
" llm, tools, messages_modifier=system_message\n",
")"
]
},

@ -84,7 +84,7 @@
"```\n",
"\n",
"\n",
"For more details, see our [Installation guide](/docs/installation).\n",
"For more details, see our [Installation guide](/docs/how_to/installation).\n",
"\n",
"### LangSmith\n",
"\n",

@ -9,7 +9,7 @@ sidebar_label: Overview
The following features have been added during the development of 0.1.x:
- Better streaming support via the [Event Streaming API](https://python.langchain.com/docs/expression_language/streaming/#using-stream-events)
- Better streaming support via the [Event Streaming API](https://python.langchain.com/docs/expression_language/streaming/#using-stream-events).
- [Standardized tool calling support](https://blog.langchain.dev/tool-calling-with-langchain/)
- A standardized interface for [structuring output](https://github.com/langchain-ai/langchain/discussions/18154)
- [@chain decorator](https://python.langchain.com/docs/expression_language/how_to/decorator/) to more easily create **RunnableLambdas**
@ -20,6 +20,7 @@ The following features have been added during the development of 0.1.x:
- Interoperability of chat message histories across most providers
- [Over 20+ partner packages in python](https://python.langchain.com/docs/integrations/platforms/) for popular integrations
## What's coming to LangChain?
- We've been working hard on [langgraph](https://python.langchain.com/docs/langgraph/). We will be building more capabilities on top of it and focusing on making it the go-to framework for agent architectures.

@ -1,175 +0,0 @@
---
sidebar_position: 1
sidebar_label: v0.2
---
# LangChain v0.2
LangChain v0.2 was released in May 2024. This release includes a number of breaking changes and deprecations. This document contains a guide on upgrading to 0.2.x, as well as a list of deprecations and breaking changes.
## Migration
This documentation will help you upgrade your code to LangChain `0.2.x`. To prepare for migration, we first recommend you take the following steps:
1. Install the 0.2.x versions of langchain-core, langchain and upgrade to recent versions of other packages that you may be using (e.g. langgraph, langchain-community, langchain-openai, etc.)
2. Verify that your code runs properly with the new packages (e.g., unit tests pass)
3. Install a recent version of `langchain-cli`, and use the tool to replace old imports used by your code with the new imports. (See instructions below.)
4. Manually resolve any remaining deprecation warnings
5. Re-run unit tests
### Upgrade to new imports
We created a tool to help migrate your code. This tool is still in **beta** and may not cover all cases, but
we hope that it will help you migrate your code more quickly.
The migration script has the following limitations:
1. It's limited to helping users move from old imports to new imports. It doesn't help address other deprecations.
2. It can't handle imports that involve `as`.
3. New imports are always placed in global scope, even if the old import that was replaced was located inside some local scope (e.g., function body).
4. It will likely miss some deprecated imports.
Here is an example of the import changes that the migration script can help apply automatically:
| From Package | To Package | Deprecated Import | New Import |
|---------------------|--------------------------|--------------------------------------------------------------------|---------------------------------------------------------------------|
| langchain | langchain-community | from langchain.vectorstores import InMemoryVectorStore | from langchain_community.vectorstores import InMemoryVectorStore |
| langchain-community | langchain_openai | from langchain_community.chat_models import ChatOpenAI | from langchain_openai import ChatOpenAI |
| langchain-community | langchain-core | from langchain_community.document_loaders import Blob | from langchain_core.document_loaders import Blob |
| langchain | langchain-core | from langchain.schema.document import Document | from langchain_core.documents import Document |
| langchain | langchain-text-splitters | from langchain.text_splitter import RecursiveCharacterTextSplitter | from langchain_text_splitters import RecursiveCharacterTextSplitter |
#### Deprecation timeline
We have two main types of deprecations:
1. Code that was moved from `langchain` into another package (e.g., `langchain-community`)
If you try to import it from `langchain`, the import will keep on working, but will raise a deprecation warning. The warning will provide a replacement import statement.
```shell
python -c "from langchain.document_loaders.markdown import UnstructuredMarkdownLoader"
```
```python
LangChainDeprecationWarning: Importing UnstructuredMarkdownLoader from langchain.document_loaders is deprecated. Please replace deprecated imports:
>> from langchain.document_loaders import UnstructuredMarkdownLoader
with new imports of:
>> from langchain_community.document_loaders import UnstructuredMarkdownLoader
```
We will continue supporting the imports in `langchain` until release 0.4 as long as the relevant package where the code lives is installed. (e.g., as long as `langchain_community` is installed.)
However, we advise users not to rely on these imports and instead migrate to the new imports. To help with this process, we're releasing a migration script via the LangChain CLI. See further instructions in the migration guide.
2. Code that has better alternatives available and will eventually be removed, so there's only a single way to do things. (e.g., `predict_messages` method in ChatModels has been deprecated in favor of `invoke`).
Many of these were marked for removal in 0.2. We have bumped the removal to 0.3.
#### Installation
```bash
pip install langchain-cli
langchain-cli --version # <-- Make sure the version is at least 0.0.22
```
#### Usage
Given that the migration script is not perfect, you should make sure you have a backup of your code first (e.g., using version control like `git`).
You will need to run the migration script **twice** as it only applies one import replacement per run.
For example, say your code still uses `from langchain.chat_models import ChatOpenAI`:
After the first run, you'll get: `from langchain_community.chat_models import ChatOpenAI`
After the second run, you'll get: `from langchain_openai import ChatOpenAI`
```bash
# Run a first time
# Will replace from langchain.chat_models import ChatOpenAI
langchain-cli migrate [path to code] --diff # Preview
langchain-cli migrate [path to code] # Apply
# Run a second time to apply more import replacements
langchain-cli migrate [path to code] --diff # Preview
langchain-cli migrate [path to code] # Apply
```
#### Other options
```bash
# See help menu
langchain-cli migrate --help
# Preview Changes without applying
langchain-cli migrate --diff [path to code]
# Run on code including ipython notebooks
# Apply all import updates except for updates from langchain to langchain-core
langchain-cli migrate --disable langchain_to_core --include-ipynb [path to code]
```
## Deprecations and breaking changes
This code contains a list of deprecations and removals in the `langchain` and `langchain-core` packages.
### Breaking changes in 0.2.0
As of release 0.2.0, `langchain` is required to be integration-agnostic. This means that code in `langchain` should not by default instantiate any specific chat models, LLMs, embedding models, vectorstores, etc.; instead, the user will be required to specify those explicitly.
The following functions and classes require an explicit LLM to be passed as an argument:
- `langchain.agents.agent_toolkits.vectorstore.toolkit.VectorStoreToolkit`
- `langchain.agents.agent_toolkits.vectorstore.toolkit.VectorStoreRouterToolkit`
- `langchain.chains.openai_functions.get_openapi_chain`
- `langchain.chains.router.MultiRetrievalQAChain.from_retrievers`
- `langchain.indexes.VectorStoreIndexWrapper.query`
- `langchain.indexes.VectorStoreIndexWrapper.query_with_sources`
- `langchain.indexes.VectorStoreIndexWrapper.aquery_with_sources`
- `langchain.chains.flare.FlareChain`
The following classes now require passing an explicit Embedding model as an argument:
- `langchain.indexes.VectorstoreIndexCreator`
The following code has been removed:
- `langchain.natbot.NatBotChain.from_default` removed in favor of the `from_llm` class method.
### Deprecations
We have two main types of deprecations:
1. Code that was moved from `langchain` into another package (e.g., `langchain-community`)
If you try to import it from `langchain`, the import will keep on working, but will raise a deprecation warning. The warning will provide a replacement import statement.
```shell
python -c "from langchain.document_loaders.markdown import UnstructuredMarkdownLoader"
```
```python
LangChainDeprecationWarning: Importing UnstructuredMarkdownLoader from langchain.document_loaders is deprecated. Please replace deprecated imports:
>> from langchain.document_loaders import UnstructuredMarkdownLoader
with new imports of:
>> from langchain_community.document_loaders import UnstructuredMarkdownLoader
```
We will continue supporting the imports in `langchain` until release 0.4 as long as the relevant package where the code lives is installed. (e.g., as long as `langchain_community` is installed.)
However, we advise users not to rely on these imports and instead migrate to the new imports. To help with this process, we're releasing a migration script via the LangChain CLI. See further instructions in the migration guide.
2. Code that has better alternatives available and will eventually be removed, so there's only a single way to do things. (e.g., `predict_messages` method in ChatModels has been deprecated in favor of `invoke`).
Many of these were marked for removal in 0.2. We have bumped the removal to 0.3.

@ -0,0 +1,897 @@
---
sidebar_position: 3
sidebar_label: Changes
---
# Deprecations and Breaking Changes
This code contains a list of deprecations and removals in the `langchain` and `langchain-core` packages.
New features and improvements are not listed here. See the [overview](/docs/versions/overview/) for a summary of what's new in this release.
## Breaking changes
As of release 0.2.0, `langchain` is required to be integration-agnostic. This means that code in `langchain` should not by default instantiate any specific chat models, llms, embedding models, vectorstores etc; instead, the user will be required to specify those explicitly.
The following functions and classes require an explicit LLM to be passed as an argument:
- `langchain.agents.agent_toolkits.vectorstore.toolkit.VectorStoreToolkit`
- `langchain.agents.agent_toolkits.vectorstore.toolkit.VectorStoreRouterToolkit`
- `langchain.chains.openai_functions.get_openapi_chain`
- `langchain.chains.router.MultiRetrievalQAChain.from_retrievers`
- `langchain.indexes.VectorStoreIndexWrapper.query`
- `langchain.indexes.VectorStoreIndexWrapper.query_with_sources`
- `langchain.indexes.VectorStoreIndexWrapper.aquery_with_sources`
- `langchain.chains.flare.FlareChain`
The following classes now require passing an explicit Embedding model as an argument:
- `langchain.indexes.VectorstoreIndexCreator`
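As an illustration only (this example is not part of the original release notes; the document loader and models are placeholders), passing the now-required embedding model and LLM explicitly looks roughly like this:
```python
# Hedged sketch: as of 0.2.0 the embedding model (and the LLM used for querying)
# must be supplied explicitly instead of being instantiated by default.
from langchain.indexes import VectorstoreIndexCreator
from langchain_community.document_loaders import TextLoader
from langchain_openai import ChatOpenAI, OpenAIEmbeddings

index = VectorstoreIndexCreator(embedding=OpenAIEmbeddings()).from_loaders(
    [TextLoader("state_of_the_union.txt")]  # placeholder document
)
# Querying likewise requires an explicit LLM argument:
answer = index.query("What did the speaker say about the economy?", llm=ChatOpenAI())
```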
The following code has been removed:
- `langchain.natbot.NatBotChain.from_default` removed in favor of the `from_llm` class method.
Behavior was changed for the following code:
### @tool decorator
The `@tool` decorator now assigns the function docstring as the tool description. Previously, the `@tool` decorator
used to prepend the function signature.
Before 0.2.0:
```python
from langchain_core.tools import tool

@tool
def my_tool(x: str) -> str:
    """Some description."""
    return "something"

print(my_tool.description)
```
Would result in: `my_tool: (x: str) -> str - Some description.`
As of 0.2.0:
It will result in: `Some description.`
## Code that moved to another package
Code that was moved from `langchain` into another package (e.g., `langchain-community`)
If you try to import it from `langchain`, the import will keep on working, but will raise a deprecation warning. The warning will provide a replacement import statement.
```shell
python -c "from langchain.document_loaders.markdown import UnstructuredMarkdownLoader"
```
```shell
LangChainDeprecationWarning: Importing UnstructuredMarkdownLoader from langchain.document_loaders is deprecated. Please replace deprecated imports:
>> from langchain.document_loaders import UnstructuredMarkdownLoader
with new imports of:
>> from langchain_community.document_loaders import UnstructuredMarkdownLoader
```
We will continue supporting the imports in `langchain` until release 0.4 as long as the relevant package where the code lives is installed. (e.g., as long as `langchain_community` is installed.)
However, we advise users not to rely on these imports and instead migrate to the new imports. To help with this process, we're releasing a migration script via the LangChain CLI. See further instructions in the migration guide.
## Code targeted for removal
Code that has better alternatives available and will eventually be removed, so there's only a single way to do things. (e.g., `predict_messages` method in ChatModels has been deprecated in favor of `invoke`).
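For instance (a minimal sketch, not part of the original guide; the model choice is illustrative), `predict`-style calls map onto `invoke` like this:
```python
# Hedged before/after sketch for the predict -> invoke deprecation.
from langchain_openai import ChatOpenAI

llm = ChatOpenAI()

# Deprecated (slated for removal in 0.3):
#   llm.predict("Tell me a joke")

# Preferred:
message = llm.invoke("Tell me a joke")  # returns an AIMessage
print(message.content)
```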
### astream events V1
If you are using `astream_events`, please review how to [migrate to astream events v2](/docs/versions/v0_2/migrating_astream_events).
### langchain_core
#### try_load_from_hub
In module: `utils.loading`
Deprecated: 0.1.30
Removal: 0.3.0
Alternative: Using the hwchase17/langchain-hub repo for prompts is deprecated. Please use https://smith.langchain.com/hub instead.
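A rough sketch of the suggested replacement (the prompt handle is illustrative, and `hub.pull` may require the `langchainhub` package to be installed):
```python
# Sketch only: pull prompts from the LangSmith hub instead of the deprecated
# hwchase17/langchain-hub repository.
from langchain import hub

prompt = hub.pull("hwchase17/react")  # illustrative prompt handle
```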
#### BaseLanguageModel.predict
In module: `language_models.base`
Deprecated: 0.1.7
Removal: 0.3.0
Alternative: invoke
#### BaseLanguageModel.predict_messages
In module: `language_models.base`
Deprecated: 0.1.7
Removal: 0.3.0
Alternative: invoke
#### BaseLanguageModel.apredict
In module: `language_models.base`
Deprecated: 0.1.7
Removal: 0.3.0
Alternative: ainvoke
#### BaseLanguageModel.apredict_messages
In module: `language_models.base`
Deprecated: 0.1.7
Removal: 0.3.0
Alternative: ainvoke
#### RunTypeEnum
In module: `tracers.schemas`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative: Use string instead.
#### TracerSessionV1Base
In module: `tracers.schemas`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative:
#### TracerSessionV1Create
In module: `tracers.schemas`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative:
#### TracerSessionV1
In module: `tracers.schemas`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative:
#### TracerSessionBase
In module: `tracers.schemas`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative:
#### TracerSession
In module: `tracers.schemas`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative:
#### BaseRun
In module: `tracers.schemas`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative: Run
#### LLMRun
In module: `tracers.schemas`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative: Run
#### ChainRun
In module: `tracers.schemas`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative: Run
#### ToolRun
In module: `tracers.schemas`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative: Run
#### BaseChatModel.__call__
In module: `language_models.chat_models`
Deprecated: 0.1.7
Removal: 0.3.0
Alternative: invoke
#### BaseChatModel.call_as_llm
In module: `language_models.chat_models`
Deprecated: 0.1.7
Removal: 0.3.0
Alternative: invoke
#### BaseChatModel.predict
In module: `language_models.chat_models`
Deprecated: 0.1.7
Removal: 0.3.0
Alternative: invoke
#### BaseChatModel.predict_messages
In module: `language_models.chat_models`
Deprecated: 0.1.7
Removal: 0.3.0
Alternative: invoke
#### BaseChatModel.apredict
In module: `language_models.chat_models`
Deprecated: 0.1.7
Removal: 0.3.0
Alternative: ainvoke
#### BaseChatModel.apredict_messages
In module: `language_models.chat_models`
Deprecated: 0.1.7
Removal: 0.3.0
Alternative: ainvoke
#### BaseLLM.__call__
In module: `language_models.llms`
Deprecated: 0.1.7
Removal: 0.3.0
Alternative: invoke
#### BaseLLM.predict
In module: `language_models.llms`
Deprecated: 0.1.7
Removal: 0.3.0
Alternative: invoke
#### BaseLLM.predict_messages
In module: `language_models.llms`
Deprecated: 0.1.7
Removal: 0.3.0
Alternative: invoke
#### BaseLLM.apredict
In module: `language_models.llms`
Deprecated: 0.1.7
Removal: 0.3.0
Alternative: ainvoke
#### BaseLLM.apredict_messages
In module: `language_models.llms`
Deprecated: 0.1.7
Removal: 0.3.0
Alternative: ainvoke
#### BaseRetriever.get_relevant_documents
In module: `retrievers`
Deprecated: 0.1.46
Removal: 0.3.0
Alternative: invoke
#### BaseRetriever.aget_relevant_documents
In module: `retrievers`
Deprecated: 0.1.46
Removal: 0.3.0
Alternative: ainvoke
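For illustration (not part of the original listing), the replacement is a direct swap; the toy retriever below exists only to keep the sketch self-contained:
```python
# Sketch: retrievers are Runnables, so the deprecated helpers map onto invoke/ainvoke.
from typing import List

from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever


class TinyRetriever(BaseRetriever):
    """Toy retriever used only to keep the example self-contained."""

    def _get_relevant_documents(self, query: str, *, run_manager) -> List[Document]:
        return [Document(page_content=f"dummy result for: {query}")]


retriever = TinyRetriever()

# Deprecated: retriever.get_relevant_documents("what is LangChain?")
docs = retriever.invoke("what is LangChain?")
print(docs[0].page_content)
```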
#### ChatPromptTemplate.from_role_strings
In module: `prompts.chat`
Deprecated: 0.0.1
Removal:
Alternative: from_messages classmethod
#### ChatPromptTemplate.from_strings
In module: `prompts.chat`
Deprecated: 0.0.1
Removal:
Alternative: from_messages classmethod
#### BaseTool.__call__
In module: `tools`
Deprecated: 0.1.47
Removal: 0.3.0
Alternative: invoke
#### convert_pydantic_to_openai_function
In module: `utils.function_calling`
Deprecated: 0.1.16
Removal: 0.3.0
Alternative: langchain_core.utils.function_calling.convert_to_openai_function()
#### convert_pydantic_to_openai_tool
In module: `utils.function_calling`
Deprecated: 0.1.16
Removal: 0.3.0
Alternative: langchain_core.utils.function_calling.convert_to_openai_tool()
#### convert_python_function_to_openai_function
In module: `utils.function_calling`
Deprecated: 0.1.16
Removal: 0.3.0
Alternative: langchain_core.utils.function_calling.convert_to_openai_function()
#### format_tool_to_openai_function
In module: `utils.function_calling`
Deprecated: 0.1.16
Removal: 0.3.0
Alternative: langchain_core.utils.function_calling.convert_to_openai_function()
#### format_tool_to_openai_tool
In module: `utils.function_calling`
Deprecated: 0.1.16
Removal: 0.3.0
Alternative: langchain_core.utils.function_calling.convert_to_openai_tool()
### langchain
#### AgentType
In module: `agents.agent_types`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative: Use new agent constructor methods like create_react_agent, create_json_agent, create_structured_chat_agent, etc.
#### Chain.__call__
In module: `chains.base`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative: invoke
#### Chain.acall
In module: `chains.base`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative: ainvoke
#### Chain.run
In module: `chains.base`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative: invoke
#### Chain.arun
In module: `chains.base`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative: ainvoke
#### Chain.apply
In module: `chains.base`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative: batch
#### LLMChain
In module: `chains.llm`
Deprecated: 0.1.17
Removal: 0.3.0
Alternative: RunnableSequence, e.g., `prompt | llm`
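As an illustration (not part of the original listing; the model and prompt are placeholders), the runnable-composition replacement looks roughly like this:
```python
# Sketch: compose prompt | llm instead of constructing an LLMChain.
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI

prompt = ChatPromptTemplate.from_template("Tell me a joke about {topic}")
llm = ChatOpenAI()

# Deprecated: LLMChain(llm=llm, prompt=prompt).run(topic="bears")
chain = prompt | llm
result = chain.invoke({"topic": "bears"})
print(result.content)
```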
#### LLMSingleActionAgent
In module: `agents.agent`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative: Use new agent constructor methods like create_react_agent, create_json_agent, create_structured_chat_agent, etc.
#### Agent
In module: `agents.agent`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative: Use new agent constructor methods like create_react_agent, create_json_agent, create_structured_chat_agent, etc.
#### OpenAIFunctionsAgent
In module: `agents.openai_functions_agent.base`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative: create_openai_functions_agent
#### ZeroShotAgent
In module: `agents.mrkl.base`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative: create_react_agent
#### MRKLChain
In module: `agents.mrkl.base`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative:
#### ConversationalAgent
In module: `agents.conversational.base`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative: create_react_agent
#### ConversationalChatAgent
In module: `agents.conversational_chat.base`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative: create_json_chat_agent
#### ChatAgent
In module: `agents.chat.base`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative: create_react_agent
#### OpenAIMultiFunctionsAgent
In module: `agents.openai_functions_multi_agent.base`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative: create_openai_tools_agent
#### ReActDocstoreAgent
In module: `agents.react.base`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative:
#### DocstoreExplorer
In module: `agents.react.base`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative:
#### ReActTextWorldAgent
In module: `agents.react.base`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative:
#### ReActChain
In module: `agents.react.base`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative:
#### SelfAskWithSearchAgent
In module: `agents.self_ask_with_search.base`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative: create_self_ask_with_search
#### SelfAskWithSearchChain
In module: `agents.self_ask_with_search.base`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative:
#### StructuredChatAgent
In module: `agents.structured_chat.base`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative: create_structured_chat_agent
#### RetrievalQA
In module: `chains.retrieval_qa.base`
Deprecated: 0.1.17
Removal: 0.3.0
Alternative: create_retrieval_chain
#### load_agent_from_config
In module: `agents.loading`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative:
#### load_agent
In module: `agents.loading`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative:
#### initialize_agent
In module: `agents.initialize`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative: Use new agent constructor methods like create_react_agent, create_json_agent, create_structured_chat_agent, etc.
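A hedged sketch of the constructor-style replacement (the model, tool, and hub prompt handle are illustrative, and the search tool needs its own extra dependency):
```python
# Sketch: replace initialize_agent(...) with an explicit constructor such as create_react_agent.
from langchain import hub
from langchain.agents import AgentExecutor, create_react_agent
from langchain_community.tools import DuckDuckGoSearchRun
from langchain_openai import ChatOpenAI

llm = ChatOpenAI()
tools = [DuckDuckGoSearchRun()]
prompt = hub.pull("hwchase17/react")  # a standard ReAct prompt

agent = create_react_agent(llm, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools)
agent_executor.invoke({"input": "What is LangChain?"})
```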
#### XMLAgent
In module: `agents.xml.base`
Deprecated: 0.1.0
Removal: 0.3.0
Alternative: create_xml_agent
#### CohereRerank
In module: `retrievers.document_compressors.cohere_rerank`
Deprecated: 0.0.30
Removal: 0.3.0
Alternative: langchain_cohere.CohereRerank
#### ConversationalRetrievalChain
In module: `chains.conversational_retrieval.base`
Deprecated: 0.1.17
Removal: 0.3.0
Alternative: create_history_aware_retriever together with create_retrieval_chain (see example in docstring)
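A rough sketch of the suggested combination, assuming `llm` and `retriever` are already defined elsewhere (the prompts here are illustrative, not canonical):
```python
# Sketch: create_history_aware_retriever + create_retrieval_chain in place of
# ConversationalRetrievalChain. `llm` and `retriever` are assumed to exist.
from langchain.chains import create_history_aware_retriever, create_retrieval_chain
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder

rephrase_prompt = ChatPromptTemplate.from_messages([
    MessagesPlaceholder("chat_history"),
    ("human", "{input}"),
    ("human", "Rephrase the question above as a standalone question."),
])
answer_prompt = ChatPromptTemplate.from_messages([
    ("system", "Answer using the following context:\n\n{context}"),
    MessagesPlaceholder("chat_history"),
    ("human", "{input}"),
])

history_aware_retriever = create_history_aware_retriever(llm, retriever, rephrase_prompt)
combine_docs_chain = create_stuff_documents_chain(llm, answer_prompt)
rag_chain = create_retrieval_chain(history_aware_retriever, combine_docs_chain)

rag_chain.invoke({"input": "tell me about cats", "chat_history": []})
```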
#### create_extraction_chain_pydantic
In module: `chains.openai_tools.extraction`
Deprecated: 0.1.14
Removal: 0.3.0
Alternative: with_structured_output method on chat models that support tool calling.
#### create_openai_fn_runnable
In module: `chains.structured_output.base`
Deprecated: 0.1.14
Removal: 0.3.0
Alternative: with_structured_output method on chat models that support tool calling.
#### create_structured_output_runnable
In module: `chains.structured_output.base`
Deprecated: 0.1.17
Removal: 0.3.0
Alternative: with_structured_output method on chat models that support tool calling.
#### create_openai_fn_chain
In module: `chains.openai_functions.base`
Deprecated: 0.1.1
Removal: 0.3.0
Alternative: create_openai_fn_runnable
#### create_structured_output_chain
In module: `chains.openai_functions.base`
Deprecated: 0.1.1
Removal: 0.3.0
Alternative: ChatOpenAI.with_structured_output
#### create_extraction_chain
In module: `chains.openai_functions.extraction`
Deprecated: 0.1.14
Removal: 0.3.0
Alternative: with_structured_output method on chat models that support tool calling.
#### create_extraction_chain_pydantic
In module: `chains.openai_functions.extraction`
Deprecated: 0.1.14
Removal: 0.3.0
Alternative: with_structured_output method on chat models that support tool calling.
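Since several of the alternatives above point at `with_structured_output`, here is a minimal sketch of that pattern (the model choice and schema are illustrative):
```python
# Sketch: with_structured_output replaces the create_extraction_chain /
# create_structured_output_* helpers on chat models that support tool calling.
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_openai import ChatOpenAI


class Person(BaseModel):
    """Information about a person."""

    name: str = Field(description="The person's name")
    age: int = Field(description="The person's age")


llm = ChatOpenAI(model="gpt-3.5-turbo-0125")
structured_llm = llm.with_structured_output(Person)
structured_llm.invoke("Alice is 30 years old.")
```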

@ -0,0 +1,90 @@
---
sidebar_position: 1
---
# LangChain v0.2
LangChain v0.2 was released in May 2024. This release includes a number of [breaking changes and deprecations](/docs/versions/v0_2/deprecations). This document contains a guide on upgrading to 0.2.x.
:::note Reference
- [Breaking Changes & Deprecations](/docs/versions/v0_2/deprecations)
:::
# Migration
This documentation will help you upgrade your code to LangChain `0.2.x`. To prepare for migration, we first recommend you take the following steps:
1. Install the 0.2.x versions of langchain-core, langchain and upgrade to recent versions of other packages that you may be using (e.g. langgraph, langchain-community, langchain-openai, etc.).
2. Verify that your code runs properly with the new packages (e.g., unit tests pass).
3. Install a recent version of `langchain-cli`, and use the tool to replace old imports used by your code with the new imports. (See instructions below.)
4. Manually resolve any remaining deprecation warnings.
5. Re-run unit tests.
## Upgrade to new imports
We created a tool to help migrate your code. This tool is still in **beta** and may not cover all cases, but
we hope that it will help you migrate your code more quickly.
The migration script has the following limitations:
1. It's limited to helping users move from old imports to new imports. It does not help address other deprecations.
2. It can't handle imports that involve `as`.
3. New imports are always placed in global scope, even if the old import that was replaced was located inside some local scope (e.g., function body).
4. It will likely miss some deprecated imports.
Here is an example of the import changes that the migration script can help apply automatically:
| From Package | To Package | Deprecated Import | New Import |
|---------------------|--------------------------|--------------------------------------------------------------------|---------------------------------------------------------------------|
| langchain | langchain-community | from langchain.vectorstores import InMemoryVectorStore | from langchain_community.vectorstores import InMemoryVectorStore |
| langchain-community | langchain_openai | from langchain_community.chat_models import ChatOpenAI | from langchain_openai import ChatOpenAI |
| langchain-community | langchain-core | from langchain_community.document_loaders import Blob | from langchain_core.document_loaders import Blob |
| langchain | langchain-core | from langchain.schema.document import Document | from langchain_core.documents import Document |
| langchain | langchain-text-splitters | from langchain.text_splitter import RecursiveCharacterTextSplitter | from langchain_text_splitters import RecursiveCharacterTextSplitter |
## Installation
```bash
pip install langchain-cli
langchain-cli --version # <-- Make sure the version is at least 0.0.22
```
## Usage
Given that the migration script is not perfect, you should make sure you have a backup of your code first (e.g., using version control like `git`).
You will need to run the migration script **twice** as it only applies one import replacement per run.
For example, say your code still uses `from langchain.chat_models import ChatOpenAI`:
After the first run, you'll get: `from langchain_community.chat_models import ChatOpenAI`
After the second run, you'll get: `from langchain_openai import ChatOpenAI`
```bash
# Run a first time
# Will replace from langchain.chat_models import ChatOpenAI
langchain-cli migrate [path to code] --diff # Preview
langchain-cli migrate [path to code] # Apply
# Run a second time to apply more import replacements
langchain-cli migrate [path to code] --diff # Preview
langchain-cli migrate [path to code] # Apply
```
### Other options
```bash
# See help menu
langchain-cli migrate --help
# Preview Changes without applying
langchain-cli migrate --diff [path to code]
# Run on code including ipython notebooks
# Apply all import updates except for updates from langchain to langchain-core
langchain-cli migrate --disable langchain_to_core --include-ipynb [path to code]
```

@ -0,0 +1,124 @@
---
sidebar_position: 2
sidebar_label: astream_events v2
---
# Migrating to Astream Events v2
:::danger
This migration guide is a work in progress and is not complete. Please hold off on migrating to astream_events v2 for now.
:::
We've added a `v2` of the astream_events API with the release of `0.2.0`. You can see this [PR](https://github.com/langchain-ai/langchain/pull/21638) for more details.
The `v2` version is a re-write of the `v1` version, and should be more efficient, with more consistent output for the events. The `v1` version of the API will be deprecated in favor of the `v2` version and will be removed in `0.4.0`.
Below is a list of changes between the `v1` and `v2` versions of the API.
### output for `on_chat_model_end`
In `v1`, the outputs associated with `on_chat_model_end` changed depending on whether the
chat model was run as a root level runnable or as part of a chain.
As a root level runnable the output was:
```python
"data": {"output": AIMessageChunk(content="hello world!", id='some id')}
```
As part of a chain the output was:
```
"data": {
"output": {
"generations": [
[
{
"generation_info": None,
"message": AIMessageChunk(
content="hello world!", id=AnyStr()
),
"text": "hello world!",
"type": "ChatGenerationChunk",
}
]
],
"llm_output": None,
}
},
```
As of `v2`, the output will always be the simpler representation:
```python
"data": {"output": AIMessageChunk(content="hello world!", id='some id')}
```
:::note
Non-chat models (i.e., regular LLMs) will consistently be associated with the more verbose format for now.
:::
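For illustration (this snippet is not part of the original guide; the model is a placeholder), consuming the simplified `v2` payload looks roughly like this:
```python
# Sketch: reading the simplified on_chat_model_end payload from astream_events v2.
import asyncio

from langchain_openai import ChatOpenAI


async def main() -> None:
    llm = ChatOpenAI()
    async for event in llm.astream_events("hello", version="v2"):
        if event["event"] == "on_chat_model_end":
            # In v2 this is always a single message object, not a nested dict.
            print(event["data"]["output"])


asyncio.run(main())
```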
### output for `on_retriever_end`
`on_retriever_end` output will always return a list of `Documents`.
Before:
```python
{
"data": {
"output": [
Document(...),
Document(...),
...
]
}
}
```
### Removed `on_retriever_stream`
The `on_retriever_stream` event was an artifact of the implementation and has been removed.
Full information associated with the event is already available in the `on_retriever_end` event.
Please use `on_retriever_end` instead.
### Removed `on_tool_stream`
The `on_tool_stream` event was an artifact of the implementation and has been removed.
Full information associated with the event is already available in the `on_tool_end` event.
Please use `on_tool_end` instead.
### Propagating Names
Names of runnables have been updated to be more consistent.
```python
model = GenericFakeChatModel(messages=infinite_cycle).configurable_fields(
messages=ConfigurableField(
id="messages",
name="Messages",
description="Messages return by the LLM",
)
)
```
In `v1`, the event name was `RunnableConfigurableFields`.
In `v2`, the event name is `GenericFakeChatModel`.
If you're filtering by event names, check if you need to update your filters.
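For example (a hedged sketch; `model` refers to the configurable model from the snippet above, and the loop must run inside an async function):
```python
# Sketch: filter astream_events by runnable name. In v2 the configurable model
# surfaces under its underlying class name, so v1-era name filters may need updating.
async for event in model.astream_events(
    "hello",
    version="v2",
    include_names=["GenericFakeChatModel"],  # was "RunnableConfigurableFields" in v1
):
    print(event["event"], event["name"])
```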
### RunnableRetry
Usage of [RunnableRetry](https://api.python.langchain.com/en/latest/runnables/langchain_core.runnables.retry.RunnableRetry.html)
within an LCEL chain being streamed generated an incorrect `on_chain_end` event in `v1` corresponding
to the failed runnable invocation that was being retried. This event has been removed in `v2`.
No action is required for this change.

@ -124,7 +124,7 @@ const config = {
/** @type {import('@docusaurus/preset-classic').ThemeConfig} */
({
announcementBar: {
content: 'You are viewing the <strong>preview</strong> LangChain v0.2 docs. Note that 0.2 Search features are currently unstable and in progress. View the <a href="/v0.1/docs/get_started/introduction/">stable 0.1 docs here</a>.',
content: 'LangChain 0.2 is out! Leave feedback on the v0.2 docs <a href="https://github.com/langchain-ai/langchain/discussions/21716">here</a>. You can view the v0.1 docs <a href="/v0.1/docs/get_started/introduction/">here</a>.',
isCloseable: true,
},
docs: {
@ -195,13 +195,32 @@ const config = {
{
type: "doc",
docId: "additional_resources/tutorials",
label: "Tutorials"
label: "3rd party tutorials"
},
{
type: "doc",
docId: "additional_resources/youtube",
label: "YouTube"
},
{
to: "/docs/additional_resources/arxiv_references",
label: "arXiv"
},
]
},
{
type: "dropdown",
label: "v0.2",
position: "right",
items: [
{
label: "v0.2",
href: "/docs/introduction"
},
{
label: "v0.1",
href: "https://python.langchain.com/v0.1/docs/get_started/introduction"
}
]
},
{
@ -310,9 +329,9 @@ const config = {
// this is linked to erick@langchain.dev currently
apiKey: "6c01842d6a88772ed2236b9c85806441",
indexName: "python-langchain",
indexName: "python-langchain-0.2",
contextualSearch: true,
contextualSearch: false,
},
}),

@ -0,0 +1,465 @@
"""Parse arXiv references from the documentation.
Generate a page with a table of the arXiv references with links to the documentation pages.
"""
import logging
import os
import re
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Dict, List, Set
from pydantic.v1 import BaseModel, root_validator
# TODO parse docstrings for arXiv references
# TODO Generate a page with a table of the references with correspondent modules/classes/functions.
logger = logging.getLogger(__name__)
_ROOT_DIR = Path(os.path.abspath(__file__)).parents[2]
DOCS_DIR = _ROOT_DIR / "docs" / "docs"
CODE_DIR = _ROOT_DIR / "libs"
ARXIV_ID_PATTERN = r"https://arxiv\.org/(abs|pdf)/(\d+\.\d+)"
@dataclass
class ArxivPaper:
"""ArXiv paper information."""
arxiv_id: str
referencing_docs: list[str] # TODO: Add the referencing docs
referencing_api_refs: list[str] # TODO: Add the referencing docs
title: str
authors: list[str]
abstract: str
url: str
published_date: str
def search_documentation_for_arxiv_references(docs_dir: Path) -> dict[str, set[str]]:
"""Search the documentation for arXiv references.
Search for the arXiv references in the documentation pages.
Note: It finds only the first arXiv reference in a line.
Args:
docs_dir: Path to the documentation root folder.
Returns:
dict: Dictionary with arxiv_id as key and set of file names as value.
"""
arxiv_url_pattern = re.compile(ARXIV_ID_PATTERN)
exclude_strings = {"file_path", "metadata", "link", "loader", "PyPDFLoader"}
# loop all the files (ipynb, mdx, md) in the docs folder
files = (
p.resolve()
for p in Path(docs_dir).glob("**/*")
if p.suffix in {".ipynb", ".mdx", ".md"}
)
arxiv_id2file_names: dict[str, set[str]] = {}
for file in files:
if "-checkpoint.ipynb" in file.name:
continue
with open(file, "r", encoding="utf-8") as f:
lines = f.readlines()
for line in lines:
if any(exclude_string in line for exclude_string in exclude_strings):
continue
matches = arxiv_url_pattern.search(line)
if matches:
arxiv_id = matches.group(2)
file_name = _get_doc_path(file.parts, file.suffix)
if arxiv_id not in arxiv_id2file_names:
arxiv_id2file_names[arxiv_id] = {file_name}
else:
arxiv_id2file_names[arxiv_id].add(file_name)
return arxiv_id2file_names
def convert_module_name_and_members_to_urls(
arxiv_id2module_name_and_members: dict[str, set[str]],
) -> dict[str, set[str]]:
arxiv_id2urls = {}
for arxiv_id, module_name_and_members in arxiv_id2module_name_and_members.items():
urls = set()
for module_name_and_member in module_name_and_members:
module_name, type_and_member = module_name_and_member.split(":")
if "$" in type_and_member:
type, member = type_and_member.split("$")
else:
type = type_and_member
member = ""
_namespace_parts = module_name.split(".")
if type == "module":
first_namespace_part = _namespace_parts[0]
if first_namespace_part.startswith("langchain_"):
first_namespace_part = first_namespace_part.replace(
"langchain_", ""
)
url = f"{first_namespace_part}_api_reference.html#module-{module_name}"
elif type in ["class", "function"]:
second_namespace_part = _namespace_parts[1]
url = f"{second_namespace_part}/{module_name}.{member}.html#{module_name}.{member}"
else:
raise ValueError(
f"Unknown type: {type} in the {module_name_and_member}."
)
urls.add(url)
arxiv_id2urls[arxiv_id] = urls
return arxiv_id2urls
def search_code_for_arxiv_references(code_dir: Path) -> dict[str, set[str]]:
"""Search the code for arXiv references.
Search for the arXiv references in the code.
Note: It finds only the first arXiv reference in a line.
Args:
code_dir: Path to the code root folder.
Returns:
dict: Dictionary with arxiv_id as key and set of module names as value.
module names encoded as:
<module_name>:module
<module_name>:class$<ClassName>
<module_name>:function$<function_name>
"""
arxiv_url_pattern = re.compile(ARXIV_ID_PATTERN)
# exclude_strings = {"file_path", "metadata", "link", "loader"}
class_pattern = re.compile(r"\s*class\s+(\w+).*:")
function_pattern = re.compile(r"\s*def\s+(\w+)")
# loop all the files (ipynb, mdx, md) in the docs folder
files = (
p.resolve()
for p in Path(code_dir).glob("**/*")
if p.suffix in {".py"} and "tests" not in p.parts and "scripts" not in p.parts
# ".md" files are excluded
)
arxiv_id2module_name_and_members: dict[str, set[str]] = {}
for file in files:
try:
with open(file, "r", encoding="utf-8") as f:
module_name = _get_module_name(file.parts)
class_or_function_started = "module"
for line in f.readlines():
# class line:
matches = class_pattern.search(line)
if matches:
class_name = matches.group(1)
class_or_function_started = f"class${class_name}"
# function line:
# not inside a class!
if "class" not in class_or_function_started:
matches = function_pattern.search(line)
if matches:
func_name = matches.group(1)
class_or_function_started = f"function${func_name}"
# arxiv line:
matches = arxiv_url_pattern.search(line)
if matches:
arxiv_id = matches.group(2)
module_name_and_member = (
f"{module_name}:{class_or_function_started}"
)
if arxiv_id not in arxiv_id2module_name_and_members:
arxiv_id2module_name_and_members[arxiv_id] = {
module_name_and_member
}
else:
arxiv_id2module_name_and_members[arxiv_id].add(
module_name_and_member
)
except UnicodeDecodeError:
# Skip files like this 'tests/integration_tests/examples/non-utf8-encoding.py'
logger.warning(f"Could not read the file {file}.")
# handle border cases:
# 1. {'langchain_experimental.pal_chain.base:class$PALChain', 'langchain_experimental.pal_chain.base:module' - remove}
for arxiv_id, module_name_and_members in arxiv_id2module_name_and_members.items():
module_name_and_member_deduplicated = set()
non_module_members = set()
for module_name_and_member in module_name_and_members:
if not module_name_and_member.endswith(":module"):
module_name_and_member_deduplicated.add(module_name_and_member)
non_module_members.add(module_name_and_member.split(":")[0])
for module_name_and_member in module_name_and_members:
if module_name_and_member.endswith(":module"):
if module_name_and_member.split(":")[0] in non_module_members:
continue
module_name_and_member_deduplicated.add(module_name_and_member)
arxiv_id2module_name_and_members[arxiv_id] = module_name_and_member_deduplicated
# 2. {'langchain.evaluation.scoring.prompt:module', 'langchain.evaluation.comparison.prompt:module'}
# only modules with 2-part namespaces are parsed into API Reference now! TODO fix this behavior
# leave only the modules with 2-part namespaces
arxiv_id2module_name_and_members_reduced = {}
for arxiv_id, module_name_and_members in arxiv_id2module_name_and_members.items():
module_name_and_member_reduced = set()
removed_modules = set()
for module_name_and_member in module_name_and_members:
if module_name_and_member.endswith(":module"):
if module_name_and_member.split(":")[0].count(".") <= 1:
module_name_and_member_reduced.add(module_name_and_member)
else:
removed_modules.add(module_name_and_member)
else:
module_name_and_member_reduced.add(module_name_and_member)
if module_name_and_member_reduced:
arxiv_id2module_name_and_members_reduced[arxiv_id] = (
module_name_and_member_reduced
)
if removed_modules:
logger.warning(
f"{arxiv_id}: Removed the following modules with 2+ -part namespaces: {removed_modules}."
)
return arxiv_id2module_name_and_members_reduced
def _get_doc_path(file_parts: tuple[str, ...], file_extension) -> str:
"""Get the relative path to the documentation page
from the absolute path of the file.
Remove file_extension
"""
res = []
for el in file_parts[::-1]:
res.append(el)
if el == "docs":
break
ret = "/".join(reversed(res))
return ret[: -len(file_extension)] if ret.endswith(file_extension) else ret
def _get_code_path(file_parts: tuple[str, ...]) -> str:
"""Get the relative path to the documentation page
from the absolute path of the file.
"""
res = []
for el in file_parts[::-1]:
res.append(el)
if el == "libs":
break
return "/".join(reversed(res))
def _get_module_name(file_parts: tuple[str, ...]) -> str:
"""Get the module name from the absolute path of the file."""
ns_parts = []
for el in file_parts[::-1]:
if str(el) == "__init__.py":
continue
ns_parts.insert(0, str(el).replace(".py", ""))
if el.startswith("langchain"):
break
return ".".join(ns_parts)
def compound_urls(
arxiv_id2file_names: dict[str, set[str]], arxiv_id2code_urls: dict[str, set[str]]
) -> dict[str, dict[str, set[str]]]:
arxiv_id2urls = dict()
for arxiv_id, code_urls in arxiv_id2code_urls.items():
arxiv_id2urls[arxiv_id] = {"api": code_urls}
# intersection of the two sets
if arxiv_id in arxiv_id2file_names:
arxiv_id2urls[arxiv_id]["docs"] = arxiv_id2file_names[arxiv_id]
for arxiv_id, file_names in arxiv_id2file_names.items():
if arxiv_id not in arxiv_id2code_urls:
arxiv_id2urls[arxiv_id] = {"docs": file_names}
# reverse sort by the arxiv_id (the newest papers first)
ret = dict(sorted(arxiv_id2urls.items(), key=lambda item: item[0], reverse=True))
return ret
def _format_doc_link(doc_paths: list[str]) -> list[str]:
return [
f"[{doc_path}](https://python.langchain.com/{doc_path})"
for doc_path in doc_paths
]
def _format_api_ref_link(
doc_paths: list[str], compact: bool = False
) -> list[str]: # TODO
# agents/langchain_core.agents.AgentAction.html#langchain_core.agents.AgentAction
ret = []
for doc_path in doc_paths:
module = doc_path.split("#")[1].replace("module-", "")
if compact and module.count(".") > 2:
# langchain_community.llms.oci_data_science_model_deployment_endpoint.OCIModelDeploymentTGI
# -> langchain_community.llms...OCIModelDeploymentTGI
module_parts = module.split(".")
module = f"{module_parts[0]}.{module_parts[1]}...{module_parts[-1]}"
ret.append(
f"[{module}](https://api.python.langchain.com/en/latest/{doc_path.split('langchain.com/')[-1]})"
)
return ret
def log_results(arxiv_id2urls):
arxiv_ids = arxiv_id2urls.keys()
doc_number, api_number = 0, 0
for urls in arxiv_id2urls.values():
if "docs" in urls:
doc_number += len(urls["docs"])
if "api" in urls:
api_number += len(urls["api"])
logger.info(
f"Found {len(arxiv_ids)} arXiv references in the {doc_number} docs and in {api_number} API Refs."
)
class ArxivAPIWrapper(BaseModel):
arxiv_search: Any #: :meta private:
arxiv_exceptions: Any # :meta private:
@root_validator()
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the python package exists in environment."""
try:
import arxiv
values["arxiv_search"] = arxiv.Search
values["arxiv_exceptions"] = (
arxiv.ArxivError,
arxiv.UnexpectedEmptyPageError,
arxiv.HTTPError,
)
except ImportError:
raise ImportError(
"Could not import arxiv python package. "
"Please install it with `pip install arxiv`."
)
return values
def get_papers(
self, arxiv_id2urls: dict[str, dict[str, set[str]]]
) -> list[ArxivPaper]:
"""
Performs an arxiv search and returns information about the papers found.
If an error occurs or no documents found, error text
is returned instead.
Args:
arxiv_id2urls: Dictionary with arxiv_id as key and dictionary
with sets of doc file names and API Ref urls.
Returns:
List of ArxivPaper objects.
""" # noqa: E501
def cut_authors(authors: list) -> list[str]:
if len(authors) > 3:
return [str(a) for a in authors[:3]] + [" et al."]
else:
return [str(a) for a in authors]
if not arxiv_id2urls:
return []
try:
arxiv_ids = list(arxiv_id2urls.keys())
results = self.arxiv_search(
id_list=arxiv_ids,
max_results=len(arxiv_ids),
).results()
except self.arxiv_exceptions as ex:
raise ex
papers = [
ArxivPaper(
arxiv_id=result.entry_id.split("/")[-1],
title=result.title,
authors=cut_authors(result.authors),
abstract=result.summary,
url=result.entry_id,
published_date=str(result.published.date()),
referencing_docs=urls["docs"] if "docs" in urls else [],
referencing_api_refs=urls["api"] if "api" in urls else [],
)
for result, urls in zip(results, arxiv_id2urls.values())
]
return papers
def generate_arxiv_references_page(file_name: str, papers: list[ArxivPaper]) -> None:
with open(file_name, "w") as f:
# Write the table headers
f.write("""# arXiv
LangChain implements the latest research in the field of Natural Language Processing.
This page contains `arXiv` papers referenced in the LangChain Documentation and API Reference.
## Summary
| arXiv id / Title | Authors | Published date 🔻 | LangChain Documentation and API Reference |
|------------------|---------|-------------------|-------------------------|
""")
for paper in papers:
refs = []
if paper.referencing_docs:
refs += [
"`Docs:` " + ", ".join(_format_doc_link(paper.referencing_docs))
]
if paper.referencing_api_refs:
refs += [
"`API:` "
+ ", ".join(
_format_api_ref_link(paper.referencing_api_refs, compact=True)
)
]
refs_str = ", ".join(refs)
title_link = f"[{paper.title}]({paper.url})"
f.write(
f"| {' | '.join([f'`{paper.arxiv_id}` {title_link}', ', '.join(paper.authors), paper.published_date, refs_str])}\n"
)
for paper in papers:
docs_refs = (
f"- **LangChain Documentation:** {', '.join(_format_doc_link(paper.referencing_docs))}"
if paper.referencing_docs
else ""
)
api_ref_refs = (
f"- **LangChain API Reference:** {', '.join(_format_api_ref_link(paper.referencing_api_refs))}"
if paper.referencing_api_refs
else ""
)
f.write(f"""
## {paper.title}
- **arXiv id:** {paper.arxiv_id}
- **Title:** {paper.title}
- **Authors:** {', '.join(paper.authors)}
- **Published Date:** {paper.published_date}
- **URL:** {paper.url}
{docs_refs}
{api_ref_refs}
**Abstract:** {paper.abstract}
""")
logger.info(f"Created the {file_name} file with {len(papers)} arXiv references.")
def main():
# search the documentation and the API Reference for arXiv references:
arxiv_id2module_name_and_members = search_code_for_arxiv_references(CODE_DIR)
arxiv_id2code_urls = convert_module_name_and_members_to_urls(
arxiv_id2module_name_and_members
)
arxiv_id2file_names = search_documentation_for_arxiv_references(DOCS_DIR)
arxiv_id2urls = compound_urls(arxiv_id2file_names, arxiv_id2code_urls)
log_results(arxiv_id2urls)
# get the arXiv paper information
papers = ArxivAPIWrapper().get_papers(arxiv_id2urls)
# generate the arXiv references page
output_file = str(DOCS_DIR / "additional_resources" / "arxiv_references.mdx")
generate_arxiv_references_page(output_file, papers)
if __name__ == "__main__":
main()

@ -21,7 +21,7 @@ if __name__ == "__main__":
with open(full_destination, "r") as f:
content = f.read()
# remove images
content = re.sub("\!\[.*?\]\((.*?)\)", "", content)
content = re.sub(r"\!\[.*?\]\((.*?)\)", "", content)
with open(full_destination, "w") as f:
f.write(content)
@ -39,7 +39,7 @@ sidebar_class_name: hidden
content = f.read()
# replace relative links
content = re.sub("\]\(\.\.\/", "](/docs/templates/", content)
content = re.sub(r"\]\(\.\.\/", "](/docs/templates/", content)
with open(templates_index_intermediate, "w") as f:
f.write(sidebar_hidden + content)

@ -1,9 +1,7 @@
import os
import sys
from pathlib import Path
from langchain_community import chat_models, llms
from langchain_core.language_models.chat_models import BaseChatModel, SimpleChatModel
from langchain_community import llms
from langchain_core.language_models.llms import LLM, BaseLLM
LLM_IGNORE = ("FakeListLLM", "OpenAIChat", "PromptLayerOpenAIChat")
@ -16,49 +14,85 @@ LLM_FEAT_TABLE_CORRECTION = {
}
CHAT_MODEL_IGNORE = ("FakeListChatModel", "HumanInputChatModel")
CHAT_MODEL_FEAT_TABLE_CORRECTION = {
"ChatMLflowAIGateway": {"_agenerate": False},
"PromptLayerChatOpenAI": {"_stream": False, "_astream": False},
"ChatKonko": {"_astream": False, "_agenerate": False},
CHAT_MODEL_FEAT_TABLE = {
"ChatAnthropic": {
"tool_calling": True,
"structured_output": True,
"package": "langchain-anthropic",
"link": "/docs/integrations/chat/anthropic/",
},
"ChatMistralAI": {
"tool_calling": True,
"structured_output": True,
"package": "langchain-mistralai",
"link": "/docs/integrations/chat/mistralai/",
},
"ChatFireworks": {
"tool_calling": True,
"structured_output": True,
"json_mode": True,
"package": "langchain-fireworks",
"link": "/docs/integrations/chat/fireworks/",
},
"AzureChatOpenAI": {
"tool_calling": True,
"structured_output": True,
"json_mode": True,
"package": "langchain-openai",
"link": "/docs/integrations/chat/azure_chat_openai/",
},
"ChatOpenAI": {
"tool_calling": True,
"structured_output": True,
"json_mode": True,
"package": "langchain-openai",
"link": "/docs/integrations/chat/openai/",
},
"ChatTogether": {
"tool_calling": True,
"structured_output": True,
"json_mode": True,
"package": "langchain-together",
"link": "/docs/integrations/chat/together/",
},
"ChatVertexAI": {
"tool_calling": True,
"structured_output": True,
"package": "langchain-google-vertexai",
"link": "/docs/integrations/chat/google_vertex_ai_palm/",
},
"ChatGroq": {
"tool_calling": True,
"structured_output": True,
"json_mode": True,
"package": "langchain-groq",
"link": "/docs/integrations/chat/groq/",
},
"ChatCohere": {
"tool_calling": True,
"structured_output": True,
"package": "langchain-cohere",
"link": "/docs/integrations/chat/cohere/",
},
"ChatBedrock": {
"tool_calling": True,
"package": "langchain-aws",
"link": "/docs/integrations/chat/bedrock/",
},
"ChatHuggingFace": {
"local": True,
"package": "langchain-huggingface",
"link": "/docs/integrations/chat/huggingface/",
},
"ChatOllama": {
"local": True,
"package": "langchain-community",
"link": "/docs/integrations/chat/ollama/",
},
"vLLM Chat (via ChatOpenAI)": {
"local": True,
"package": "langchain-community",
"link": "/docs/integrations/chat/vllm/",
},
}
@ -88,19 +122,14 @@ CHAT_MODEL_TEMPLATE = """\
---
sidebar_position: 0
sidebar_class_name: hidden
keywords: [compatibility, bind_tools, tool calling, function calling, structured output, with_structured_output]
keywords: [compatibility, bind_tools, tool calling, function calling, structured output, with_structured_output, json mode, local model]
---
# Chat models
## Features (natively supported)
All ChatModels implement the Runnable interface, which comes with default implementations of all methods, i.e. `ainvoke`, `batch`, `abatch`, `stream`, `astream`. This gives all ChatModels basic support for async, streaming and batch, which by default is implemented as below:
- *Async* support defaults to calling the respective sync method in asyncio's default thread pool executor. This lets other async functions in your application make progress while the ChatModel is being executed, by moving this call to a background thread.
- *Streaming* support defaults to returning an `Iterator` (or `AsyncIterator` in the case of async streaming) of a single value, the final result returned by the underlying ChatModel provider. This obviously doesn't give you token-by-token streaming, which requires native support from the ChatModel provider, but ensures your code that expects an iterator of tokens can work for any of our ChatModel integrations.
- *Batch* support defaults to calling the underlying ChatModel in parallel for each input by making use of a thread pool executor (in the sync batch case) or `asyncio.gather` (in the async batch case). The concurrency can be controlled with the `max_concurrency` key in `RunnableConfig`.
## Advanced features
Each ChatModel integration can optionally provide native implementations to truly enable async or streaming.
The table shows, for each integration, which features have been implemented with native support.
The following table shows all the chat models that support one or more advanced features.
{table}
@ -163,47 +192,30 @@ def get_llm_table():
def get_chat_model_table() -> str:
"""Get the table of chat models."""
feat_table = {}
for cm in chat_models.__all__:
feat_table[cm] = {}
cls = getattr(chat_models, cm)
if issubclass(cls, SimpleChatModel):
comparison_cls = SimpleChatModel
else:
comparison_cls = BaseChatModel
for feat in ("_stream", "_astream", "_agenerate"):
feat_table[cm][feat] = getattr(cls, feat) != getattr(comparison_cls, feat)
final_feats = {
k: v
for k, v in {**feat_table, **CHAT_MODEL_FEAT_TABLE_CORRECTION}.items()
if k not in CHAT_MODEL_IGNORE
}
header = [
"model",
"_agenerate",
"_stream",
"_astream",
"tool_calling",
"structured_output",
"json_mode",
"local",
"package",
]
title = [
"Model",
"Invoke",
"Async invoke",
"Stream",
"Async stream",
"[Tool calling](/docs/how_to/tool_calling/)",
"[Structured output](/docs/how_to/structured_output/)",
"Python Package",
"JSON mode",
"Local",
"Package",
]
rows = [title, [":-"] + [":-:"] * (len(title) - 1)]
for llm, feats in sorted(final_feats.items()):
for llm, feats in sorted(CHAT_MODEL_FEAT_TABLE.items()):
# Fields are in the order of the header
row = [llm, ""]
row = [
f"[{llm}]({feats['link']})",
]
for h in header[1:]:
value = feats.get(h)
index = header.index(h)
if h == "package":
row.append(value or "langchain-community")
else:

@ -27,7 +27,9 @@ class EscapePreprocessor(Preprocessor):
)
# rewrite .ipynb links to .md
cell.source = re.sub(
r"\[([^\]]*)\]\(([^)]*).ipynb\)", r"[\1](\2.md)", cell.source
r"\[([^\]]*)\]\((?![^\)]*//)([^)]*)\.ipynb\)",
r"[\1](\2.md)",
cell.source,
)
return cell, resources
@ -84,12 +86,8 @@ class CustomRegexRemovePreprocessor(Preprocessor):
pattern = re.compile(r"(?s)(?:\s*\Z)|(?:.*#\s*\|\s*output:\s*false.*)")
rtn = not pattern.match(cell.source)
if not rtn:
print("--remove--")
print(cell.source)
return False
else:
print("--keep--")
print(cell.source)
return True
def preprocess(self, nb, resources):

@ -11,7 +11,7 @@ def update_links(doc_path, docs_link):
content = f.read()
# replace relative links
content = re.sub("\]\(\.\/", f"]({docs_link}", content)
content = re.sub(r"\]\(\.\/", f"]({docs_link}", content)
with open(DOCS_DIR / doc_path, "w") as f:
f.write(content)

@ -21,12 +21,9 @@
module.exports = {
docs: [
{
type: "category",
label: "Introduction",
collapsed: false,
collapsible: false,
link: { type: "doc", id: "introduction" },
items: ["installation"],
type: "doc",
label: "Introduction",
id: "introduction",
},
{
type: "category",
@ -72,13 +69,24 @@ module.exports = {
collapsed: false,
collapsible: false,
items: [
"versions/overview",
"versions/release_policy",
"versions/packages",
{
type: "autogenerated",
dirName: "versions",
}
type: "category",
label: "v0.2",
link: {type: 'doc', id: 'versions/v0_2/index'},
collapsible: false,
collapsed: false,
items: [{
type: 'autogenerated',
dirName: 'versions/v0_2',
className: 'hidden',
}],
},
],
},
"security",
"security"
],
integrations: [
{

@ -147,7 +147,7 @@ nav, h1, h2, h3, h4 {
.theme-doc-sidebar-menu > .theme-doc-sidebar-item-category:not(:first-of-type),
.theme-doc-sidebar-menu > .theme-doc-sidebar-item-link,
.theme-doc-sidebar-menu > .theme-doc-sidebar-item-link.theme-doc-sidebar-item-link-level-1 {
.theme-doc-sidebar-menu > .theme-doc-sidebar-item-link.theme-doc-sidebar-item-link-level-1:not(:first-of-type) {
margin-top: 1rem;
}

@ -0,0 +1,22 @@
import React from "react";
import DocPaginator from "@theme-original/DocPaginator";
const BLACKLISTED_PATHS = ["/docs/how_to/", "/docs/tutorials/"];
export default function DocPaginatorWrapper(props) {
const [shouldHide, setShouldHide] = React.useState(false);
React.useEffect(() => {
if (typeof window === "undefined") return;
const currentPath = window.location.pathname;
if (BLACKLISTED_PATHS.some((path) => currentPath.includes(path))) {
setShouldHide(true);
}
}, []);
if (!shouldHide) {
// eslint-disable-next-line react/jsx-props-no-spreading
return <DocPaginator {...props} />;
}
return null;
}

@ -20,6 +20,10 @@
{
"source": "/docs/:path(.*/?)*",
"destination": "/v0.1/docs/:path*"
},
{
"source": "/cookbook(/?)",
"destination": "/v0.1/docs/cookbook/"
}
]
}

@ -1,3 +1,4 @@
-e ../libs/core
-e ../libs/langchain
-e ../libs/community
-e ../libs/experimental
@ -6,7 +7,5 @@ langchain-cohere
langchain-astradb
langchain-nvidia-ai-endpoints
langchain-elasticsearch
langchain-postgres
urllib3==1.26.18
nbconvert==7.16.4
langchain-core==0.1.52

@ -2,18 +2,16 @@
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Optional
from typing import Any, Optional
from langchain_core.language_models import BaseLanguageModel
from langchain_core.tools import Tool
from langchain_community.chains.openapi.chain import OpenAPIEndpointChain
from langchain_community.tools.openapi.utils.api_models import APIOperation
from langchain_community.tools.openapi.utils.openapi_utils import OpenAPISpec
from langchain_community.utilities.requests import Requests
if TYPE_CHECKING:
from langchain.chains.api.openapi.chain import OpenAPIEndpointChain
class NLATool(Tool):
"""Natural Language API Tool."""

@ -1045,11 +1045,43 @@ class CassandraCache(BaseCache):
"""
Cache that uses Cassandra / Astra DB as a backend.
Example:
.. code-block:: python
import cassio
from langchain_community.cache import CassandraCache
from langchain_core.globals import set_llm_cache
cassio.init(auto=True) # Requires env. variables, see CassIO docs
set_llm_cache(CassandraCache())
It uses a single Cassandra table.
The lookup keys (which get to form the primary key) are:
- prompt, a string
- llm_string, a deterministic str representation of the model parameters.
(needed to prevent collisions same-prompt-different-model collisions)
(needed to prevent same-prompt-different-model collisions)
Args:
session: an open Cassandra session.
Leave unspecified to use the global cassio init (see below)
keyspace: the keyspace to use for storing the cache.
Leave unspecified to use the global cassio init (see below)
table_name: name of the Cassandra table to use as cache
ttl_seconds: time-to-live for cache entries
(default: None, i.e. forever)
setup_mode: a value in langchain_community.utilities.cassandra.SetupMode.
Choose between SYNC, ASYNC and OFF - the latter if the Cassandra
table is guaranteed to exist already, for a faster initialization.
Note:
The session and keyspace parameters, when left out (or passed as None),
fall back to the globally-available cassio settings if any are available.
In other words, if 'cassio.init(...)' has already been executed
anywhere in the code, Cassandra-based objects
need not specify the connection parameters at all.
"""
def __init__(
@ -1061,25 +1093,21 @@ class CassandraCache(BaseCache):
skip_provisioning: bool = False,
setup_mode: CassandraSetupMode = CassandraSetupMode.SYNC,
):
"""
Initialize with a ready session and a keyspace name.
Args:
session (cassandra.cluster.Session): an open Cassandra session
keyspace (str): the keyspace to use for storing the cache
table_name (str): name of the Cassandra table to use as cache
ttl_seconds (optional int): time-to-live for cache entries
(default: None, i.e. forever)
"""
if skip_provisioning:
warn_deprecated(
"0.0.33", alternative="Use setup_mode=CassandraSetupMode.OFF instead."
"0.0.33",
name="skip_provisioning",
alternative=(
"setup_mode=langchain_community.utilities.cassandra.SetupMode.OFF"
),
pending=True,
)
try:
from cassio.table import ElasticCassandraTable
except (ImportError, ModuleNotFoundError):
raise ImportError(
"Could not import cassio python package. "
"Please install it with `pip install cassio`."
"Please install it with `pip install -U cassio`."
)
self.session = session
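As an aside on the reworked constructor above: a minimal usage sketch, assuming `cassio` is installed and its environment variables are set so that `cassio.init(auto=True)` can resolve the session and keyspace (the table name and TTL below are arbitrary illustrations):

import cassio
from langchain_community.cache import CassandraCache
from langchain_community.utilities.cassandra import SetupMode
from langchain_core.globals import set_llm_cache

# Resolve session/keyspace from environment variables (see CassIO docs).
cassio.init(auto=True)

# Session and keyspace are omitted, so the globally-available cassio settings are used.
# SetupMode.OFF skips provisioning, assuming the table already exists.
set_llm_cache(
    CassandraCache(
        table_name="my_llm_cache",  # arbitrary example table name
        ttl_seconds=3600,
        setup_mode=SetupMode.OFF,
    )
)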
@ -1170,6 +1198,7 @@ class CassandraCache(BaseCache):
await self.kv_cache.aclear()
# This constant is in fact a similarity - the 'distance' name is kept for compatibility:
CASSANDRA_SEMANTIC_CACHE_DEFAULT_DISTANCE_METRIC = "dot"
CASSANDRA_SEMANTIC_CACHE_DEFAULT_SCORE_THRESHOLD = 0.85
CASSANDRA_SEMANTIC_CACHE_DEFAULT_TABLE_NAME = "langchain_llm_semantic_cache"
@ -1182,60 +1211,117 @@ class CassandraSemanticCache(BaseCache):
Cache that uses Cassandra as a vector-store backend for semantic
(i.e. similarity-based) lookup.
Example:
.. code-block:: python
import cassio
from langchain_community.cache import CassandraSemanticCache
from langchain_core.globals import set_llm_cache
cassio.init(auto=True) # Requires env. variables, see CassIO docs
my_embedding = ...
set_llm_cache(CassandraSemanticCache(
embedding=my_embedding,
table_name="my_semantic_cache",
))
It uses a single (vector) Cassandra table and stores, in principle,
cached values from several LLMs, so the LLM's llm_string is part
of the rows' primary keys.
The similarity is based on one of several distance metrics (default: "dot").
If choosing another metric, the default threshold is to be re-tuned accordingly.
One can choose a similarity measure (default: "dot" for dot-product).
Choosing another one ("cos", "l2") almost certainly requires threshold tuning,
which may be in order nevertheless, even when sticking to "dot".
Args:
session: an open Cassandra session.
Leave unspecified to use the global cassio init (see below)
keyspace: the keyspace to use for storing the cache.
Leave unspecified to use the global cassio init (see below)
embedding: Embedding provider for semantic
encoding and search.
table_name: name of the Cassandra (vector) table
to use as cache. There is a default for "simple" usage, but
remember to explicitly specify different tables if several embedding
models coexist in your app (they cannot share one cache table).
distance_metric: an alias for the 'similarity_measure' parameter (see below).
As the "distance" terminology is misleading, please prefer
'similarity_measure' for clarity.
score_threshold: numeric value to use as
cutoff for the similarity searches
ttl_seconds: time-to-live for cache entries
(default: None, i.e. forever)
similarity_measure: which measure to adopt for similarity searches.
Note: this parameter is aliased by 'distance_metric' - however,
it is suggested to use the "similarity" terminology since this value
is in fact a similarity (i.e. higher means closer).
Note that at most one of the two parameters 'distance_metric'
and 'similarity_measure' can be provided.
setup_mode: a value in langchain_community.utilities.cassandra.SetupMode.
Choose between SYNC, ASYNC and OFF - the latter if the Cassandra
table is guaranteed to exist already, for a faster initialization.
Note:
The session and keyspace parameters, when left out (or passed as None),
fall back to the globally-available cassio settings if any are available.
In other words, if 'cassio.init(...)' has already been executed
anywhere in the code, Cassandra-based objects
need not specify the connection parameters at all.
"""
def __init__(
self,
session: Optional[CassandraSession],
keyspace: Optional[str],
embedding: Embeddings,
session: Optional[CassandraSession] = None,
keyspace: Optional[str] = None,
embedding: Optional[Embeddings] = None,
table_name: str = CASSANDRA_SEMANTIC_CACHE_DEFAULT_TABLE_NAME,
distance_metric: str = CASSANDRA_SEMANTIC_CACHE_DEFAULT_DISTANCE_METRIC,
distance_metric: Optional[str] = None,
score_threshold: float = CASSANDRA_SEMANTIC_CACHE_DEFAULT_SCORE_THRESHOLD,
ttl_seconds: Optional[int] = CASSANDRA_SEMANTIC_CACHE_DEFAULT_TTL_SECONDS,
skip_provisioning: bool = False,
similarity_measure: str = CASSANDRA_SEMANTIC_CACHE_DEFAULT_DISTANCE_METRIC,
setup_mode: CassandraSetupMode = CassandraSetupMode.SYNC,
):
"""
Initialize the cache with all relevant parameters.
Args:
session (cassandra.cluster.Session): an open Cassandra session
keyspace (str): the keyspace to use for storing the cache
embedding (Embedding): Embedding provider for semantic
encoding and search.
table_name (str): name of the Cassandra (vector) table
to use as cache
distance_metric (str, 'dot'): which measure to adopt for
similarity searches
score_threshold (optional float): numeric value to use as
cutoff for the similarity searches
ttl_seconds (optional int): time-to-live for cache entries
(default: None, i.e. forever)
The default score threshold is tuned to the default metric.
Tune it carefully yourself if switching to another distance metric.
"""
if skip_provisioning:
warn_deprecated(
"0.0.33", alternative="Use setup_mode=CassandraSetupMode.OFF instead."
"0.0.33",
name="skip_provisioning",
alternative=(
"setup_mode=langchain_community.utilities.cassandra.SetupMode.OFF"
),
pending=True,
)
try:
from cassio.table import MetadataVectorCassandraTable
except (ImportError, ModuleNotFoundError):
raise ImportError(
"Could not import cassio python package. "
"Please install it with `pip install cassio`."
"Please install it with `pip install -U cassio`."
)
if not embedding:
raise ValueError("Missing required parameter 'embedding'.")
# detect if legacy 'distance_metric' parameter used
if distance_metric is not None:
# if passed, takes precedence over 'similarity_measure', but we warn:
warn_deprecated(
"0.0.33",
name="distance_metric",
alternative="similarity_measure",
pending=True,
)
similarity_measure = distance_metric
self.session = session
self.keyspace = keyspace
self.embedding = embedding
self.table_name = table_name
self.distance_metric = distance_metric
self.similarity_measure = similarity_measure
self.score_threshold = score_threshold
self.ttl_seconds = ttl_seconds
@ -1347,7 +1433,7 @@ class CassandraSemanticCache(BaseCache):
vector=prompt_embedding,
metadata={"_llm_string_hash": _hash(llm_string)},
n=1,
metric=self.distance_metric,
metric=self.similarity_measure,
metric_threshold=self.score_threshold,
)
)
@ -1378,7 +1464,7 @@ class CassandraSemanticCache(BaseCache):
vector=prompt_embedding,
metadata={"_llm_string_hash": _hash(llm_string)},
n=1,
metric=self.distance_metric,
metric=self.similarity_measure,
metric_threshold=self.score_threshold,
)
)
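To round off the semantic-cache changes above, a small sketch of the keyword-style constructor (an illustration only; `OpenAIEmbeddings` stands in for any `Embeddings` implementation, and the threshold shown is simply the documented default for "dot"):

import cassio
from langchain_community.cache import CassandraSemanticCache
from langchain_core.globals import set_llm_cache
from langchain_openai import OpenAIEmbeddings  # any Embeddings implementation works here

cassio.init(auto=True)  # requires env. variables, see CassIO docs

set_llm_cache(
    CassandraSemanticCache(
        embedding=OpenAIEmbeddings(),
        table_name="my_semantic_cache",  # keep one table per embedding model
        similarity_measure="dot",        # the default; "cos" / "l2" need a re-tuned threshold
        score_threshold=0.85,
    )
)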

@ -6,6 +6,12 @@ from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.outputs import LLMResult
MODEL_COST_PER_1K_TOKENS = {
# GPT-4o input
"gpt-4o": 0.005,
"gpt-4o-2024-05-13": 0.005,
# GPT-4o output
"gpt-4o-completion": 0.015,
"gpt-4o-2024-05-13-completion": 0.015,
# GPT-4 input
"gpt-4": 0.03,
"gpt-4-0314": 0.03,

@ -0,0 +1,24 @@
"""
Chains module for langchain_community
This module contains the community chains.
"""
import importlib
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from langchain_community.chains.pebblo_retrieval.base import PebbloRetrievalQA
__all__ = ["PebbloRetrievalQA"]
_module_lookup = {
"PebbloRetrievalQA": "langchain_community.chains.pebblo_retrieval.base"
}
def __getattr__(name: str) -> Any:
if name in _module_lookup:
module = importlib.import_module(_module_lookup[name])
return getattr(module, name)
raise AttributeError(f"module {__name__} has no attribute {name}")
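The lookup table above defers the heavy submodule import until the attribute is first accessed (PEP 562 module `__getattr__`); a quick sketch of what a caller sees:

# Importing the name triggers the __getattr__ hook, which loads the submodule lazily.
from langchain_community.chains import PebbloRetrievalQA

print(PebbloRetrievalQA.__module__)
# -> langchain_community.chains.pebblo_retrieval.base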

@ -0,0 +1,218 @@
"""
Pebblo Retrieval Chain with Identity & Semantic Enforcement for question-answering
against a vector database.
"""
import inspect
from typing import Any, Dict, List, Optional
from langchain.chains.base import Chain
from langchain.chains.combine_documents.base import BaseCombineDocumentsChain
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain_core.documents import Document
from langchain_core.language_models import BaseLanguageModel
from langchain_core.pydantic_v1 import Extra, Field, validator
from langchain_core.vectorstores import VectorStoreRetriever
from langchain_community.chains.pebblo_retrieval.enforcement_filters import (
SUPPORTED_VECTORSTORES,
set_enforcement_filters,
)
from langchain_community.chains.pebblo_retrieval.models import (
AuthContext,
SemanticContext,
)
class PebbloRetrievalQA(Chain):
"""
Retrieval Chain with Identity & Semantic Enforcement for question-answering
against a vector database.
"""
combine_documents_chain: BaseCombineDocumentsChain
"""Chain to use to combine the documents."""
input_key: str = "query" #: :meta private:
output_key: str = "result" #: :meta private:
return_source_documents: bool = False
"""Return the source documents or not."""
retriever: VectorStoreRetriever = Field(exclude=True)
"""VectorStore to use for retrieval."""
auth_context_key: str = "auth_context" #: :meta private:
"""Authentication context for identity enforcement."""
semantic_context_key: str = "semantic_context" #: :meta private:
"""Semantic context for semantic enforcement."""
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
"""Run get_relevant_text and llm on input query.
If chain has 'return_source_documents' as 'True', returns
the retrieved documents as well under the key 'source_documents'.
Example:
.. code-block:: python
res = indexqa({'query': 'This is my query'})
answer, docs = res['result'], res['source_documents']
"""
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
question = inputs[self.input_key]
auth_context = inputs.get(self.auth_context_key)
semantic_context = inputs.get(self.semantic_context_key)
accepts_run_manager = (
"run_manager" in inspect.signature(self._get_docs).parameters
)
if accepts_run_manager:
docs = self._get_docs(
question, auth_context, semantic_context, run_manager=_run_manager
)
else:
docs = self._get_docs(question, auth_context, semantic_context) # type: ignore[call-arg]
answer = self.combine_documents_chain.run(
input_documents=docs, question=question, callbacks=_run_manager.get_child()
)
if self.return_source_documents:
return {self.output_key: answer, "source_documents": docs}
else:
return {self.output_key: answer}
async def _acall(
self,
inputs: Dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
"""Run get_relevant_text and llm on input query.
If chain has 'return_source_documents' as 'True', returns
the retrieved documents as well under the key 'source_documents'.
Example:
.. code-block:: python
res = indexqa({'query': 'This is my query'})
answer, docs = res['result'], res['source_documents']
"""
_run_manager = run_manager or AsyncCallbackManagerForChainRun.get_noop_manager()
question = inputs[self.input_key]
auth_context = inputs.get(self.auth_context_key)
semantic_context = inputs.get(self.semantic_context_key)
accepts_run_manager = (
"run_manager" in inspect.signature(self._aget_docs).parameters
)
if accepts_run_manager:
docs = await self._aget_docs(
question, auth_context, semantic_context, run_manager=_run_manager
)
else:
docs = await self._aget_docs(question, auth_context, semantic_context) # type: ignore[call-arg]
answer = await self.combine_documents_chain.arun(
input_documents=docs, question=question, callbacks=_run_manager.get_child()
)
if self.return_source_documents:
return {self.output_key: answer, "source_documents": docs}
else:
return {self.output_key: answer}
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
allow_population_by_field_name = True
@property
def input_keys(self) -> List[str]:
"""Input keys.
:meta private:
"""
return [self.input_key, self.auth_context_key, self.semantic_context_key]
@property
def output_keys(self) -> List[str]:
"""Output keys.
:meta private:
"""
_output_keys = [self.output_key]
if self.return_source_documents:
_output_keys += ["source_documents"]
return _output_keys
@property
def _chain_type(self) -> str:
"""Return the chain type."""
return "pebblo_retrieval_qa"
@classmethod
def from_chain_type(
cls,
llm: BaseLanguageModel,
chain_type: str = "stuff",
chain_type_kwargs: Optional[dict] = None,
**kwargs: Any,
) -> "PebbloRetrievalQA":
"""Load chain from chain type."""
from langchain.chains.question_answering import load_qa_chain
_chain_type_kwargs = chain_type_kwargs or {}
combine_documents_chain = load_qa_chain(
llm, chain_type=chain_type, **_chain_type_kwargs
)
return cls(combine_documents_chain=combine_documents_chain, **kwargs)
@validator("retriever", pre=True, always=True)
def validate_vectorstore(
cls, retriever: VectorStoreRetriever
) -> VectorStoreRetriever:
"""
Validate that the vectorstore of the retriever is supported vectorstores.
"""
if not any(
isinstance(retriever.vectorstore, supported_class)
for supported_class in SUPPORTED_VECTORSTORES
):
raise ValueError(
f"Vectorstore must be an instance of one of the supported "
f"vectorstores: {SUPPORTED_VECTORSTORES}. "
f"Got {type(retriever.vectorstore).__name__} instead."
)
return retriever
def _get_docs(
self,
question: str,
auth_context: Optional[AuthContext],
semantic_context: Optional[SemanticContext],
*,
run_manager: CallbackManagerForChainRun,
) -> List[Document]:
"""Get docs."""
set_enforcement_filters(self.retriever, auth_context, semantic_context)
return self.retriever.get_relevant_documents(
question, callbacks=run_manager.get_child()
)
async def _aget_docs(
self,
question: str,
auth_context: Optional[AuthContext],
semantic_context: Optional[SemanticContext],
*,
run_manager: AsyncCallbackManagerForChainRun,
) -> List[Document]:
"""Get docs."""
set_enforcement_filters(self.retriever, auth_context, semantic_context)
return await self.retriever.aget_relevant_documents(
question, callbacks=run_manager.get_child()
)
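A compact end-to-end sketch of the new chain, assuming `qdrant-client` and `langchain-openai` are installed and an OpenAI key is configured; the document, identities, and FakeEmbeddings are placeholders for demonstration only:

from langchain_community.chains import PebbloRetrievalQA
from langchain_community.chains.pebblo_retrieval.models import AuthContext
from langchain_community.embeddings import FakeEmbeddings  # demo-only embeddings
from langchain_community.vectorstores import Qdrant
from langchain_openai import ChatOpenAI

# Qdrant is one of the two supported vectorstores; ":memory:" keeps the demo local.
vectorstore = Qdrant.from_texts(
    ["Employees get 25 days of annual leave."],
    FakeEmbeddings(size=32),
    location=":memory:",
    collection_name="pebblo_demo",
    metadatas=[{"authorized_identities": ["hr-group"]}],
)

chain = PebbloRetrievalQA.from_chain_type(
    llm=ChatOpenAI(),
    chain_type="stuff",
    retriever=vectorstore.as_retriever(),
)

result = chain.invoke(
    {
        "query": "What is the leave policy?",
        "auth_context": AuthContext(user_id="alice@example.com", user_auth=["hr-group"]),
        "semantic_context": None,
    }
)
print(result["result"])

Note that the chain declares auth_context and semantic_context as input keys, so both must be present in the input dict (passing None is acceptable).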

@ -0,0 +1,265 @@
"""
Identity & Semantic Enforcement filters for PebbloRetrievalQA chain:
This module contains methods for applying Identity and Semantic Enforcement filters
in the PebbloRetrievalQA chain.
These filters are used to control the retrieval of documents based on authorization and
semantic context.
The Identity Enforcement filter ensures that only authorized identities can access
certain documents, while the Semantic Enforcement filter controls document retrieval
based on semantic context.
The methods in this module are designed to work with different types of vector stores.
"""
import logging
from typing import List, Optional, Union
from langchain_core.vectorstores import VectorStoreRetriever
from langchain_community.chains.pebblo_retrieval.models import (
AuthContext,
SemanticContext,
)
from langchain_community.vectorstores import Pinecone, Qdrant
logger = logging.getLogger(__name__)
SUPPORTED_VECTORSTORES = [Pinecone, Qdrant]
def set_enforcement_filters(
retriever: VectorStoreRetriever,
auth_context: Optional[AuthContext],
semantic_context: Optional[SemanticContext],
) -> None:
"""
Set identity and semantic enforcement filters in the retriever.
"""
if auth_context is not None:
_set_identity_enforcement_filter(retriever, auth_context)
if semantic_context is not None:
_set_semantic_enforcement_filter(retriever, semantic_context)
def _apply_qdrant_semantic_filter(
search_kwargs: dict, semantic_context: Optional[SemanticContext]
) -> None:
"""
Set semantic enforcement filter in search_kwargs for Qdrant vectorstore.
"""
try:
from qdrant_client.http import models as rest
except ImportError as e:
raise ValueError(
"Could not import `qdrant-client.http` python package. "
"Please install it with `pip install qdrant-client`."
) from e
# Create a semantic enforcement filter condition
semantic_filters: List[
Union[
rest.FieldCondition,
rest.IsEmptyCondition,
rest.IsNullCondition,
rest.HasIdCondition,
rest.NestedCondition,
rest.Filter,
]
] = []
if (
semantic_context is not None
and semantic_context.pebblo_semantic_topics is not None
):
semantic_topics_filter = rest.FieldCondition(
key="metadata.pebblo_semantic_topics",
match=rest.MatchAny(any=semantic_context.pebblo_semantic_topics.deny),
)
semantic_filters.append(semantic_topics_filter)
if (
semantic_context is not None
and semantic_context.pebblo_semantic_entities is not None
):
semantic_entities_filter = rest.FieldCondition(
key="metadata.pebblo_semantic_entities",
match=rest.MatchAny(any=semantic_context.pebblo_semantic_entities.deny),
)
semantic_filters.append(semantic_entities_filter)
# If 'filter' already exists in search_kwargs
if "filter" in search_kwargs:
existing_filter: rest.Filter = search_kwargs["filter"]
# Check if existing_filter is a qdrant-client filter
if isinstance(existing_filter, rest.Filter):
# If 'must_not' condition exists in the existing filter
if isinstance(existing_filter.must_not, list):
# Any existing 'pebblo_semantic_topics' or 'pebblo_semantic_entities'
# conditions are replaced by the enforced ones below
new_must_not_conditions: List[
Union[
rest.FieldCondition,
rest.IsEmptyCondition,
rest.IsNullCondition,
rest.HasIdCondition,
rest.NestedCondition,
rest.Filter,
]
] = []
# Drop semantic filter conditions if already present
for condition in existing_filter.must_not:
if hasattr(condition, "key"):
if condition.key == "metadata.pebblo_semantic_topics":
continue
if condition.key == "metadata.pebblo_semantic_entities":
continue
new_must_not_conditions.append(condition)
# Add semantic enforcement filters to 'must_not' conditions
existing_filter.must_not = new_must_not_conditions
existing_filter.must_not.extend(semantic_filters)
else:
# Set 'must_not' condition with semantic enforcement filters
existing_filter.must_not = semantic_filters
else:
raise TypeError(
"Using dict as a `filter` is deprecated. "
"Please use qdrant-client filters directly: "
"https://qdrant.tech/documentation/concepts/filtering/"
)
else:
# If 'filter' does not exist in search_kwargs, create it
search_kwargs["filter"] = rest.Filter(must_not=semantic_filters)
def _apply_qdrant_authorization_filter(
search_kwargs: dict, auth_context: Optional[AuthContext]
) -> None:
"""
Set identity enforcement filter in search_kwargs for Qdrant vectorstore.
"""
try:
from qdrant_client.http import models as rest
except ImportError as e:
raise ValueError(
"Could not import `qdrant-client.http` python package. "
"Please install it with `pip install qdrant-client`."
) from e
if auth_context is not None:
# Create an identity enforcement filter condition
identity_enforcement_filter = rest.FieldCondition(
key="metadata.authorized_identities",
match=rest.MatchAny(any=auth_context.user_auth),
)
else:
return
# If 'filter' already exists in search_kwargs
if "filter" in search_kwargs:
existing_filter: rest.Filter = search_kwargs["filter"]
# Check if existing_filter is a qdrant-client filter
if isinstance(existing_filter, rest.Filter):
# If 'must' exists in the existing filter
if existing_filter.must:
new_must_conditions: List[
Union[
rest.FieldCondition,
rest.IsEmptyCondition,
rest.IsNullCondition,
rest.HasIdCondition,
rest.NestedCondition,
rest.Filter,
]
] = []
# Drop 'authorized_identities' filter condition if already present
for condition in existing_filter.must:
if (
hasattr(condition, "key")
and condition.key == "metadata.authorized_identities"
):
continue
new_must_conditions.append(condition)
# Add identity enforcement filter to 'must' conditions
existing_filter.must = new_must_conditions
existing_filter.must.append(identity_enforcement_filter)
else:
# Set 'must' condition with identity enforcement filter
existing_filter.must = [identity_enforcement_filter]
else:
raise TypeError(
"Using dict as a `filter` is deprecated. "
"Please use qdrant-client filters directly: "
"https://qdrant.tech/documentation/concepts/filtering/"
)
else:
# If 'filter' does not exist in search_kwargs, create it
search_kwargs["filter"] = rest.Filter(must=[identity_enforcement_filter])
def _apply_pinecone_semantic_filter(
search_kwargs: dict, semantic_context: Optional[SemanticContext]
) -> None:
"""
Set semantic enforcement filter in search_kwargs for Pinecone vectorstore.
"""
# Check if semantic_context is provided
if semantic_context is not None:
if semantic_context.pebblo_semantic_topics is not None:
# Add pebblo_semantic_topics filter to search_kwargs
search_kwargs.setdefault("filter", {})["pebblo_semantic_topics"] = {
"$nin": semantic_context.pebblo_semantic_topics.deny
}
if semantic_context.pebblo_semantic_entities is not None:
# Add pebblo_semantic_entities filter to search_kwargs
search_kwargs.setdefault("filter", {})["pebblo_semantic_entities"] = {
"$nin": semantic_context.pebblo_semantic_entities.deny
}
def _apply_pinecone_authorization_filter(
search_kwargs: dict, auth_context: Optional[AuthContext]
) -> None:
"""
Set identity enforcement filter in search_kwargs for Pinecone vectorstore.
"""
if auth_context is not None:
search_kwargs.setdefault("filter", {})["authorized_identities"] = {
"$in": auth_context.user_auth
}
def _set_identity_enforcement_filter(
retriever: VectorStoreRetriever, auth_context: Optional[AuthContext]
) -> None:
"""
Set identity enforcement filter in search_kwargs.
This method sets the identity enforcement filter in the search_kwargs
of the retriever based on the type of the vectorstore.
"""
search_kwargs = retriever.search_kwargs
if isinstance(retriever.vectorstore, Pinecone):
_apply_pinecone_authorization_filter(search_kwargs, auth_context)
elif isinstance(retriever.vectorstore, Qdrant):
_apply_qdrant_authorization_filter(search_kwargs, auth_context)
def _set_semantic_enforcement_filter(
retriever: VectorStoreRetriever, semantic_context: Optional[SemanticContext]
) -> None:
"""
Set semantic enforcement filter in search_kwargs.
This method sets the semantic enforcement filter in the search_kwargs
of the retriever based on the type of the vectorstore.
"""
search_kwargs = retriever.search_kwargs
if isinstance(retriever.vectorstore, Pinecone):
_apply_pinecone_semantic_filter(search_kwargs, semantic_context)
elif isinstance(retriever.vectorstore, Qdrant):
_apply_qdrant_semantic_filter(search_kwargs, semantic_context)
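To make the filter shapes concrete, a tiny sketch using the Pinecone-style helpers above directly (they only mutate a plain `search_kwargs` dict, so no vector store is needed; the identities and topics are illustrative, and the underscore-prefixed helpers are internal):

from langchain_community.chains.pebblo_retrieval.enforcement_filters import (
    _apply_pinecone_authorization_filter,
    _apply_pinecone_semantic_filter,
)
from langchain_community.chains.pebblo_retrieval.models import (
    AuthContext,
    SemanticContext,
    SemanticTopics,
)

search_kwargs: dict = {}
_apply_pinecone_authorization_filter(
    search_kwargs, AuthContext(user_id="alice@example.com", user_auth=["hr-group"])
)
_apply_pinecone_semantic_filter(
    search_kwargs,
    SemanticContext(pebblo_semantic_topics=SemanticTopics(deny=["harmful-advice"])),
)

# search_kwargs now carries both enforcement clauses:
# {"filter": {"authorized_identities": {"$in": ["hr-group"]},
#             "pebblo_semantic_topics": {"$nin": ["harmful-advice"]}}}
print(search_kwargs)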

@ -0,0 +1,62 @@
"""Models for the PebbloRetrievalQA chain."""
from typing import Any, List, Optional
from langchain_core.pydantic_v1 import BaseModel
class AuthContext(BaseModel):
"""Class for an authorization context."""
name: Optional[str] = None
user_id: str
user_auth: List[str]
"""List of user authorizations, which may include their User ID and
the groups they are part of"""
class SemanticEntities(BaseModel):
"""Class for a semantic entity filter."""
deny: List[str]
class SemanticTopics(BaseModel):
"""Class for a semantic topic filter."""
deny: List[str]
class SemanticContext(BaseModel):
"""Class for a semantic context."""
pebblo_semantic_entities: Optional[SemanticEntities] = None
pebblo_semantic_topics: Optional[SemanticTopics] = None
def __init__(self, **data: Any) -> None:
super().__init__(**data)
# Validate semantic_context
if (
self.pebblo_semantic_entities is None
and self.pebblo_semantic_topics is None
):
raise ValueError(
"semantic_context must contain 'pebblo_semantic_entities' or "
"'pebblo_semantic_topics'"
)
class ChainInput(BaseModel):
"""Input for PebbloRetrievalQA chain."""
query: str
auth_context: Optional[AuthContext] = None
semantic_context: Optional[SemanticContext] = None
def dict(self, **kwargs: Any) -> dict:
base_dict = super().dict(**kwargs)
# Keep auth_context and semantic_context as-is (Pydantic models)
base_dict["auth_context"] = self.auth_context
base_dict["semantic_context"] = self.semantic_context
return base_dict
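A brief sketch of how these models fit together (values are illustrative); note that `SemanticContext` refuses to be constructed empty, per the validation in its `__init__`:

from langchain_community.chains.pebblo_retrieval.models import (
    AuthContext,
    ChainInput,
    SemanticContext,
    SemanticTopics,
)

chain_input = ChainInput(
    query="What is the leave policy?",
    auth_context=AuthContext(user_id="alice@example.com", user_auth=["hr-group"]),
    semantic_context=SemanticContext(
        pebblo_semantic_topics=SemanticTopics(deny=["financial-report"])
    ),
)

# dict() keeps the nested contexts as Pydantic models, ready to pass to the chain.
inputs = chain_input.dict()

# Constructing a SemanticContext with neither topics nor entities raises a ValueError.
try:
    SemanticContext()
except ValueError as err:
    print(err)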

@ -1,4 +1,5 @@
"""OpenAI chat wrapper."""
from __future__ import annotations
import logging
@ -217,7 +218,7 @@ class ChatOpenAI(BaseChatModel):
)
"""Timeout for requests to OpenAI completion API. Can be float, httpx.Timeout or
None."""
max_retries: int = 2
max_retries: int = Field(default=2)
"""Maximum number of retries to make when generating."""
streaming: bool = False
"""Whether to stream the results or not."""

@ -2,6 +2,7 @@ from __future__ import annotations
import asyncio
import functools
import json
import logging
from typing import (
Any,
@ -12,6 +13,8 @@ from typing import (
List,
Mapping,
Optional,
Sequence,
Type,
Union,
cast,
)
@ -20,6 +23,7 @@ from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models import LanguageModelInput
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import (
AIMessage,
@ -32,6 +36,8 @@ from langchain_core.messages import (
HumanMessageChunk,
SystemMessage,
SystemMessageChunk,
ToolMessage,
ToolMessageChunk,
)
from langchain_core.output_parsers.openai_tools import (
make_invalid_tool_call,
@ -42,8 +48,11 @@ from langchain_core.outputs import (
ChatGenerationChunk,
ChatResult,
)
from langchain_core.pydantic_v1 import Field, SecretStr, root_validator
from langchain_core.pydantic_v1 import BaseModel, Field, SecretStr, root_validator
from langchain_core.runnables import Runnable
from langchain_core.tools import BaseTool
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
from langchain_core.utils.function_calling import convert_to_openai_tool
from requests.exceptions import HTTPError
from tenacity import (
before_sleep_log,
@ -68,6 +77,7 @@ def convert_dict_to_message(
"""Convert a dict to a message."""
role = _dict["role"]
content = _dict["content"]
if role == "user":
return (
HumanMessageChunk(content=content)
@ -79,17 +89,39 @@ def convert_dict_to_message(
invalid_tool_calls = []
if "tool_calls" in _dict:
additional_kwargs = {"tool_calls": _dict["tool_calls"]}
for raw_tool_call in _dict["tool_calls"]:
try:
tool_calls.append(parse_tool_call(raw_tool_call, return_id=True))
except Exception as e:
invalid_tool_calls.append(
make_invalid_tool_call(raw_tool_call, str(e))
)
for index, value in enumerate(_dict["tool_calls"]):
if is_chunk:
try:
tool_calls.append(
{
"name": value["function"].get("name"),
"args": value["function"].get("arguments"),
"id": value.get("id"),
# Tongyi does not respond with index,
# use index in the list instead
"index": index,
}
)
except KeyError:
pass
else:
try:
parsed_tool = parse_tool_call(value, return_id=True)
if parsed_tool:
tool_calls.append(parsed_tool)
except Exception as e:
invalid_tool_calls.append(make_invalid_tool_call(value, str(e)))
else:
additional_kwargs = {}
return (
AIMessageChunk(content=content)
AIMessageChunk(
content=content,
additional_kwargs=additional_kwargs,
tool_call_chunks=tool_calls, # type: ignore[arg-type]
id=_dict.get("id"),
)
if is_chunk
else AIMessage(
content=content,
@ -104,6 +136,23 @@ def convert_dict_to_message(
if is_chunk
else SystemMessage(content=content)
)
elif role == "tool":
additional_kwargs = {}
if "name" in _dict:
additional_kwargs["name"] = _dict["name"]
return (
ToolMessageChunk(
content=_dict.get("content", ""),
tool_call_id=_dict.get("tool_call_id"), # type: ignore[arg-type]
additional_kwargs=additional_kwargs,
)
if is_chunk
else ToolMessage(
content=_dict.get("content", ""),
tool_call_id=_dict.get("tool_call_id"), # type: ignore[arg-type]
additional_kwargs=additional_kwargs,
)
)
else:
return (
ChatMessageChunk(role=role, content=content)
@ -113,17 +162,23 @@ def convert_dict_to_message(
def convert_message_chunk_to_message(message_chunk: BaseMessageChunk) -> BaseMessage:
"""Convert a message chunk to a message."""
if isinstance(message_chunk, HumanMessageChunk):
return HumanMessage(content=message_chunk.content)
elif isinstance(message_chunk, AIMessageChunk):
return AIMessage(content=message_chunk.content)
elif isinstance(message_chunk, SystemMessageChunk):
return SystemMessage(content=message_chunk.content)
elif isinstance(message_chunk, ChatMessageChunk):
return ChatMessage(role=message_chunk.role, content=message_chunk.content)
else:
raise TypeError(f"Got unknown type {message_chunk}")
"""Convert a message chunk to a message.
Args:
message_chunk: Message chunk to convert.
Returns:
Message.
"""
if not isinstance(message_chunk, BaseMessageChunk):
return message_chunk
# chunk classes always have the equivalent non-chunk class as their first parent
ignore_keys = ["type"]
if isinstance(message_chunk, AIMessageChunk):
ignore_keys.append("tool_call_chunks")
return message_chunk.__class__.__mro__[1](
**{k: v for k, v in message_chunk.__dict__.items() if k not in ignore_keys}
)
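The conversion above leans on the chunk classes' MRO; a tiny sketch (illustrative only) of what it does for an AI chunk:

from langchain_core.messages import AIMessage, AIMessageChunk

chunk = AIMessageChunk(content="Hello")
# The first parent of AIMessageChunk is AIMessage, so the conversion rebuilds
# the chunk's fields into the plain (non-chunk) message class.
assert AIMessageChunk.__mro__[1] is AIMessage
print(type(AIMessageChunk.__mro__[1](content=chunk.content)))  # -> AIMessage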
def convert_message_to_dict(message: BaseMessage) -> dict:
@ -136,8 +191,17 @@ def convert_message_to_dict(message: BaseMessage) -> dict:
message_dict = {"role": "user", "content": message.content}
elif isinstance(message, AIMessage):
message_dict = {"role": "assistant", "content": message.content}
if "tool_calls" in message.additional_kwargs:
message_dict["tool_calls"] = message.additional_kwargs["tool_calls"]
elif isinstance(message, SystemMessage):
message_dict = {"role": "system", "content": message.content}
elif isinstance(message, ToolMessage):
message_dict = {
"role": "tool",
"tool_call_id": message.tool_call_id,
"content": message.content,
"name": message.name,
}
else:
raise TypeError(f"Got unknown type {message}")
return message_dict
@ -256,11 +320,57 @@ class ChatTongyi(BaseChatModel):
@retry_decorator
def _stream_completion_with_retry(**_kwargs: Any) -> Any:
responses = self.client.call(**_kwargs)
prev_resp = None
for resp in responses:
yield check_response(resp)
# If we are streaming without `incremental_output = True`,
# we need to calculate the delta response manually
if _kwargs.get("stream") and not _kwargs.get(
"incremental_output", False
):
if prev_resp is None:
delta_resp = resp
else:
delta_resp = self.subtract_client_response(resp, prev_resp)
prev_resp = resp
yield check_response(delta_resp)
else:
yield check_response(resp)
return _stream_completion_with_retry(**kwargs)
def subtract_client_response(self, resp: Any, prev_resp: Any) -> Any:
"""Subtract prev response from curr response.
Useful when streaming without `incremental_output = True`
"""
resp_copy = json.loads(json.dumps(resp))
choice = resp_copy["output"]["choices"][0]
message = choice["message"]
prev_resp_copy = json.loads(json.dumps(prev_resp))
prev_choice = prev_resp_copy["output"]["choices"][0]
prev_message = prev_choice["message"]
message["content"] = message["content"].replace(prev_message["content"], "")
if message.get("tool_calls"):
for index, tool_call in enumerate(message["tool_calls"]):
function = tool_call["function"]
if prev_message.get("tool_calls"):
prev_function = prev_message["tool_calls"][index]["function"]
function["name"] = function["name"].replace(
prev_function["name"], ""
)
function["arguments"] = function["arguments"].replace(
prev_function["arguments"], ""
)
return resp_copy
async def astream_completion_with_retry(self, **kwargs: Any) -> Any:
"""Because the dashscope SDK doesn't provide an async API,
we wrap `stream_generate_with_retry` with an async generator."""
@ -301,16 +411,16 @@ class ChatTongyi(BaseChatModel):
) -> ChatResult:
generations = []
if self.streaming:
generation: Optional[ChatGenerationChunk] = None
generation_chunk: Optional[ChatGenerationChunk] = None
for chunk in self._stream(
messages, stop=stop, run_manager=run_manager, **kwargs
):
if generation is None:
generation = chunk
if generation_chunk is None:
generation_chunk = chunk
else:
generation += chunk
assert generation is not None
generations.append(self._chunk_to_generation(generation))
generation_chunk += chunk
assert generation_chunk is not None
generations.append(self._chunk_to_generation(generation_chunk))
else:
params: Dict[str, Any] = self._invocation_params(
messages=messages, stop=stop, **kwargs
@ -373,9 +483,19 @@ class ChatTongyi(BaseChatModel):
params: Dict[str, Any] = self._invocation_params(
messages=messages, stop=stop, stream=True, **kwargs
)
for stream_resp, is_last_chunk in generate_with_last_element_mark(
self.stream_completion_with_retry(**params)
):
choice = stream_resp["output"]["choices"][0]
message = choice["message"]
if (
choice["finish_reason"] == "null"
and message["content"] == ""
and "tool_calls" not in message
):
continue
chunk = ChatGenerationChunk(
**self._chat_generation_from_qwen_resp(
stream_resp, is_chunk=True, is_last_chunk=is_last_chunk
@ -413,14 +533,13 @@ class ChatTongyi(BaseChatModel):
params = {**self._default_params, **kwargs}
if stop is not None:
params["stop"] = stop
if params.get("stream"):
# According to the Tongyi official docs,
# `incremental_output` with `tools` is not supported yet
if params.get("stream") and not params.get("tools"):
params["incremental_output"] = True
message_dicts = [convert_message_to_dict(m) for m in messages]
# According to the docs, the last message should be a `user` message
if message_dicts[-1]["role"] != "user":
raise ValueError("Last message should be user message.")
# And the `system` message should be the first message if present
system_message_indices = [
i for i, m in enumerate(message_dicts) if m["role"] == "system"
@ -470,3 +589,22 @@ class ChatTongyi(BaseChatModel):
message=convert_message_chunk_to_message(chunk.message),
generation_info=chunk.generation_info,
)
def bind_tools(
self,
tools: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable, BaseTool]],
**kwargs: Any,
) -> Runnable[LanguageModelInput, BaseMessage]:
"""Bind tool-like objects to this chat model.
Args:
tools: A list of tool definitions to bind to this chat model.
Can be a dictionary, pydantic model, callable, or BaseTool. Pydantic
models, callables, and BaseTools will be automatically converted to
their schema dictionary representation.
**kwargs: Any additional parameters to pass to the
:class:`~langchain.runnable.Runnable` constructor.
"""
formatted_tools = [convert_to_openai_tool(tool) for tool in tools]
return super().bind(tools=formatted_tools, **kwargs)
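A short sketch of the new tool-calling path for ChatTongyi (assuming the `dashscope` SDK is installed and `DASHSCOPE_API_KEY` is set; the weather tool is illustrative). Per the change above, streaming falls back to non-incremental output whenever tools are bound:

from langchain_community.chat_models.tongyi import ChatTongyi
from langchain_core.tools import tool

@tool
def get_weather(city: str) -> str:
    """Look up the current weather for a city."""
    return f"Sunny in {city}"

llm = ChatTongyi(model="qwen-turbo")
llm_with_tools = llm.bind_tools([get_weather])

msg = llm_with_tools.invoke("What's the weather like in Hangzhou?")
print(msg.tool_calls)  # parsed tool calls, e.g. [{"name": "get_weather", ...}]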

@ -2,6 +2,9 @@ import importlib
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from langchain_community.document_compressors.flashrank_rerank import (
FlashrankRerank,
)
from langchain_community.document_compressors.jina_rerank import (
JinaRerank, # noqa: F401
)
@ -12,12 +15,13 @@ if TYPE_CHECKING:
OpenVINOReranker,
)
__all__ = ["LLMLinguaCompressor", "OpenVINOReranker"]
__all__ = ["LLMLinguaCompressor", "OpenVINOReranker", "FlashrankRerank"]
_module_lookup = {
"LLMLinguaCompressor": "langchain_community.document_compressors.llmlingua_filter",
"OpenVINOReranker": "langchain_community.document_compressors.openvino_rerank",
"JinaRerank": "langchain_community.document_compressors.jina_rerank",
"FlashrankRerank": "langchain_community.document_compressors.flashrank_rerank",
}

@ -0,0 +1,76 @@
from __future__ import annotations
from typing import TYPE_CHECKING, Dict, Optional, Sequence
from langchain_core.callbacks.manager import Callbacks
from langchain_core.documents import BaseDocumentCompressor, Document
from langchain_core.pydantic_v1 import Extra, root_validator
if TYPE_CHECKING:
from flashrank import Ranker, RerankRequest
else:
# Avoid pydantic annotation issues when actually instantiating
# while keeping this import optional
try:
from flashrank import Ranker, RerankRequest
except ImportError:
pass
DEFAULT_MODEL_NAME = "ms-marco-MultiBERT-L-12"
class FlashrankRerank(BaseDocumentCompressor):
"""Document compressor using Flashrank interface."""
client: Ranker
"""Flashrank client to use for compressing documents"""
top_n: int = 3
"""Number of documents to return."""
model: Optional[str] = None
"""Model to use for reranking."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator(pre=True)
def validate_environment(cls, values: Dict) -> Dict:
"""Validate that the flashrank python package is installed and set up the client."""
try:
from flashrank import Ranker
except ImportError:
raise ImportError(
"Could not import flashrank python package. "
"Please install it with `pip install flashrank`."
)
values["model"] = values.get("model", DEFAULT_MODEL_NAME)
values["client"] = Ranker(model_name=values["model"])
return values
def compress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
passages = [
{"id": i, "text": doc.page_content, "meta": doc.metadata}
for i, doc in enumerate(documents)
]
rerank_request = RerankRequest(query=query, passages=passages)
rerank_response = self.client.rerank(rerank_request)[: self.top_n]
final_results = []
for r in rerank_response:
metadata = r["meta"]
metadata["relevance_score"] = r["score"]
doc = Document(
page_content=r["text"],
metadata=metadata,
)
final_results.append(doc)
return final_results
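A compact sketch of the new compressor used on its own (assuming `pip install flashrank`; the documents and query are illustrative, and the default ms-marco model is downloaded on first use). It is typically wired into a ContextualCompressionRetriever, but the standalone call shows the relevance_score metadata it adds:

from langchain_community.document_compressors import FlashrankRerank
from langchain_core.documents import Document

compressor = FlashrankRerank(top_n=2)

docs = [
    Document(page_content="LangChain integrates many rerankers."),
    Document(page_content="Bananas are rich in potassium."),
    Document(page_content="Flashrank reranks passages locally."),
]

reranked = compressor.compress_documents(docs, query="local passage reranking")
for doc in reranked:
    print(doc.metadata["relevance_score"], doc.page_content)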
