langchain[patch],community[patch]: Move unit tests that depend on community to community (#21685)

Eugene Yurtsev, 2 weeks ago (committed by GitHub)
parent 97a4ae50d2
commit 8607735b80
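The commit applies one pattern throughout: unit tests that import langchain_community either move into the community package or skip themselves when it is not installed. A minimal sketch of the skip guard used in the hunks below (the cache import is only an illustrative example):

    import pytest

    # Skip this whole test module at collection time if the optional
    # dependency cannot be imported; otherwise the module is returned.
    pytest.importorskip("langchain_community")

    # Imports below the guard are safe even without the extra installed.
    from langchain_community.cache import SQLAlchemyCache  # noqa: E402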

@@ -2,18 +2,16 @@
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Optional
from typing import Any, Optional
from langchain_core.language_models import BaseLanguageModel
from langchain_core.tools import Tool
from langchain_community.chains.openapi.chain import OpenAPIEndpointChain
from langchain_community.tools.openapi.utils.api_models import APIOperation
from langchain_community.tools.openapi.utils.openapi_utils import OpenAPISpec
from langchain_community.utilities.requests import Requests
if TYPE_CHECKING:
from langchain.chains.api.openapi.chain import OpenAPIEndpointChain
class NLATool(Tool):
"""Natural Language API Tool."""

@@ -1,4 +1,4 @@
# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand.
# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand.
[[package]]
name = "aenum"
@@ -3455,6 +3455,7 @@ files = [
{file = "jq-1.6.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:227b178b22a7f91ae88525810441791b1ca1fc71c86f03190911793be15cec3d"},
{file = "jq-1.6.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:780eb6383fbae12afa819ef676fc93e1548ae4b076c004a393af26a04b460742"},
{file = "jq-1.6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:08ded6467f4ef89fec35b2bf310f210f8cd13fbd9d80e521500889edf8d22441"},
{file = "jq-1.6.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:49e44ed677713f4115bd5bf2dbae23baa4cd503be350e12a1c1f506b0687848f"},
{file = "jq-1.6.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:984f33862af285ad3e41e23179ac4795f1701822473e1a26bf87ff023e5a89ea"},
{file = "jq-1.6.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f42264fafc6166efb5611b5d4cb01058887d050a6c19334f6a3f8a13bb369df5"},
{file = "jq-1.6.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a67154f150aaf76cc1294032ed588436eb002097dd4fd1e283824bf753a05080"},
@@ -4031,7 +4032,7 @@ url = "../core"
[[package]]
name = "langchain-text-splitters"
version = "0.0.1"
version = "0.0.2"
description = "LangChain text splitting utilities"
optional = false
python-versions = ">=3.8.1,<4.0"
@@ -6104,8 +6105,6 @@ files = [
{file = "psycopg2-2.9.9-cp310-cp310-win_amd64.whl", hash = "sha256:426f9f29bde126913a20a96ff8ce7d73fd8a216cfb323b1f04da402d452853c3"},
{file = "psycopg2-2.9.9-cp311-cp311-win32.whl", hash = "sha256:ade01303ccf7ae12c356a5e10911c9e1c51136003a9a1d92f7aa9d010fb98372"},
{file = "psycopg2-2.9.9-cp311-cp311-win_amd64.whl", hash = "sha256:121081ea2e76729acfb0673ff33755e8703d45e926e416cb59bae3a86c6a4981"},
{file = "psycopg2-2.9.9-cp312-cp312-win32.whl", hash = "sha256:d735786acc7dd25815e89cc4ad529a43af779db2e25aa7c626de864127e5a024"},
{file = "psycopg2-2.9.9-cp312-cp312-win_amd64.whl", hash = "sha256:a7653d00b732afb6fc597e29c50ad28087dcb4fbfb28e86092277a559ae4e693"},
{file = "psycopg2-2.9.9-cp37-cp37m-win32.whl", hash = "sha256:5e0d98cade4f0e0304d7d6f25bbfbc5bd186e07b38eac65379309c4ca3193efa"},
{file = "psycopg2-2.9.9-cp37-cp37m-win_amd64.whl", hash = "sha256:7e2dacf8b009a1c1e843b5213a87f7c544b2b042476ed7755be813eaf4e8347a"},
{file = "psycopg2-2.9.9-cp38-cp38-win32.whl", hash = "sha256:ff432630e510709564c01dafdbe996cb552e0b9f3f065eb89bdce5bd31fabf4c"},
@@ -6148,7 +6147,6 @@ files = [
{file = "psycopg2_binary-2.9.9-cp311-cp311-win32.whl", hash = "sha256:dc4926288b2a3e9fd7b50dc6a1909a13bbdadfc67d93f3374d984e56f885579d"},
{file = "psycopg2_binary-2.9.9-cp311-cp311-win_amd64.whl", hash = "sha256:b76bedd166805480ab069612119ea636f5ab8f8771e640ae103e05a4aae3e417"},
{file = "psycopg2_binary-2.9.9-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:8532fd6e6e2dc57bcb3bc90b079c60de896d2128c5d9d6f24a63875a95a088cf"},
{file = "psycopg2_binary-2.9.9-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b0605eaed3eb239e87df0d5e3c6489daae3f7388d455d0c0b4df899519c6a38d"},
{file = "psycopg2_binary-2.9.9-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f8544b092a29a6ddd72f3556a9fcf249ec412e10ad28be6a0c0d948924f2212"},
{file = "psycopg2_binary-2.9.9-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2d423c8d8a3c82d08fe8af900ad5b613ce3632a1249fd6a223941d0735fce493"},
{file = "psycopg2_binary-2.9.9-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2e5afae772c00980525f6d6ecf7cbca55676296b580c0e6abb407f15f3706996"},
@@ -6157,8 +6155,6 @@ files = [
{file = "psycopg2_binary-2.9.9-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:cb16c65dcb648d0a43a2521f2f0a2300f40639f6f8c1ecbc662141e4e3e1ee07"},
{file = "psycopg2_binary-2.9.9-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:911dda9c487075abd54e644ccdf5e5c16773470a6a5d3826fda76699410066fb"},
{file = "psycopg2_binary-2.9.9-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:57fede879f08d23c85140a360c6a77709113efd1c993923c59fde17aa27599fe"},
{file = "psycopg2_binary-2.9.9-cp312-cp312-win32.whl", hash = "sha256:64cf30263844fa208851ebb13b0732ce674d8ec6a0c86a4e160495d299ba3c93"},
{file = "psycopg2_binary-2.9.9-cp312-cp312-win_amd64.whl", hash = "sha256:81ff62668af011f9a48787564ab7eded4e9fb17a4a6a74af5ffa6a457400d2ab"},
{file = "psycopg2_binary-2.9.9-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:2293b001e319ab0d869d660a704942c9e2cce19745262a8aba2115ef41a0a42a"},
{file = "psycopg2_binary-2.9.9-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03ef7df18daf2c4c07e2695e8cfd5ee7f748a1d54d802330985a78d2a5a6dca9"},
{file = "psycopg2_binary-2.9.9-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a602ea5aff39bb9fac6308e9c9d82b9a35c2bf288e184a816002c9fae930b77"},
@@ -6691,26 +6687,31 @@ python-versions = ">=3.8"
files = [
{file = "PyMuPDF-1.23.26-cp310-none-macosx_10_9_x86_64.whl", hash = "sha256:645a05321aecc8c45739f71f0eb574ce33138d19189582ffa5241fea3a8e2549"},
{file = "PyMuPDF-1.23.26-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:2dfc9e010669ae92fade6fb72aaea49ebe3b8dcd7ee4dcbbe50115abcaa4d3fe"},
{file = "PyMuPDF-1.23.26-cp310-none-manylinux2014_aarch64.whl", hash = "sha256:734ee380b3abd038602be79114194a3cb74ac102b7c943bcb333104575922c50"},
{file = "PyMuPDF-1.23.26-cp310-none-manylinux2014_x86_64.whl", hash = "sha256:b22f8d854f8196ad5b20308c1cebad3d5189ed9f0988acbafa043947ea7e6c55"},
{file = "PyMuPDF-1.23.26-cp310-none-win32.whl", hash = "sha256:cc0f794e3466bc96b5bf79d42fbc1551428751e3fef38ebc10ac70396b676144"},
{file = "PyMuPDF-1.23.26-cp310-none-win_amd64.whl", hash = "sha256:2eb701247d8e685a24e45899d1175f01a3ce5fc792a4431c91fbb68633b29298"},
{file = "PyMuPDF-1.23.26-cp311-none-macosx_10_9_x86_64.whl", hash = "sha256:e2804a64bb57da414781e312fb0561f6be67658ad57ed4a73dce008b23fc70a6"},
{file = "PyMuPDF-1.23.26-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:97b40bb22e3056874634617a90e0ed24a5172cf71791b9e25d1d91c6743bc567"},
{file = "PyMuPDF-1.23.26-cp311-none-manylinux2014_aarch64.whl", hash = "sha256:fab8833559bc47ab26ce736f915b8fc1dd37c108049b90396f7cd5e1004d7593"},
{file = "PyMuPDF-1.23.26-cp311-none-manylinux2014_x86_64.whl", hash = "sha256:f25aafd3e7fb9d7761a22acf2b67d704f04cc36d4dc33a3773f0eb3f4ec3606f"},
{file = "PyMuPDF-1.23.26-cp311-none-win32.whl", hash = "sha256:05e672ed3e82caca7ef02a88ace30130b1dd392a1190f03b2b58ffe7aa331400"},
{file = "PyMuPDF-1.23.26-cp311-none-win_amd64.whl", hash = "sha256:92b3c4dd4d0491d495f333be2d41f4e1c155a409bc9d04b5ff29655dccbf4655"},
{file = "PyMuPDF-1.23.26-cp312-none-macosx_10_9_x86_64.whl", hash = "sha256:a217689ede18cc6991b4e6a78afee8a440b3075d53b9dec4ba5ef7487d4547e9"},
{file = "PyMuPDF-1.23.26-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:42ad2b819b90ce1947e11b90ec5085889df0a2e3aa0207bc97ecacfc6157cabc"},
{file = "PyMuPDF-1.23.26-cp312-none-manylinux2014_aarch64.whl", hash = "sha256:99607649f89a02bba7d8ebe96e2410664316adc95e9337f7dfeff6a154f93049"},
{file = "PyMuPDF-1.23.26-cp312-none-manylinux2014_x86_64.whl", hash = "sha256:bb42d4b8407b4de7cb58c28f01449f16f32a6daed88afb41108f1aeb3552bdd4"},
{file = "PyMuPDF-1.23.26-cp312-none-win32.whl", hash = "sha256:c40d044411615e6f0baa7d3d933b3032cf97e168c7fa77d1be8a46008c109aee"},
{file = "PyMuPDF-1.23.26-cp312-none-win_amd64.whl", hash = "sha256:3f876533aa7f9a94bcd9a0225ce72571b7808260903fec1d95c120bc842fb52d"},
{file = "PyMuPDF-1.23.26-cp38-none-macosx_10_9_x86_64.whl", hash = "sha256:52df831d46beb9ff494f5fba3e5d069af6d81f49abf6b6e799ee01f4f8fa6799"},
{file = "PyMuPDF-1.23.26-cp38-none-macosx_11_0_arm64.whl", hash = "sha256:0bbb0cf6593e53524f3fc26fb5e6ead17c02c64791caec7c4afe61b677dedf80"},
{file = "PyMuPDF-1.23.26-cp38-none-manylinux2014_aarch64.whl", hash = "sha256:5ef4360f20015673c20cf59b7e19afc97168795188c584254ed3778cde43ce77"},
{file = "PyMuPDF-1.23.26-cp38-none-manylinux2014_x86_64.whl", hash = "sha256:d7cd88842b2e7f4c71eef4d87c98c35646b80b60e6375392d7ce40e519261f59"},
{file = "PyMuPDF-1.23.26-cp38-none-win32.whl", hash = "sha256:6577e2f473625e2d0df5f5a3bf1e4519e94ae749733cc9937994d1b256687bfa"},
{file = "PyMuPDF-1.23.26-cp38-none-win_amd64.whl", hash = "sha256:fbe1a3255b2cd0d769b2da2c4efdd0c0f30d4961a1aac02c0f75cf951b337aa4"},
{file = "PyMuPDF-1.23.26-cp39-none-macosx_10_9_x86_64.whl", hash = "sha256:73fce034f2afea886a59ead2d0caedf27e2b2a8558b5da16d0286882e0b1eb82"},
{file = "PyMuPDF-1.23.26-cp39-none-macosx_11_0_arm64.whl", hash = "sha256:b3de8618b7cb5b36db611083840b3bcf09b11a893e2d8262f4e042102c7e65de"},
{file = "PyMuPDF-1.23.26-cp39-none-manylinux2014_aarch64.whl", hash = "sha256:879e7f5ad35709d8760ab6103c3d5dac8ab8043a856ab3653fd324af7358ee87"},
{file = "PyMuPDF-1.23.26-cp39-none-manylinux2014_x86_64.whl", hash = "sha256:deee96c2fd415ded7b5070d8d5b2c60679aee6ed0e28ac0d2cb998060d835c2c"},
{file = "PyMuPDF-1.23.26-cp39-none-win32.whl", hash = "sha256:9f7f4ef99dd8ac97fb0b852efa3dcbee515798078b6c79a6a13c7b1e7c5d41a4"},
{file = "PyMuPDF-1.23.26-cp39-none-win_amd64.whl", hash = "sha256:ba9a54552c7afb9ec85432c765e2fa9a81413acfaa7d70db7c9b528297749e5b"},
@@ -7151,7 +7152,6 @@ files = [
{file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"},
{file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"},
{file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"},
{file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a08c6f0fe150303c1c6b71ebcd7213c2858041a7e01975da3a99aed1e7a378ef"},
{file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"},
{file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"},
{file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"},

@@ -1,14 +1,10 @@
from pathlib import Path
from tempfile import TemporaryDirectory
import pytest
from langchain_core.language_models import FakeListLLM
from langchain_core.tools import Tool
from langchain.agents.agent_types import AgentType
from langchain.agents.initialize import initialize_agent, load_agent
pytest.importorskip("langchain_community")
from langchain_core.language_models import FakeListLLM
from langchain_core.tools import Tool
def test_mrkl_serialization() -> None:

@@ -2,10 +2,10 @@
from typing import List
import pytest
from langchain_core.embeddings import FakeEmbeddings
from langchain.evaluation.loading import EvaluatorType, load_evaluators
from langchain.evaluation.schema import PairwiseStringEvaluator, StringEvaluator
from langchain_core.embeddings import FakeEmbeddings
from tests.unit_tests.llms.fake_chat_model import FakeChatModel
from tests.unit_tests.llms.fake_llm import FakeLLM
@@ -26,7 +26,6 @@ def test_load_evaluators(evaluator_type: EvaluatorType) -> None:
)
@pytest.mark.community
@pytest.mark.parametrize(
"evaluator_types",
[

@@ -0,0 +1,24 @@
"""Verify proxy imports from langchain to community are behaving as expected."""
def test_all_proxy_llms_are_llm_subclasses() -> None:
"""Simple test to make sure all things are subclasses of BaseLLM."""
from langchain import llms
from langchain_core.language_models import BaseLLM
for cls in llms.__all__:
assert issubclass(getattr(llms, cls), BaseLLM)
def test_vectorstores() -> None:
"""Simple test to make sure all things can be imported."""
from langchain import vectorstores
from langchain_core.vectorstores import VectorStore
for cls in vectorstores.__all__:
if cls not in [
"AlibabaCloudOpenSearchSettings",
"ClickhouseSettings",
"MyScaleSettings",
]:
assert issubclass(getattr(vectorstores, cls), VectorStore)

@@ -0,0 +1,875 @@
# serializer version: 1
# name: test_person
'''
{
"lc": 1,
"type": "constructor",
"id": [
"tests",
"unit_tests",
"load",
"test_dump",
"Person"
],
"kwargs": {
"secret": {
"lc": 1,
"type": "secret",
"id": [
"SECRET"
]
},
"you_can_see_me": "hello"
}
}
'''
# ---
# name: test_person.1
'''
{
"lc": 1,
"type": "constructor",
"id": [
"my",
"special",
"namespace",
"SpecialPerson"
],
"kwargs": {
"secret": {
"lc": 1,
"type": "secret",
"id": [
"SECRET"
]
},
"you_can_see_me": "hello",
"another_secret": {
"lc": 1,
"type": "secret",
"id": [
"ANOTHER_SECRET"
]
},
"another_visible": "bye"
}
}
'''
# ---
# name: test_person_with_kwargs
'{"lc":1,"type":"constructor","id":["tests","unit_tests","load","test_dump","Person"],"kwargs":{"secret":{"lc":1,"type":"secret","id":["SECRET"]},"you_can_see_me":"hello"}}'
# ---
# name: test_serialize_llmchain
'''
{
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"chains",
"llm",
"LLMChain"
],
"kwargs": {
"prompt": {
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"prompts",
"prompt",
"PromptTemplate"
],
"kwargs": {
"input_variables": [
"name"
],
"template": "hello {name}!",
"template_format": "f-string"
},
"name": "PromptTemplate",
"graph": {
"nodes": [
{
"id": 0,
"type": "schema",
"data": "PromptInput"
},
{
"id": 1,
"type": "runnable",
"data": {
"id": [
"langchain",
"prompts",
"prompt",
"PromptTemplate"
],
"name": "PromptTemplate"
}
},
{
"id": 2,
"type": "schema",
"data": "PromptTemplateOutput"
}
],
"edges": [
{
"source": 0,
"target": 1
},
{
"source": 1,
"target": 2
}
]
}
},
"llm": {
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"llms",
"openai",
"OpenAI"
],
"kwargs": {
"model_name": "davinci",
"temperature": 0.5,
"max_tokens": 256,
"top_p": 1,
"n": 1,
"best_of": 1,
"openai_api_key": {
"lc": 1,
"type": "secret",
"id": [
"OPENAI_API_KEY"
]
},
"openai_proxy": "",
"batch_size": 20,
"max_retries": 2,
"disallowed_special": "all"
},
"name": "OpenAI",
"graph": {
"nodes": [
{
"id": 0,
"type": "schema",
"data": "OpenAIInput"
},
{
"id": 1,
"type": "runnable",
"data": {
"id": [
"langchain",
"llms",
"openai",
"OpenAI"
],
"name": "OpenAI"
}
},
{
"id": 2,
"type": "schema",
"data": "OpenAIOutput"
}
],
"edges": [
{
"source": 0,
"target": 1
},
{
"source": 1,
"target": 2
}
]
}
},
"output_key": "text",
"output_parser": {
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"schema",
"output_parser",
"StrOutputParser"
],
"kwargs": {},
"name": "StrOutputParser",
"graph": {
"nodes": [
{
"id": 0,
"type": "schema",
"data": "StrOutputParserInput"
},
{
"id": 1,
"type": "runnable",
"data": {
"id": [
"langchain",
"schema",
"output_parser",
"StrOutputParser"
],
"name": "StrOutputParser"
}
},
{
"id": 2,
"type": "schema",
"data": "StrOutputParserOutput"
}
],
"edges": [
{
"source": 0,
"target": 1
},
{
"source": 1,
"target": 2
}
]
}
},
"return_final_only": true
},
"name": "LLMChain",
"graph": {
"nodes": [
{
"id": 0,
"type": "schema",
"data": "ChainInput"
},
{
"id": 1,
"type": "runnable",
"data": {
"id": [
"langchain",
"chains",
"llm",
"LLMChain"
],
"name": "LLMChain"
}
},
{
"id": 2,
"type": "schema",
"data": "ChainOutput"
}
],
"edges": [
{
"source": 0,
"target": 1
},
{
"source": 1,
"target": 2
}
]
}
}
'''
# ---
# name: test_serialize_llmchain_chat
'''
{
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"chains",
"llm",
"LLMChain"
],
"kwargs": {
"prompt": {
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"prompts",
"chat",
"ChatPromptTemplate"
],
"kwargs": {
"input_variables": [
"name"
],
"messages": [
{
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"prompts",
"chat",
"HumanMessagePromptTemplate"
],
"kwargs": {
"prompt": {
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"prompts",
"prompt",
"PromptTemplate"
],
"kwargs": {
"input_variables": [
"name"
],
"template": "hello {name}!",
"template_format": "f-string"
},
"name": "PromptTemplate",
"graph": {
"nodes": [
{
"id": 0,
"type": "schema",
"data": "PromptInput"
},
{
"id": 1,
"type": "runnable",
"data": {
"id": [
"langchain",
"prompts",
"prompt",
"PromptTemplate"
],
"name": "PromptTemplate"
}
},
{
"id": 2,
"type": "schema",
"data": "PromptTemplateOutput"
}
],
"edges": [
{
"source": 0,
"target": 1
},
{
"source": 1,
"target": 2
}
]
}
}
}
}
]
},
"name": "ChatPromptTemplate",
"graph": {
"nodes": [
{
"id": 0,
"type": "schema",
"data": "PromptInput"
},
{
"id": 1,
"type": "runnable",
"data": {
"id": [
"langchain",
"prompts",
"chat",
"ChatPromptTemplate"
],
"name": "ChatPromptTemplate"
}
},
{
"id": 2,
"type": "schema",
"data": "ChatPromptTemplateOutput"
}
],
"edges": [
{
"source": 0,
"target": 1
},
{
"source": 1,
"target": 2
}
]
}
},
"llm": {
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"chat_models",
"openai",
"ChatOpenAI"
],
"kwargs": {
"model_name": "davinci",
"temperature": 0.5,
"openai_api_key": {
"lc": 1,
"type": "secret",
"id": [
"OPENAI_API_KEY"
]
},
"openai_proxy": "",
"max_retries": 2,
"n": 1
},
"name": "ChatOpenAI",
"graph": {
"nodes": [
{
"id": 0,
"type": "schema",
"data": "ChatOpenAIInput"
},
{
"id": 1,
"type": "runnable",
"data": {
"id": [
"langchain",
"chat_models",
"openai",
"ChatOpenAI"
],
"name": "ChatOpenAI"
}
},
{
"id": 2,
"type": "schema",
"data": "ChatOpenAIOutput"
}
],
"edges": [
{
"source": 0,
"target": 1
},
{
"source": 1,
"target": 2
}
]
}
},
"output_key": "text",
"output_parser": {
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"schema",
"output_parser",
"StrOutputParser"
],
"kwargs": {},
"name": "StrOutputParser",
"graph": {
"nodes": [
{
"id": 0,
"type": "schema",
"data": "StrOutputParserInput"
},
{
"id": 1,
"type": "runnable",
"data": {
"id": [
"langchain",
"schema",
"output_parser",
"StrOutputParser"
],
"name": "StrOutputParser"
}
},
{
"id": 2,
"type": "schema",
"data": "StrOutputParserOutput"
}
],
"edges": [
{
"source": 0,
"target": 1
},
{
"source": 1,
"target": 2
}
]
}
},
"return_final_only": true
},
"name": "LLMChain",
"graph": {
"nodes": [
{
"id": 0,
"type": "schema",
"data": "ChainInput"
},
{
"id": 1,
"type": "runnable",
"data": {
"id": [
"langchain",
"chains",
"llm",
"LLMChain"
],
"name": "LLMChain"
}
},
{
"id": 2,
"type": "schema",
"data": "ChainOutput"
}
],
"edges": [
{
"source": 0,
"target": 1
},
{
"source": 1,
"target": 2
}
]
}
}
'''
# ---
# name: test_serialize_llmchain_with_non_serializable_arg
'''
{
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"chains",
"llm",
"LLMChain"
],
"kwargs": {
"prompt": {
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"prompts",
"prompt",
"PromptTemplate"
],
"kwargs": {
"input_variables": [
"name"
],
"template": "hello {name}!",
"template_format": "f-string"
},
"name": "PromptTemplate",
"graph": {
"nodes": [
{
"id": 0,
"type": "schema",
"data": "PromptInput"
},
{
"id": 1,
"type": "runnable",
"data": {
"id": [
"langchain",
"prompts",
"prompt",
"PromptTemplate"
],
"name": "PromptTemplate"
}
},
{
"id": 2,
"type": "schema",
"data": "PromptTemplateOutput"
}
],
"edges": [
{
"source": 0,
"target": 1
},
{
"source": 1,
"target": 2
}
]
}
},
"llm": {
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"llms",
"openai",
"OpenAI"
],
"kwargs": {
"model_name": "davinci",
"temperature": 0.5,
"max_tokens": 256,
"top_p": 1,
"n": 1,
"best_of": 1,
"openai_api_key": {
"lc": 1,
"type": "secret",
"id": [
"OPENAI_API_KEY"
]
},
"openai_proxy": "",
"batch_size": 20,
"max_retries": 2,
"disallowed_special": "all"
},
"name": "OpenAI",
"graph": {
"nodes": [
{
"id": 0,
"type": "schema",
"data": "OpenAIInput"
},
{
"id": 1,
"type": "runnable",
"data": {
"id": [
"langchain",
"llms",
"openai",
"OpenAI"
],
"name": "OpenAI"
}
},
{
"id": 2,
"type": "schema",
"data": "OpenAIOutput"
}
],
"edges": [
{
"source": 0,
"target": 1
},
{
"source": 1,
"target": 2
}
]
}
},
"output_key": "text",
"output_parser": {
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"schema",
"output_parser",
"StrOutputParser"
],
"kwargs": {},
"name": "StrOutputParser",
"graph": {
"nodes": [
{
"id": 0,
"type": "schema",
"data": "StrOutputParserInput"
},
{
"id": 1,
"type": "runnable",
"data": {
"id": [
"langchain",
"schema",
"output_parser",
"StrOutputParser"
],
"name": "StrOutputParser"
}
},
{
"id": 2,
"type": "schema",
"data": "StrOutputParserOutput"
}
],
"edges": [
{
"source": 0,
"target": 1
},
{
"source": 1,
"target": 2
}
]
}
},
"return_final_only": true
},
"name": "LLMChain",
"graph": {
"nodes": [
{
"id": 0,
"type": "schema",
"data": "ChainInput"
},
{
"id": 1,
"type": "runnable",
"data": {
"id": [
"langchain",
"chains",
"llm",
"LLMChain"
],
"name": "LLMChain"
}
},
{
"id": 2,
"type": "schema",
"data": "ChainOutput"
}
],
"edges": [
{
"source": 0,
"target": 1
},
{
"source": 1,
"target": 2
}
]
}
}
'''
# ---
# name: test_serialize_openai_llm
'''
{
"lc": 1,
"type": "constructor",
"id": [
"langchain",
"llms",
"openai",
"OpenAI"
],
"kwargs": {
"model_name": "davinci",
"temperature": 0.7,
"max_tokens": 256,
"top_p": 1,
"n": 1,
"best_of": 1,
"openai_api_key": {
"lc": 1,
"type": "secret",
"id": [
"OPENAI_API_KEY"
]
},
"openai_proxy": "",
"batch_size": 20,
"max_retries": 2,
"disallowed_special": "all"
},
"name": "OpenAI",
"graph": {
"nodes": [
{
"id": 0,
"type": "schema",
"data": "OpenAIInput"
},
{
"id": 1,
"type": "runnable",
"data": {
"id": [
"langchain",
"llms",
"openai",
"OpenAI"
],
"name": "OpenAI"
}
},
{
"id": 2,
"type": "schema",
"data": "OpenAIOutput"
}
],
"edges": [
{
"source": 0,
"target": 1
},
{
"source": 1,
"target": 2
}
]
}
}
'''
# ---

@@ -0,0 +1,255 @@
"""Test for Serializable base class"""
import json
import os
from typing import Any, Dict, List
from unittest.mock import patch
import pytest
from langchain.chains.llm import LLMChain
from langchain_core.load.dump import dumps
from langchain_core.load.serializable import Serializable
from langchain_core.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain_core.prompts.prompt import PromptTemplate
from langchain_core.pydantic_v1 import Field, root_validator
from langchain_core.tracers.langchain import LangChainTracer
class Person(Serializable):
secret: str
you_can_see_me: str = "hello"
@classmethod
def is_lc_serializable(cls) -> bool:
return True
@property
def lc_secrets(self) -> Dict[str, str]:
return {"secret": "SECRET"}
@property
def lc_attributes(self) -> Dict[str, str]:
return {"you_can_see_me": self.you_can_see_me}
class SpecialPerson(Person):
another_secret: str
another_visible: str = "bye"
@classmethod
def get_lc_namespace(cls) -> List[str]:
return ["my", "special", "namespace"]
# Gets merged with parent class's secrets
@property
def lc_secrets(self) -> Dict[str, str]:
return {"another_secret": "ANOTHER_SECRET"}
# Gets merged with parent class's attributes
@property
def lc_attributes(self) -> Dict[str, str]:
return {"another_visible": self.another_visible}
class NotSerializable:
pass
def test_person(snapshot: Any) -> None:
p = Person(secret="hello")
assert dumps(p, pretty=True) == snapshot
sp = SpecialPerson(another_secret="Wooo", secret="Hmm")
assert dumps(sp, pretty=True) == snapshot
assert Person.lc_id() == ["tests", "unit_tests", "load", "test_dump", "Person"]
assert SpecialPerson.lc_id() == ["my", "special", "namespace", "SpecialPerson"]
def test_typeerror() -> None:
assert (
dumps({(1, 2): 3})
== """{"lc": 1, "type": "not_implemented", "id": ["builtins", "dict"], "repr": "{(1, 2): 3}"}""" # noqa: E501
)
@pytest.mark.requires("openai")
def test_serialize_openai_llm(snapshot: Any) -> None:
from langchain_community.llms.openai import OpenAI
with patch.dict(os.environ, {"LANGCHAIN_API_KEY": "test-api-key"}):
llm = OpenAI( # type: ignore[call-arg]
model="davinci",
temperature=0.5,
openai_api_key="hello",
# This is excluded from serialization
callbacks=[LangChainTracer()],
)
llm.temperature = 0.7 # this is reflected in serialization
assert dumps(llm, pretty=True) == snapshot
@pytest.mark.requires("openai")
def test_serialize_llmchain(snapshot: Any) -> None:
from langchain_community.llms.openai import OpenAI
llm = OpenAI(model="davinci", temperature=0.5, openai_api_key="hello") # type: ignore[call-arg]
prompt = PromptTemplate.from_template("hello {name}!")
chain = LLMChain(llm=llm, prompt=prompt)
assert dumps(chain, pretty=True) == snapshot
@pytest.mark.requires("openai")
def test_serialize_llmchain_env() -> None:
from langchain_community.llms.openai import OpenAI
llm = OpenAI(model="davinci", temperature=0.5, openai_api_key="hello") # type: ignore[call-arg]
prompt = PromptTemplate.from_template("hello {name}!")
chain = LLMChain(llm=llm, prompt=prompt)
import os
has_env = "OPENAI_API_KEY" in os.environ
if not has_env:
os.environ["OPENAI_API_KEY"] = "env_variable"
llm_2 = OpenAI(model="davinci", temperature=0.5) # type: ignore[call-arg]
prompt_2 = PromptTemplate.from_template("hello {name}!")
chain_2 = LLMChain(llm=llm_2, prompt=prompt_2)
assert dumps(chain_2, pretty=True) == dumps(chain, pretty=True)
if not has_env:
del os.environ["OPENAI_API_KEY"]
@pytest.mark.requires("openai")
def test_serialize_llmchain_chat(snapshot: Any) -> None:
from langchain_community.chat_models.openai import ChatOpenAI
llm = ChatOpenAI(model="davinci", temperature=0.5, openai_api_key="hello") # type: ignore[call-arg]
prompt = ChatPromptTemplate.from_messages(
[HumanMessagePromptTemplate.from_template("hello {name}!")]
)
chain = LLMChain(llm=llm, prompt=prompt)
assert dumps(chain, pretty=True) == snapshot
import os
has_env = "OPENAI_API_KEY" in os.environ
if not has_env:
os.environ["OPENAI_API_KEY"] = "env_variable"
llm_2 = ChatOpenAI(model="davinci", temperature=0.5) # type: ignore[call-arg]
prompt_2 = ChatPromptTemplate.from_messages(
[HumanMessagePromptTemplate.from_template("hello {name}!")]
)
chain_2 = LLMChain(llm=llm_2, prompt=prompt_2)
assert dumps(chain_2, pretty=True) == dumps(chain, pretty=True)
if not has_env:
del os.environ["OPENAI_API_KEY"]
@pytest.mark.requires("openai")
def test_serialize_llmchain_with_non_serializable_arg(snapshot: Any) -> None:
from langchain_community.llms.openai import OpenAI
llm = OpenAI( # type: ignore[call-arg]
model="davinci",
temperature=0.5,
openai_api_key="hello",
client=NotSerializable,
)
prompt = PromptTemplate.from_template("hello {name}!")
chain = LLMChain(llm=llm, prompt=prompt)
assert dumps(chain, pretty=True) == snapshot
def test_person_with_kwargs(snapshot: Any) -> None:
person = Person(secret="hello")
assert dumps(person, separators=(",", ":")) == snapshot
def test_person_with_invalid_kwargs() -> None:
person = Person(secret="hello")
with pytest.raises(TypeError):
dumps(person, invalid_kwarg="hello")
class TestClass(Serializable):
my_favorite_secret: str = Field(alias="my_favorite_secret_alias")
my_other_secret: str = Field()
class Config:
"""Configuration for this pydantic object."""
allow_population_by_field_name = True
@root_validator(pre=True)
def get_from_env(cls, values: Dict) -> Dict:
"""Get the values from the environment."""
if "my_favorite_secret" not in values:
values["my_favorite_secret"] = os.getenv("MY_FAVORITE_SECRET")
if "my_other_secret" not in values:
values["my_other_secret"] = os.getenv("MY_OTHER_SECRET")
return values
@classmethod
def is_lc_serializable(cls) -> bool:
return True
@classmethod
def get_lc_namespace(cls) -> List[str]:
return ["my", "special", "namespace"]
@property
def lc_secrets(self) -> Dict[str, str]:
return {
"my_favorite_secret": "MY_FAVORITE_SECRET",
"my_other_secret": "MY_OTHER_SECRET",
}
def test_aliases_hidden() -> None:
test_class = TestClass(my_favorite_secret="hello", my_other_secret="world") # type: ignore[call-arg]
dumped = json.loads(dumps(test_class, pretty=True))
expected_dump = {
"lc": 1,
"type": "constructor",
"id": ["my", "special", "namespace", "TestClass"],
"kwargs": {
"my_favorite_secret": {
"lc": 1,
"type": "secret",
"id": ["MY_FAVORITE_SECRET"],
},
"my_other_secret": {"lc": 1, "type": "secret", "id": ["MY_OTHER_SECRET"]},
},
}
assert dumped == expected_dump
# Check while patching the os environment
with patch.dict(
os.environ, {"MY_FAVORITE_SECRET": "hello", "MY_OTHER_SECRET": "world"}
):
test_class = TestClass() # type: ignore[call-arg]
dumped = json.loads(dumps(test_class, pretty=True))
# Check by alias
test_class = TestClass(my_favorite_secret_alias="hello", my_other_secret="world") # type: ignore[call-arg]
dumped = json.loads(dumps(test_class, pretty=True))
expected_dump = {
"lc": 1,
"type": "constructor",
"id": ["my", "special", "namespace", "TestClass"],
"kwargs": {
"my_favorite_secret": {
"lc": 1,
"type": "secret",
"id": ["MY_FAVORITE_SECRET"],
},
"my_other_secret": {"lc": 1, "type": "secret", "id": ["MY_OTHER_SECRET"]},
},
}
assert dumped == expected_dump

@@ -3,7 +3,6 @@ import inspect
import pkgutil
from types import ModuleType
import pytest
from langchain_core.load.mapping import SERIALIZABLE_MAPPING
@@ -55,7 +54,6 @@ def import_all_modules(package_name: str) -> dict:
return classes
@pytest.mark.community
def test_import_all_modules() -> None:
"""Test import all modules works as expected"""
all_modules = import_all_modules("langchain")
@@ -79,7 +77,6 @@ def test_import_all_modules() -> None:
)
@pytest.mark.community
def test_serializable_mapping() -> None:
to_skip = {
# This should have had a different namespace, as it was never

@@ -1,11 +1,8 @@
import pytest
from langchain.retrievers.ensemble import EnsembleRetriever
from langchain_core.documents import Document
from langchain_core.embeddings import FakeEmbeddings
from langchain.retrievers.ensemble import EnsembleRetriever
pytest.importorskip("langchain_community")
@pytest.mark.requires("rank_bm25")
def test_ensemble_retriever_get_relevant_docs() -> None:

@@ -4,20 +4,26 @@ from typing import Dict, Generator, List, Union
import pytest
from _pytest.fixtures import FixtureRequest
from langchain.globals import get_llm_cache, set_llm_cache
from langchain_core.caches import InMemoryCache
from langchain_core.language_models import FakeListChatModel, FakeListLLM
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.language_models.llms import BaseLLM
from langchain_core.load import dumps
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage
from langchain_core.outputs import ChatGeneration, Generation
from sqlalchemy import create_engine
from langchain_core.outputs import ChatGeneration
from sqlalchemy import Column, Integer, Sequence, String, create_engine
from sqlalchemy.orm import Session
pytest.importorskip("langchain_community")
try:
from sqlalchemy.orm import declarative_base
except ImportError:
from sqlalchemy.ext.declarative import declarative_base
from langchain.globals import get_llm_cache, set_llm_cache
from langchain_core.outputs import Generation, LLMResult
from langchain_community.cache import SQLAlchemyCache # noqa: E402
from tests.unit_tests.llms.fake_llm import FakeLLM
def get_sqlite_cache() -> SQLAlchemyCache:
@@ -210,3 +216,44 @@ def create_llm_string(llm: Union[BaseLLM, BaseChatModel]) -> str:
_dict: Dict = llm.dict()
_dict["stop"] = None
return str(sorted([(k, v) for k, v in _dict.items()]))
def test_sql_alchemy_cache() -> None:
"""Test custom_caching behavior."""
Base = declarative_base()
class FulltextLLMCache(Base): # type: ignore
"""Postgres table for fulltext-indexed LLM Cache."""
__tablename__ = "llm_cache_fulltext"
id = Column(Integer, Sequence("cache_id"), primary_key=True)
prompt = Column(String, nullable=False)
llm = Column(String, nullable=False)
idx = Column(Integer)
response = Column(String)
engine = create_engine("sqlite://")
from langchain_community.cache import SQLAlchemyCache
set_llm_cache(SQLAlchemyCache(engine, FulltextLLMCache))
llm = FakeLLM()
params = llm.dict()
params["stop"] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
get_llm_cache().update("foo", llm_string, [Generation(text="fizz")])
output = llm.generate(["foo", "bar", "foo"])
expected_cache_output = [Generation(text="foo")]
cache_output = get_llm_cache().lookup("bar", llm_string)
assert cache_output == expected_cache_output
set_llm_cache(None)
expected_generations = [
[Generation(text="fizz")],
[Generation(text="foo")],
[Generation(text="fizz")],
]
expected_output = LLMResult(
generations=expected_generations,
llm_output=None,
)
assert output == expected_output

@@ -23,7 +23,7 @@ from langchain._api import create_importer
_module_lookup = {
"APIChain": "langchain.chains.api.base",
"OpenAPIEndpointChain": "langchain.chains.api.openapi.chain",
"OpenAPIEndpointChain": "langchain_community.chains.openapi.chain",
"AnalyzeDocumentChain": "langchain.chains.combine_documents.base",
"MapReduceDocumentsChain": "langchain.chains.combine_documents.map_reduce",
"MapRerankDocumentsChain": "langchain.chains.combine_documents.map_rerank",
@@ -36,23 +36,23 @@ _module_lookup = {
"ConversationalRetrievalChain": "langchain.chains.conversational_retrieval.base",
"generate_example": "langchain.chains.example_generator",
"FlareChain": "langchain.chains.flare.base",
"ArangoGraphQAChain": "langchain.chains.graph_qa.arangodb",
"GraphQAChain": "langchain.chains.graph_qa.base",
"GraphCypherQAChain": "langchain.chains.graph_qa.cypher",
"FalkorDBQAChain": "langchain.chains.graph_qa.falkordb",
"HugeGraphQAChain": "langchain.chains.graph_qa.hugegraph",
"KuzuQAChain": "langchain.chains.graph_qa.kuzu",
"NebulaGraphQAChain": "langchain.chains.graph_qa.nebulagraph",
"NeptuneOpenCypherQAChain": "langchain.chains.graph_qa.neptune_cypher",
"NeptuneSparqlQAChain": "langchain.chains.graph_qa.neptune_sparql",
"OntotextGraphDBQAChain": "langchain.chains.graph_qa.ontotext_graphdb",
"GraphSparqlQAChain": "langchain.chains.graph_qa.sparql",
"ArangoGraphQAChain": "langchain_community.chains.graph_qa.arangodb",
"GraphQAChain": "langchain_community.chains.graph_qa.base",
"GraphCypherQAChain": "langchain_community.chains.graph_qa.cypher",
"FalkorDBQAChain": "langchain_community.chains.graph_qa.falkordb",
"HugeGraphQAChain": "langchain_community.chains.graph_qa.hugegraph",
"KuzuQAChain": "langchain_community.chains.graph_qa.kuzu",
"NebulaGraphQAChain": "langchain_community.chains.graph_qa.nebulagraph",
"NeptuneOpenCypherQAChain": "langchain_community.chains.graph_qa.neptune_cypher",
"NeptuneSparqlQAChain": "langchain_community.chains.graph_qa.neptune_sparql",
"OntotextGraphDBQAChain": "langchain_community.chains.graph_qa.ontotext_graphdb",
"GraphSparqlQAChain": "langchain_community.chains.graph_qa.sparql",
"create_history_aware_retriever": "langchain.chains.history_aware_retriever",
"HypotheticalDocumentEmbedder": "langchain.chains.hyde.base",
"LLMChain": "langchain.chains.llm",
"LLMCheckerChain": "langchain.chains.llm_checker.base",
"LLMMathChain": "langchain.chains.llm_math.base",
"LLMRequestsChain": "langchain.chains.llm_requests",
"LLMRequestsChain": "langchain_community.chains.llm_requests",
"LLMSummarizationCheckerChain": "langchain.chains.llm_summarization_checker.base",
"load_chain": "langchain.chains.loading",
"MapReduceChain": "langchain.chains.mapreduce",

@@ -343,7 +343,6 @@ addopts = "--strict-markers --strict-config --durations=5 --snapshot-warn-unused
markers = [
"requires: mark tests as requiring a specific library",
"scheduled: mark tests to run in scheduled testing",
"community: mark tests that require langchain-community to be installed",
"compile: mark placeholder test used to compile integration tests without running them"
]
asyncio_mode = "auto"
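For context, the dropped marker existed so that --strict-markers would accept the @pytest.mark.community decorator seen in the removed tests. Usage looked like the following, with a hypothetical test name:

    import pytest

    @pytest.mark.community  # deselect with: pytest -m "not community"
    def test_needs_community() -> None:
        import langchain_community  # noqa: F401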

@@ -1,14 +1,4 @@
"""Test base LLM functionality."""
import importlib
import pytest
from sqlalchemy import Column, Integer, Sequence, String, create_engine
try:
from sqlalchemy.orm import declarative_base
except ImportError:
from sqlalchemy.ext.declarative import declarative_base
from langchain_core.caches import InMemoryCache
from langchain_core.outputs import Generation, LLMResult
@@ -50,48 +40,3 @@ def test_caching() -> None:
llm_output=None,
)
assert output == expected_output
@pytest.mark.skipif(
importlib.util.find_spec("langchain_community") is None,
reason="langchain_community not installed",
)
def test_custom_caching() -> None:
"""Test custom_caching behavior."""
Base = declarative_base()
class FulltextLLMCache(Base): # type: ignore
"""Postgres table for fulltext-indexed LLM Cache."""
__tablename__ = "llm_cache_fulltext"
id = Column(Integer, Sequence("cache_id"), primary_key=True)
prompt = Column(String, nullable=False)
llm = Column(String, nullable=False)
idx = Column(Integer)
response = Column(String)
engine = create_engine("sqlite://")
from langchain_community.cache import SQLAlchemyCache
set_llm_cache(SQLAlchemyCache(engine, FulltextLLMCache))
llm = FakeLLM()
params = llm.dict()
params["stop"] = None
llm_string = str(sorted([(k, v) for k, v in params.items()]))
get_llm_cache().update("foo", llm_string, [Generation(text="fizz")])
output = llm.generate(["foo", "bar", "foo"])
expected_cache_output = [Generation(text="foo")]
cache_output = get_llm_cache().lookup("bar", llm_string)
assert cache_output == expected_cache_output
set_llm_cache(None)
expected_generations = [
[Generation(text="fizz")],
[Generation(text="foo")],
[Generation(text="fizz")],
]
expected_output = LLMResult(
generations=expected_generations,
llm_output=None,
)
assert output == expected_output

@@ -1,6 +1,3 @@
import pytest
from langchain_core.language_models import BaseLLM
from langchain import llms
EXPECT_ALL = [
@@ -91,10 +88,3 @@ EXPECT_ALL = [
def test_all_imports() -> None:
"""Simple test to make sure all things can be imported."""
assert set(llms.__all__) == set(EXPECT_ALL)
@pytest.mark.community
def test_all_subclasses() -> None:
"""Simple test to make sure all things are subclasses of BaseLLM."""
for cls in llms.__all__:
assert issubclass(getattr(llms, cls), BaseLLM)

@@ -8,12 +8,7 @@ from unittest.mock import patch
import pytest
from langchain_core.load.dump import dumps
from langchain_core.load.serializable import Serializable
from langchain_core.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate
from langchain_core.prompts.prompt import PromptTemplate
from langchain_core.pydantic_v1 import Field, root_validator
from langchain_core.tracers.langchain import LangChainTracer
from langchain.chains.llm import LLMChain
class Person(Serializable):
@@ -74,105 +69,6 @@ def test_typeerror() -> None:
)
@pytest.mark.community
@pytest.mark.requires("openai")
def test_serialize_openai_llm(snapshot: Any) -> None:
from langchain_community.llms.openai import OpenAI
with patch.dict(os.environ, {"LANGCHAIN_API_KEY": "test-api-key"}):
llm = OpenAI( # type: ignore[call-arg]
model="davinci",
temperature=0.5,
openai_api_key="hello",
# This is excluded from serialization
callbacks=[LangChainTracer()],
)
llm.temperature = 0.7 # this is reflected in serialization
assert dumps(llm, pretty=True) == snapshot
@pytest.mark.community
@pytest.mark.requires("openai")
def test_serialize_llmchain(snapshot: Any) -> None:
from langchain_community.llms.openai import OpenAI
llm = OpenAI(model="davinci", temperature=0.5, openai_api_key="hello") # type: ignore[call-arg]
prompt = PromptTemplate.from_template("hello {name}!")
chain = LLMChain(llm=llm, prompt=prompt)
assert dumps(chain, pretty=True) == snapshot
@pytest.mark.community
@pytest.mark.requires("openai")
def test_serialize_llmchain_env() -> None:
from langchain_community.llms.openai import OpenAI
llm = OpenAI(model="davinci", temperature=0.5, openai_api_key="hello") # type: ignore[call-arg]
prompt = PromptTemplate.from_template("hello {name}!")
chain = LLMChain(llm=llm, prompt=prompt)
import os
has_env = "OPENAI_API_KEY" in os.environ
if not has_env:
os.environ["OPENAI_API_KEY"] = "env_variable"
llm_2 = OpenAI(model="davinci", temperature=0.5) # type: ignore[call-arg]
prompt_2 = PromptTemplate.from_template("hello {name}!")
chain_2 = LLMChain(llm=llm_2, prompt=prompt_2)
assert dumps(chain_2, pretty=True) == dumps(chain, pretty=True)
if not has_env:
del os.environ["OPENAI_API_KEY"]
@pytest.mark.community
@pytest.mark.requires("openai")
def test_serialize_llmchain_chat(snapshot: Any) -> None:
from langchain_community.chat_models.openai import ChatOpenAI
llm = ChatOpenAI(model="davinci", temperature=0.5, openai_api_key="hello") # type: ignore[call-arg]
prompt = ChatPromptTemplate.from_messages(
[HumanMessagePromptTemplate.from_template("hello {name}!")]
)
chain = LLMChain(llm=llm, prompt=prompt)
assert dumps(chain, pretty=True) == snapshot
import os
has_env = "OPENAI_API_KEY" in os.environ
if not has_env:
os.environ["OPENAI_API_KEY"] = "env_variable"
llm_2 = ChatOpenAI(model="davinci", temperature=0.5) # type: ignore[call-arg]
prompt_2 = ChatPromptTemplate.from_messages(
[HumanMessagePromptTemplate.from_template("hello {name}!")]
)
chain_2 = LLMChain(llm=llm_2, prompt=prompt_2)
assert dumps(chain_2, pretty=True) == dumps(chain, pretty=True)
if not has_env:
del os.environ["OPENAI_API_KEY"]
@pytest.mark.community
@pytest.mark.requires("openai")
def test_serialize_llmchain_with_non_serializable_arg(snapshot: Any) -> None:
from langchain_community.llms.openai import OpenAI
llm = OpenAI( # type: ignore[call-arg]
model="davinci",
temperature=0.5,
openai_api_key="hello",
client=NotSerializable,
)
prompt = PromptTemplate.from_template("hello {name}!")
chain = LLMChain(llm=llm, prompt=prompt)
assert dumps(chain, pretty=True) == snapshot
def test_person_with_kwargs(snapshot: Any) -> None:
person = Person(secret="hello")
assert dumps(person, separators=(",", ":")) == snapshot

@@ -7,12 +7,10 @@ from langchain_core.prompts.prompt import PromptTemplate
from langchain.chains.llm import LLMChain
pytest.importorskip(
"langchain_community",
)
pytest.importorskip("langchain_openai", reason="langchain_openai not installed")
pytest.importorskip("langchain_community", reason="langchain_community not installed")
from langchain_community.llms.openai import ( # noqa: E402, # ignore: community-import
from langchain_community.llms.openai import ( # noqa: E402 # ignore: community-import
OpenAI as CommunityOpenAI,
)

@@ -92,24 +92,3 @@ def test_test_group_dependencies(poetry_conf: Mapping[str, Any]) -> None:
"requests-mock",
]
)
@pytest.mark.community
def test_imports() -> None:
"""Test that you can import all top level things okay."""
from langchain_community.callbacks import OpenAICallbackHandler # noqa: F401
from langchain_community.chat_models import ChatOpenAI # noqa: F401
from langchain_community.document_loaders import BSHTMLLoader # noqa: F401
from langchain_community.embeddings import OpenAIEmbeddings # noqa: F401
from langchain_community.llms import OpenAI # noqa: F401
from langchain_community.retrievers import VespaRetriever # noqa: F401
from langchain_community.tools import DuckDuckGoSearchResults # noqa: F401
from langchain_community.utilities import (
SearchApiAPIWrapper, # noqa: F401
SerpAPIWrapper, # noqa: F401
)
from langchain_community.vectorstores import FAISS # noqa: F401
from langchain_core.prompts import BasePromptTemplate # noqa: F401
from langchain.agents import OpenAIFunctionsAgent # noqa: F401
from langchain.chains import LLMChain # noqa: F401

@@ -1,13 +1,12 @@
import importlib
from pathlib import Path
import pytest
# Attempt to recursively import all modules in langchain
PKG_ROOT = Path(__file__).parent.parent.parent
COMMUNITY_NOT_INSTALLED = importlib.util.find_spec("langchain_community") is None
@pytest.mark.community
def test_import_all() -> None:
"""Generate the public API for this package."""
library_code = PKG_ROOT / "langchain"
@@ -26,8 +25,13 @@ def test_import_all() -> None:
for name in all:
# Attempt to import the name from the module
obj = getattr(mod, name)
assert obj is not None
try:
obj = getattr(mod, name)
assert obj is not None
except ModuleNotFoundError as e:
# If the module is not installed, we suppress the error
if "Module langchain_community" in str(e) and COMMUNITY_NOT_INSTALLED:
pass
def test_import_all_using_dir() -> None:
@@ -42,6 +46,9 @@ def test_import_all_using_dir() -> None:
# Without init
module_name = module_name.rsplit(".", 1)[0]
if module_name.startswith("langchain_community.") and COMMUNITY_NOT_INSTALLED:
continue
try:
mod = importlib.import_module(module_name)
except ModuleNotFoundError as e:

@@ -1,16 +0,0 @@
import pytest
from langchain_core.vectorstores import VectorStore
from langchain import vectorstores
@pytest.mark.community
def test_all_imports() -> None:
"""Simple test to make sure all things can be imported."""
for cls in vectorstores.__all__:
if cls not in [
"AlibabaCloudOpenSearchSettings",
"ClickhouseSettings",
"MyScaleSettings",
]:
assert issubclass(getattr(vectorstores, cls), VectorStore)