remove CVEs (#8092)

This PR moves all code with known CVEs into `langchain.experimental`.
Note that we are NOT yet removing it from the core `langchain` package - we
will give people a week to migrate.

See MIGRATE.md for how to migrate.

Zero changes to functionality.

Vulnerabilities this addresses:

PALChain:
- https://security.snyk.io/vuln/SNYK-PYTHON-LANGCHAIN-5752409
- https://security.snyk.io/vuln/SNYK-PYTHON-LANGCHAIN-5759265

SQLDatabaseChain:
- https://security.snyk.io/vuln/SNYK-PYTHON-LANGCHAIN-5759268

`load_prompt` (Python files only):
- https://security.snyk.io/vuln/SNYK-PYTHON-LANGCHAIN-5725807
Harrison Chase 11 months ago committed by GitHub
parent 08c658d3f8
commit d353d668e4

@ -0,0 +1,47 @@
# Migrating to `langchain.experimental`
We are moving any experimental components of langchain, or components with vulnerability issues, into `langchain.experimental`.
This guide covers how to migrate.
## Installation
Previously:
`pip install -U langchain`
Now:
`pip install -U langchain langchain-experimental`
## PALChain
Previously:
`from langchain.chains import PALChain`
Now:
`from langchain.experimental.pal_chain import PALChain`
## SQLDatabaseChain
Previously:
`from langchain.chains import SQLDatabaseChain`
Now:
`from langchain.experimental.sql import SQLDatabaseChain`
## `load_prompt` for Python files
Note: this only applies if you want to load Python files as prompts.
If you want to load json/yaml files, no change is needed.
Previously:
`from langchain.prompts import load_prompt`
Now:
`from langchain.experimental.prompts import load_prompt`
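
For a gradual migration during the one-week window, a compatibility import can paper over the move. A minimal sketch (this pattern is not part of the PR; both packages must be installed):

```python
# Prefer the new experimental location; fall back to the old core import
# until the migration window closes and the core copies are removed.
try:
    from langchain.experimental.pal_chain import PALChain
    from langchain.experimental.sql import SQLDatabaseChain
except ImportError:
    from langchain.chains import PALChain, SQLDatabaseChain
```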

@ -2,6 +2,8 @@ from __future__ import annotations
from typing import List, Optional
from pydantic import ValidationError
from langchain.chains.llm import LLMChain
from langchain.chat_models.base import BaseChatModel
from langchain.experimental.autonomous_agents.autogpt.output_parser import (
@ -21,7 +23,6 @@ from langchain.schema.messages import AIMessage, HumanMessage, SystemMessage
from langchain.tools.base import BaseTool
from langchain.tools.human.tool import HumanInputRun
from langchain.vectorstores.base import VectorStoreRetriever
from pydantic import ValidationError
class AutoGPT:

@ -1,8 +1,9 @@
from typing import Any, Dict, List
from pydantic import Field
from langchain.memory.chat_memory import BaseChatMemory, get_prompt_input_key
from langchain.vectorstores.base import VectorStoreRetriever
from pydantic import Field
class AutoGPTMemory(BaseChatMemory):

@ -1,6 +1,8 @@
import time
from typing import Any, Callable, List
from pydantic import BaseModel
from langchain.experimental.autonomous_agents.autogpt.prompt_generator import get_prompt
from langchain.prompts.chat import (
BaseChatPromptTemplate,
@ -8,7 +10,6 @@ from langchain.prompts.chat import (
from langchain.schema.messages import BaseMessage, HumanMessage, SystemMessage
from langchain.tools.base import BaseTool
from langchain.vectorstores.base import VectorStoreRetriever
from pydantic import BaseModel
class AutoGPTPrompt(BaseChatPromptTemplate, BaseModel):

@ -2,6 +2,8 @@
from collections import deque
from typing import Any, Dict, List, Optional
from pydantic import BaseModel, Field
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.experimental.autonomous_agents.baby_agi.task_creation import (
@ -15,7 +17,6 @@ from langchain.experimental.autonomous_agents.baby_agi.task_prioritization impor
)
from langchain.schema.language_model import BaseLanguageModel
from langchain.vectorstores.base import VectorStore
from pydantic import BaseModel, Field
class BabyAGI(Chain, BaseModel):

@ -1,4 +1,5 @@
from langchain import LLMChain, PromptTemplate
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.schema.language_model import BaseLanguageModel

@ -1,4 +1,5 @@
from langchain import LLMChain, PromptTemplate
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.schema.language_model import BaseLanguageModel

@ -1,4 +1,5 @@
from langchain import LLMChain, PromptTemplate
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.schema.language_model import BaseLanguageModel

@ -7,6 +7,7 @@ import json
from typing import Any, ClassVar, Dict, List, Optional, Type
import pydantic
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain

@ -5,9 +5,10 @@ from typing import Any, Optional, Union
import duckdb
import pandas as pd
from pydantic import BaseModel, Field, PrivateAttr, root_validator, validator
from langchain.experimental.cpal.constants import Constant
from langchain.graphs.networkx_graph import NetworkxEntityGraph
from pydantic import BaseModel, Field, PrivateAttr, root_validator, validator
class NarrativeModel(BaseModel):

@ -2,11 +2,12 @@ import re
from datetime import datetime
from typing import Any, Dict, List, Optional, Tuple
from langchain import LLMChain
from pydantic import BaseModel, Field
from langchain.chains import LLMChain
from langchain.experimental.generative_agents.memory import GenerativeAgentMemory
from langchain.prompts import PromptTemplate
from langchain.schema.language_model import BaseLanguageModel
from pydantic import BaseModel, Field
class GenerativeAgent(BaseModel):

@ -3,7 +3,7 @@ import re
from datetime import datetime
from typing import Any, Dict, List, Optional
from langchain import LLMChain
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.retrievers import TimeWeightedVectorStoreRetriever
from langchain.schema import BaseMemory, Document

@ -4,9 +4,10 @@ from __future__ import annotations
import json
from typing import TYPE_CHECKING, Any, List, Optional, cast
from pydantic import Field, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.huggingface_pipeline import HuggingFacePipeline
from pydantic import Field, root_validator
if TYPE_CHECKING:
import jsonformer

@ -3,10 +3,11 @@ from __future__ import annotations
from typing import TYPE_CHECKING, Any, List, Optional, cast
from pydantic import Field, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.huggingface_pipeline import HuggingFacePipeline
from langchain.llms.utils import enforce_stop_tokens
from pydantic import Field, root_validator
if TYPE_CHECKING:
import rellm

@ -0,0 +1,10 @@
"""Implements Program-Aided Language Models.
As in https://arxiv.org/pdf/2211.10435.pdf.
This is vulnerable to arbitrary code execution:
https://github.com/hwchase17/langchain/issues/5872
"""
from langchain.experimental.pal_chain.base import PALChain
__all__ = ["PALChain"]

@ -0,0 +1,314 @@
"""Implements Program-Aided Language Models.
This module implements the Program-Aided Language Models (PAL) for generating code
solutions. PAL is a technique described in the paper "Program-Aided Language Models"
(https://arxiv.org/pdf/2211.10435.pdf).
"""
from __future__ import annotations
import ast
import warnings
from typing import Any, Dict, List, Optional
from pydantic import Extra, Field, root_validator
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.pal.colored_object_prompt import COLORED_OBJECT_PROMPT
from langchain.chains.pal.math_prompt import MATH_PROMPT
from langchain.schema import BasePromptTemplate
from langchain.schema.language_model import BaseLanguageModel
from langchain.utilities import PythonREPL
COMMAND_EXECUTION_FUNCTIONS = ["system", "exec", "execfile", "eval"]
class PALValidation:
SOLUTION_EXPRESSION_TYPE_FUNCTION = ast.FunctionDef
SOLUTION_EXPRESSION_TYPE_VARIABLE = ast.Name
def __init__(
self,
solution_expression_name: Optional[str] = None,
solution_expression_type: Optional[type] = None,
allow_imports: bool = False,
allow_command_exec: bool = False,
):
"""Initialize a PALValidation instance.
Args:
solution_expression_name (str): Name of the expected solution expression.
If passed, solution_expression_type must be passed as well.
solution_expression_type (type): AST type of the expected solution
expression. If passed, solution_expression_name must be passed as well.
Must be one of PALValidation.SOLUTION_EXPRESSION_TYPE_FUNCTION,
PALValidation.SOLUTION_EXPRESSION_TYPE_VARIABLE.
allow_imports (bool): Allow import statements.
allow_command_exec (bool): Allow using known command execution functions.
"""
self.solution_expression_name = solution_expression_name
self.solution_expression_type = solution_expression_type
if solution_expression_name is not None:
if not isinstance(self.solution_expression_name, str):
raise ValueError(
f"Expected solution_expression_name to be str, "
f"instead found {type(self.solution_expression_name)}"
)
if solution_expression_type is not None:
if (
self.solution_expression_type
is not self.SOLUTION_EXPRESSION_TYPE_FUNCTION
and self.solution_expression_type
is not self.SOLUTION_EXPRESSION_TYPE_VARIABLE
):
raise ValueError(
f"Expected solution_expression_type to be one of "
f"({self.SOLUTION_EXPRESSION_TYPE_FUNCTION},"
f"{self.SOLUTION_EXPRESSION_TYPE_VARIABLE}),"
f"instead found {self.solution_expression_type}"
)
if solution_expression_name is not None and solution_expression_type is None:
raise TypeError(
"solution_expression_name "
"requires solution_expression_type to be passed as well"
)
if solution_expression_name is None and solution_expression_type is not None:
raise TypeError(
"solution_expression_type "
"requires solution_expression_name to be passed as well"
)
self.allow_imports = allow_imports
self.allow_command_exec = allow_command_exec
class PALChain(Chain):
"""Implements Program-Aided Language Models (PAL).
This class implements the Program-Aided Language Models (PAL) for generating code
solutions. PAL is a technique described in the paper "Program-Aided Language Models"
(https://arxiv.org/pdf/2211.10435.pdf).
"""
llm_chain: LLMChain
llm: Optional[BaseLanguageModel] = None
"""[Deprecated]"""
prompt: BasePromptTemplate = MATH_PROMPT
"""[Deprecated]"""
stop: str = "\n\n"
"""Stop token to use when generating code."""
get_answer_expr: str = "print(solution())"
"""Expression to use to get the answer from the generated code."""
python_globals: Optional[Dict[str, Any]] = None
"""Python globals to use when executing the generated code."""
python_locals: Optional[Dict[str, Any]] = None
"""Python locals to use when executing the generated code."""
output_key: str = "result" #: :meta private:
return_intermediate_steps: bool = False
"""Whether to return intermediate steps in the generated code."""
code_validations: PALValidation = Field(default_factory=PALValidation)
"""Validations to perform on the generated code."""
timeout: Optional[int] = 10
"""Timeout in seconds for the generated code to execute."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator(pre=True)
def raise_deprecation(cls, values: Dict) -> Dict:
if "llm" in values:
warnings.warn(
"Directly instantiating a PALChain with an llm is deprecated. "
"Please instantiate with llm_chain argument or using one of "
"the class method constructors from_math_prompt, "
"from_colored_object_prompt."
)
if "llm_chain" not in values and values["llm"] is not None:
values["llm_chain"] = LLMChain(llm=values["llm"], prompt=MATH_PROMPT)
return values
@property
def input_keys(self) -> List[str]:
"""Return the singular input key.
:meta private:
"""
return self.prompt.input_variables
@property
def output_keys(self) -> List[str]:
"""Return the singular output key.
:meta private:
"""
if not self.return_intermediate_steps:
return [self.output_key]
else:
return [self.output_key, "intermediate_steps"]
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
code = self.llm_chain.predict(
stop=[self.stop], callbacks=_run_manager.get_child(), **inputs
)
_run_manager.on_text(code, color="green", end="\n", verbose=self.verbose)
PALChain.validate_code(code, self.code_validations)
repl = PythonREPL(_globals=self.python_globals, _locals=self.python_locals)
res = repl.run(code + f"\n{self.get_answer_expr}", timeout=self.timeout)
output = {self.output_key: res.strip()}
if self.return_intermediate_steps:
output["intermediate_steps"] = code
return output
@classmethod
def validate_code(cls, code: str, code_validations: PALValidation) -> None:
try:
code_tree = ast.parse(code)
except (SyntaxError, UnicodeDecodeError):
raise ValueError(f"Generated code is not valid python code: {code}")
except TypeError:
raise ValueError(
f"Generated code is expected to be a string, "
f"instead found {type(code)}"
)
except OverflowError:
raise ValueError(
f"Generated code too long / complex to be parsed by ast: {code}"
)
found_solution_expr = False
if code_validations.solution_expression_name is None:
# Skip validation if no solution_expression_name was given
found_solution_expr = True
has_imports = False
top_level_nodes = list(ast.iter_child_nodes(code_tree))
for node in top_level_nodes:
if (
code_validations.solution_expression_name is not None
and code_validations.solution_expression_type is not None
):
# Check root nodes (like func def)
if (
isinstance(node, code_validations.solution_expression_type)
and hasattr(node, "name")
and node.name == code_validations.solution_expression_name
):
found_solution_expr = True
# Check assigned nodes (like answer variable)
if isinstance(node, ast.Assign):
for target_node in node.targets:
if (
isinstance(
target_node, code_validations.solution_expression_type
)
and hasattr(target_node, "id")
and target_node.id
== code_validations.solution_expression_name
):
found_solution_expr = True
if isinstance(node, ast.Import) or isinstance(node, ast.ImportFrom):
has_imports = True
if not found_solution_expr:
raise ValueError(
f"Generated code is missing the solution expression: "
f"{code_validations.solution_expression_name} of type: "
f"{code_validations.solution_expression_type}"
)
if not code_validations.allow_imports and has_imports:
raise ValueError(f"Generated code has disallowed imports: {code}")
if (
not code_validations.allow_command_exec
or not code_validations.allow_imports
):
for node in ast.walk(code_tree):
if (
(not code_validations.allow_command_exec)
and isinstance(node, ast.Call)
and (
(
hasattr(node.func, "id")
and node.func.id in COMMAND_EXECUTION_FUNCTIONS
)
or (
isinstance(node.func, ast.Attribute)
and node.func.attr in COMMAND_EXECUTION_FUNCTIONS
)
)
):
# node.func may be an ast.Name (with .id) or an ast.Attribute
# (with .attr); read whichever is present for the error message.
raise ValueError(
f"Found illegal command execution function "
f"{getattr(node.func, 'id', getattr(node.func, 'attr', None))} "
f"in code {code}"
)
if (not code_validations.allow_imports) and (
isinstance(node, ast.Import) or isinstance(node, ast.ImportFrom)
):
raise ValueError(f"Generated code has disallowed imports: {code}")
@classmethod
def from_math_prompt(cls, llm: BaseLanguageModel, **kwargs: Any) -> PALChain:
"""Load PAL from math prompt.
Args:
llm (BaseLanguageModel): The language model to use for generating code.
Returns:
PALChain: An instance of PALChain.
"""
llm_chain = LLMChain(llm=llm, prompt=MATH_PROMPT)
code_validations = PALValidation(
solution_expression_name="solution",
solution_expression_type=PALValidation.SOLUTION_EXPRESSION_TYPE_FUNCTION,
)
return cls(
llm_chain=llm_chain,
stop="\n\n",
get_answer_expr="print(solution())",
code_validations=code_validations,
**kwargs,
)
@classmethod
def from_colored_object_prompt(
cls, llm: BaseLanguageModel, **kwargs: Any
) -> PALChain:
"""Load PAL from colored object prompt.
Args:
llm (BaseLanguageModel): The language model to use for generating code.
Returns:
PALChain: An instance of PALChain.
"""
llm_chain = LLMChain(llm=llm, prompt=COLORED_OBJECT_PROMPT)
code_validations = PALValidation(
solution_expression_name="answer",
solution_expression_type=PALValidation.SOLUTION_EXPRESSION_TYPE_VARIABLE,
)
return cls(
llm_chain=llm_chain,
stop="\n\n\n",
get_answer_expr="print(answer)",
code_validations=code_validations,
**kwargs,
)
@property
def _chain_type(self) -> str:
return "pal_chain"

@ -0,0 +1,77 @@
# flake8: noqa
from langchain.prompts.prompt import PromptTemplate
template = (
"""
# Generate Python3 Code to solve problems
# Q: On the nightstand, there is a red pencil, a purple mug, a burgundy keychain, a fuchsia teddy bear, a black plate, and a blue stress ball. What color is the stress ball?
# Put objects into a dictionary for quick look up
objects = dict()
objects['pencil'] = 'red'
objects['mug'] = 'purple'
objects['keychain'] = 'burgundy'
objects['teddy bear'] = 'fuchsia'
objects['plate'] = 'black'
objects['stress ball'] = 'blue'
# Look up the color of stress ball
stress_ball_color = objects['stress ball']
answer = stress_ball_color
# Q: On the table, you see a bunch of objects arranged in a row: a purple paperclip, a pink stress ball, a brown keychain, a green scrunchiephone charger, a mauve fidget spinner, and a burgundy pen. What is the color of the object directly to the right of the stress ball?
# Put objects into a list to record ordering
objects = []
objects += [('paperclip', 'purple')] * 1
objects += [('stress ball', 'pink')] * 1
objects += [('keychain', 'brown')] * 1
objects += [('scrunchiephone charger', 'green')] * 1
objects += [('fidget spinner', 'mauve')] * 1
objects += [('pen', 'burgundy')] * 1
# Find the index of the stress ball
stress_ball_idx = None
for i, object in enumerate(objects):
if object[0] == 'stress ball':
stress_ball_idx = i
break
# Find the directly right object
direct_right = objects[stress_ball_idx + 1]
# Check the directly right object's color
direct_right_color = direct_right[1]
answer = direct_right_color
# Q: On the nightstand, you see the following items arranged in a row: a teal plate, a burgundy keychain, a yellow scrunchiephone charger, an orange mug, a pink notebook, and a grey cup. How many non-orange items do you see to the left of the teal item?
# Put objects into a list to record ordering
objects = []
objects += [('plate', 'teal')] * 1
objects += [('keychain', 'burgundy')] * 1
objects += [('scrunchiephone charger', 'yellow')] * 1
objects += [('mug', 'orange')] * 1
objects += [('notebook', 'pink')] * 1
objects += [('cup', 'grey')] * 1
# Find the index of the teal item
teal_idx = None
for i, object in enumerate(objects):
if object[1] == 'teal':
teal_idx = i
break
# Find non-orange items to the left of the teal item
non_orange = [object for object in objects[:teal_idx] if object[1] != 'orange']
# Count number of non-orange objects
num_non_orange = len(non_orange)
answer = num_non_orange
# Q: {question}
""".strip()
+ "\n"
)
COLORED_OBJECT_PROMPT = PromptTemplate(input_variables=["question"], template=template)
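
The colored-object variant validates differently from the math variant: it expects a top-level `answer` variable rather than a `solution()` function. A hedged usage sketch (`OpenAI` is illustrative):

```python
from langchain.experimental.pal_chain import PALChain
from langchain.llms import OpenAI

chain = PALChain.from_colored_object_prompt(OpenAI(temperature=0))
# Validation expects an assigned `answer` variable, and the result is read
# back via get_answer_expr="print(answer)".
print(chain.run("On the desk there is a red pen and a blue mug. "
                "What color is the mug?"))
```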

@ -0,0 +1,157 @@
# flake8: noqa
from langchain.prompts.prompt import PromptTemplate
template = (
'''
Q: Olivia has $23. She bought five bagels for $3 each. How much money does she have left?
# solution in Python:
def solution():
"""Olivia has $23. She bought five bagels for $3 each. How much money does she have left?"""
money_initial = 23
bagels = 5
bagel_cost = 3
money_spent = bagels * bagel_cost
money_left = money_initial - money_spent
result = money_left
return result
Q: Michael had 58 golf balls. On tuesday, he lost 23 golf balls. On wednesday, he lost 2 more. How many golf balls did he have at the end of wednesday?
# solution in Python:
def solution():
"""Michael had 58 golf balls. On tuesday, he lost 23 golf balls. On wednesday, he lost 2 more. How many golf balls did he have at the end of wednesday?"""
golf_balls_initial = 58
golf_balls_lost_tuesday = 23
golf_balls_lost_wednesday = 2
golf_balls_left = golf_balls_initial - golf_balls_lost_tuesday - golf_balls_lost_wednesday
result = golf_balls_left
return result
Q: There were nine computers in the server room. Five more computers were installed each day, from monday to thursday. How many computers are now in the server room?
# solution in Python:
def solution():
"""There were nine computers in the server room. Five more computers were installed each day, from monday to thursday. How many computers are now in the server room?"""
computers_initial = 9
computers_per_day = 5
num_days = 4 # 4 days between monday and thursday
computers_added = computers_per_day * num_days
computers_total = computers_initial + computers_added
result = computers_total
return result
Q: Shawn has five toys. For Christmas, he got two toys each from his mom and dad. How many toys does he have now?
# solution in Python:
def solution():
"""Shawn has five toys. For Christmas, he got two toys each from his mom and dad. How many toys does he have now?"""
toys_initial = 5
mom_toys = 2
dad_toys = 2
total_received = mom_toys + dad_toys
total_toys = toys_initial + total_received
result = total_toys
return result
Q: Jason had 20 lollipops. He gave Denny some lollipops. Now Jason has 12 lollipops. How many lollipops did Jason give to Denny?
# solution in Python:
def solution():
"""Jason had 20 lollipops. He gave Denny some lollipops. Now Jason has 12 lollipops. How many lollipops did Jason give to Denny?"""
jason_lollipops_initial = 20
jason_lollipops_after = 12
denny_lollipops = jason_lollipops_initial - jason_lollipops_after
result = denny_lollipops
return result
Q: Leah had 32 chocolates and her sister had 42. If they ate 35, how many pieces do they have left in total?
# solution in Python:
def solution():
"""Leah had 32 chocolates and her sister had 42. If they ate 35, how many pieces do they have left in total?"""
leah_chocolates = 32
sister_chocolates = 42
total_chocolates = leah_chocolates + sister_chocolates
chocolates_eaten = 35
chocolates_left = total_chocolates - chocolates_eaten
result = chocolates_left
return result
Q: If there are 3 cars in the parking lot and 2 more cars arrive, how many cars are in the parking lot?
# solution in Python:
def solution():
"""If there are 3 cars in the parking lot and 2 more cars arrive, how many cars are in the parking lot?"""
cars_initial = 3
cars_arrived = 2
total_cars = cars_initial + cars_arrived
result = total_cars
return result
Q: There are 15 trees in the grove. Grove workers will plant trees in the grove today. After they are done, there will be 21 trees. How many trees did the grove workers plant today?
# solution in Python:
def solution():
"""There are 15 trees in the grove. Grove workers will plant trees in the grove today. After they are done, there will be 21 trees. How many trees did the grove workers plant today?"""
trees_initial = 15
trees_after = 21
trees_added = trees_after - trees_initial
result = trees_added
return result
Q: {question}
# solution in Python:
'''.strip()
+ "\n\n\n"
)
MATH_PROMPT = PromptTemplate(input_variables=["question"], template=template)

@ -1,5 +1,7 @@
from typing import Any, Dict, List, Optional
from pydantic import Field
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.experimental.plan_and_execute.executors.base import BaseExecutor
@ -8,7 +10,6 @@ from langchain.experimental.plan_and_execute.schema import (
BaseStepContainer,
ListStepContainer,
)
from pydantic import Field
class PlanAndExecute(Chain):

@ -1,10 +1,11 @@
from abc import abstractmethod
from typing import Any
from pydantic import BaseModel
from langchain.callbacks.manager import Callbacks
from langchain.chains.base import Chain
from langchain.experimental.plan_and_execute.schema import StepResponse
from pydantic import BaseModel
class BaseExecutor(BaseModel):

@ -1,10 +1,11 @@
from abc import abstractmethod
from typing import Any, List, Optional
from pydantic import BaseModel
from langchain.callbacks.manager import Callbacks
from langchain.chains.llm import LLMChain
from langchain.experimental.plan_and_execute.schema import Plan, PlanOutputParser
from pydantic import BaseModel
class BasePlanner(BaseModel):

@ -1,9 +1,10 @@
from abc import abstractmethod
from typing import List, Tuple
from langchain.schema import BaseOutputParser
from pydantic import BaseModel, Field
from langchain.schema import BaseOutputParser
class Step(BaseModel):
value: str

@ -0,0 +1,3 @@
from langchain.experimental.prompts.load import load_prompt
__all__ = ["load_prompt"]

@ -0,0 +1,52 @@
# Susceptible to arbitrary code execution: https://github.com/hwchase17/langchain/issues/4849
import importlib
import json
from pathlib import Path
from typing import Union
import yaml
from langchain.prompts.loading import load_prompt_from_config, try_load_from_hub
from langchain.schema.prompts import BasePromptTemplate
def load_prompt(path: Union[str, Path]) -> BasePromptTemplate:
"""Unified method for loading a prompt from LangChainHub or local fs."""
if hub_result := try_load_from_hub(
path, _load_prompt_from_file, "prompts", {"py", "json", "yaml"}
):
return hub_result
else:
return _load_prompt_from_file(path)
def _load_prompt_from_file(file: Union[str, Path]) -> BasePromptTemplate:
"""Load prompt from file."""
# Convert file to a Path object.
if isinstance(file, str):
file_path = Path(file)
else:
file_path = file
# Load from either json or yaml.
if file_path.suffix == ".json":
with open(file_path) as f:
config = json.load(f)
elif file_path.suffix == ".yaml":
with open(file_path, "r") as f:
config = yaml.safe_load(f)
elif file_path.suffix == ".py":
spec = importlib.util.spec_from_loader(
"prompt", loader=None, origin=str(file_path)
)
if spec is None:
raise ValueError("could not load spec")
helper = importlib.util.module_from_spec(spec)
with open(file_path, "rb") as f:
exec(f.read(), helper.__dict__)
if not isinstance(helper.PROMPT, BasePromptTemplate):
raise ValueError("Did not get object of type BasePromptTemplate.")
return helper.PROMPT
else:
raise ValueError(f"Got unsupported file type {file_path.suffix}")
# Load the prompt from the config now.
return load_prompt_from_config(config)
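
To see why `.py` prompts are risky: `load_prompt` `exec()`s the file and returns its `PROMPT` attribute, so any code in the file runs on load. A sketch with a hypothetical file name:

```python
# --- contents of my_prompt.py (hypothetical) ---
# from langchain.prompts import PromptTemplate
# PROMPT = PromptTemplate(input_variables=["q"], template="Answer: {q}")
# # Anything else in this file (e.g. os.system(...)) would also execute.

from langchain.experimental.prompts import load_prompt

prompt = load_prompt("my_prompt.py")  # exec()s the file, returns PROMPT
```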

@ -0,0 +1,4 @@
"""Chain for interacting with SQL Database."""
from langchain.experimental.sql.base import SQLDatabaseChain
__all__ = ["SQLDatabaseChain"]

@ -0,0 +1,291 @@
"""Chain for interacting with SQL Database."""
from __future__ import annotations
import warnings
from typing import Any, Dict, List, Optional
from pydantic import Extra, Field, root_validator
from langchain.callbacks.manager import CallbackManagerForChainRun
from langchain.chains.base import Chain
from langchain.chains.llm import LLMChain
from langchain.chains.sql_database.prompt import DECIDER_PROMPT, PROMPT, SQL_PROMPTS
from langchain.prompts.prompt import PromptTemplate
from langchain.schema import BasePromptTemplate
from langchain.schema.language_model import BaseLanguageModel
from langchain.tools.sql_database.prompt import QUERY_CHECKER
from langchain.utilities.sql_database import SQLDatabase
INTERMEDIATE_STEPS_KEY = "intermediate_steps"
class SQLDatabaseChain(Chain):
"""Chain for interacting with SQL Database.
Example:
.. code-block:: python
from langchain.experimental.sql import SQLDatabaseChain
from langchain import OpenAI, SQLDatabase
db = SQLDatabase(...)
db_chain = SQLDatabaseChain.from_llm(OpenAI(), db)
"""
llm_chain: LLMChain
llm: Optional[BaseLanguageModel] = None
"""[Deprecated] LLM wrapper to use."""
database: SQLDatabase = Field(exclude=True)
"""SQL Database to connect to."""
prompt: Optional[BasePromptTemplate] = None
"""[Deprecated] Prompt to use to translate natural language to SQL."""
top_k: int = 5
"""Number of results to return from the query"""
input_key: str = "query" #: :meta private:
output_key: str = "result" #: :meta private:
return_sql: bool = False
"""Will return sql-command directly without executing it"""
return_intermediate_steps: bool = False
"""Whether or not to return the intermediate steps along with the final answer."""
return_direct: bool = False
"""Whether or not to return the result of querying the SQL table directly."""
use_query_checker: bool = False
"""Whether or not the query checker tool should be used to attempt
to fix the initial SQL from the LLM."""
query_checker_prompt: Optional[BasePromptTemplate] = None
"""The prompt template that should be used by the query checker"""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator(pre=True)
def raise_deprecation(cls, values: Dict) -> Dict:
if "llm" in values:
warnings.warn(
"Directly instantiating an SQLDatabaseChain with an llm is deprecated. "
"Please instantiate with llm_chain argument or using the from_llm "
"class method."
)
if "llm_chain" not in values and values["llm"] is not None:
database = values["database"]
prompt = values.get("prompt") or SQL_PROMPTS.get(
database.dialect, PROMPT
)
values["llm_chain"] = LLMChain(llm=values["llm"], prompt=prompt)
return values
@property
def input_keys(self) -> List[str]:
"""Return the singular input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return the singular output key.
:meta private:
"""
if not self.return_intermediate_steps:
return [self.output_key]
else:
return [self.output_key, INTERMEDIATE_STEPS_KEY]
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
input_text = f"{inputs[self.input_key]}\nSQLQuery:"
_run_manager.on_text(input_text, verbose=self.verbose)
# If not present, then defaults to None which is all tables.
table_names_to_use = inputs.get("table_names_to_use")
table_info = self.database.get_table_info(table_names=table_names_to_use)
llm_inputs = {
"input": input_text,
"top_k": str(self.top_k),
"dialect": self.database.dialect,
"table_info": table_info,
"stop": ["\nSQLResult:"],
}
intermediate_steps: List = []
try:
intermediate_steps.append(llm_inputs) # input: sql generation
sql_cmd = self.llm_chain.predict(
callbacks=_run_manager.get_child(),
**llm_inputs,
).strip()
if self.return_sql:
return {self.output_key: sql_cmd}
if not self.use_query_checker:
_run_manager.on_text(sql_cmd, color="green", verbose=self.verbose)
intermediate_steps.append(
sql_cmd
) # output: sql generation (no checker)
intermediate_steps.append({"sql_cmd": sql_cmd}) # input: sql exec
result = self.database.run(sql_cmd)
intermediate_steps.append(str(result)) # output: sql exec
else:
query_checker_prompt = self.query_checker_prompt or PromptTemplate(
template=QUERY_CHECKER, input_variables=["query", "dialect"]
)
query_checker_chain = LLMChain(
llm=self.llm_chain.llm, prompt=query_checker_prompt
)
query_checker_inputs = {
"query": sql_cmd,
"dialect": self.database.dialect,
}
checked_sql_command: str = query_checker_chain.predict(
callbacks=_run_manager.get_child(), **query_checker_inputs
).strip()
intermediate_steps.append(
checked_sql_command
) # output: sql generation (checker)
_run_manager.on_text(
checked_sql_command, color="green", verbose=self.verbose
)
intermediate_steps.append(
{"sql_cmd": checked_sql_command}
) # input: sql exec
result = self.database.run(checked_sql_command)
intermediate_steps.append(str(result)) # output: sql exec
sql_cmd = checked_sql_command
_run_manager.on_text("\nSQLResult: ", verbose=self.verbose)
_run_manager.on_text(result, color="yellow", verbose=self.verbose)
# If return_direct, we just set the final result equal to
# the result of the SQL query; otherwise try to get a human readable
# final answer.
if self.return_direct:
final_result = result
else:
_run_manager.on_text("\nAnswer:", verbose=self.verbose)
input_text += f"{sql_cmd}\nSQLResult: {result}\nAnswer:"
llm_inputs["input"] = input_text
intermediate_steps.append(llm_inputs) # input: final answer
final_result = self.llm_chain.predict(
callbacks=_run_manager.get_child(),
**llm_inputs,
).strip()
intermediate_steps.append(final_result) # output: final answer
_run_manager.on_text(final_result, color="green", verbose=self.verbose)
chain_result: Dict[str, Any] = {self.output_key: final_result}
if self.return_intermediate_steps:
chain_result[INTERMEDIATE_STEPS_KEY] = intermediate_steps
return chain_result
except Exception as exc:
# Append intermediate steps to exception, to aid in logging and later
# improvement of few shot prompt seeds
exc.intermediate_steps = intermediate_steps # type: ignore
raise exc
@property
def _chain_type(self) -> str:
return "sql_database_chain"
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
db: SQLDatabase,
prompt: Optional[BasePromptTemplate] = None,
**kwargs: Any,
) -> SQLDatabaseChain:
prompt = prompt or SQL_PROMPTS.get(db.dialect, PROMPT)
llm_chain = LLMChain(llm=llm, prompt=prompt)
return cls(llm_chain=llm_chain, database=db, **kwargs)
class SQLDatabaseSequentialChain(Chain):
"""Chain for querying SQL database that is a sequential chain.
The chain is as follows:
1. Based on the query, determine which tables to use.
2. Based on those tables, call the normal SQL database chain.
This is useful in cases where the number of tables in the database is large.
"""
decider_chain: LLMChain
sql_chain: SQLDatabaseChain
input_key: str = "query" #: :meta private:
output_key: str = "result" #: :meta private:
return_intermediate_steps: bool = False
@classmethod
def from_llm(
cls,
llm: BaseLanguageModel,
database: SQLDatabase,
query_prompt: BasePromptTemplate = PROMPT,
decider_prompt: BasePromptTemplate = DECIDER_PROMPT,
**kwargs: Any,
) -> SQLDatabaseSequentialChain:
"""Load the necessary chains."""
sql_chain = SQLDatabaseChain.from_llm(
llm, database, prompt=query_prompt, **kwargs
)
decider_chain = LLMChain(
llm=llm, prompt=decider_prompt, output_key="table_names"
)
return cls(sql_chain=sql_chain, decider_chain=decider_chain, **kwargs)
@property
def input_keys(self) -> List[str]:
"""Return the singular input key.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Return the singular output key.
:meta private:
"""
if not self.return_intermediate_steps:
return [self.output_key]
else:
return [self.output_key, INTERMEDIATE_STEPS_KEY]
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
_run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
_table_names = self.sql_chain.database.get_usable_table_names()
table_names = ", ".join(_table_names)
llm_inputs = {
"query": inputs[self.input_key],
"table_names": table_names,
}
_lowercased_table_names = [name.lower() for name in _table_names]
table_names_from_chain = self.decider_chain.predict_and_parse(**llm_inputs)
table_names_to_use = [
name
for name in table_names_from_chain
if name.lower() in _lowercased_table_names
]
_run_manager.on_text("Table names to use:", end="\n", verbose=self.verbose)
_run_manager.on_text(
str(table_names_to_use), color="yellow", verbose=self.verbose
)
new_inputs = {
self.sql_chain.input_key: inputs[self.input_key],
"table_names_to_use": table_names_to_use,
}
return self.sql_chain(
new_inputs, callbacks=_run_manager.get_child(), return_only_outputs=True
)
@property
def _chain_type(self) -> str:
return "sql_database_sequential_chain"

@ -0,0 +1,263 @@
# flake8: noqa
from langchain.output_parsers.list import CommaSeparatedListOutputParser
from langchain.prompts.prompt import PromptTemplate
PROMPT_SUFFIX = """Only use the following tables:
{table_info}
Question: {input}"""
_DEFAULT_TEMPLATE = """Given an input question, first create a syntactically correct {dialect} query to run, then look at the results of the query and return the answer. Unless the user specifies in his question a specific number of examples he wishes to obtain, always limit your query to at most {top_k} results. You can order the results by a relevant column to return the most interesting examples in the database.
Never query for all the columns from a specific table, only ask for the few relevant columns given the question.
Pay attention to use only the column names that you can see in the schema description. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table.
Use the following format:
Question: Question here
SQLQuery: SQL Query to run
SQLResult: Result of the SQLQuery
Answer: Final answer here
"""
PROMPT = PromptTemplate(
input_variables=["input", "table_info", "dialect", "top_k"],
template=_DEFAULT_TEMPLATE + PROMPT_SUFFIX,
)
_DECIDER_TEMPLATE = """Given the below input question and list of potential tables, output a comma separated list of the table names that may be necessary to answer this question.
Question: {query}
Table Names: {table_names}
Relevant Table Names:"""
DECIDER_PROMPT = PromptTemplate(
input_variables=["query", "table_names"],
template=_DECIDER_TEMPLATE,
output_parser=CommaSeparatedListOutputParser(),
)
_duckdb_prompt = """You are a DuckDB expert. Given an input question, first create a syntactically correct DuckDB query to run, then look at the results of the query and return the answer to the input question.
Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the LIMIT clause as per DuckDB. You can order the results to return the most informative data in the database.
Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in double quotes (") to denote them as delimited identifiers.
Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table.
Pay attention to use today() function to get the current date, if the question involves "today".
Use the following format:
Question: Question here
SQLQuery: SQL Query to run
SQLResult: Result of the SQLQuery
Answer: Final answer here
"""
DUCKDB_PROMPT = PromptTemplate(
input_variables=["input", "table_info", "top_k"],
template=_duckdb_prompt + PROMPT_SUFFIX,
)
_googlesql_prompt = """You are a GoogleSQL expert. Given an input question, first create a syntactically correct GoogleSQL query to run, then look at the results of the query and return the answer to the input question.
Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the LIMIT clause as per GoogleSQL. You can order the results to return the most informative data in the database.
Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in backticks (`) to denote them as delimited identifiers.
Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table.
Pay attention to use CURRENT_DATE() function to get the current date, if the question involves "today".
Use the following format:
Question: Question here
SQLQuery: SQL Query to run
SQLResult: Result of the SQLQuery
Answer: Final answer here
"""
GOOGLESQL_PROMPT = PromptTemplate(
input_variables=["input", "table_info", "top_k"],
template=_googlesql_prompt + PROMPT_SUFFIX,
)
_mssql_prompt = """You are an MS SQL expert. Given an input question, first create a syntactically correct MS SQL query to run, then look at the results of the query and return the answer to the input question.
Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the TOP clause as per MS SQL. You can order the results to return the most informative data in the database.
Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in square brackets ([]) to denote them as delimited identifiers.
Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table.
Pay attention to use CAST(GETDATE() as date) function to get the current date, if the question involves "today".
Use the following format:
Question: Question here
SQLQuery: SQL Query to run
SQLResult: Result of the SQLQuery
Answer: Final answer here
"""
MSSQL_PROMPT = PromptTemplate(
input_variables=["input", "table_info", "top_k"],
template=_mssql_prompt + PROMPT_SUFFIX,
)
_mysql_prompt = """You are a MySQL expert. Given an input question, first create a syntactically correct MySQL query to run, then look at the results of the query and return the answer to the input question.
Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the LIMIT clause as per MySQL. You can order the results to return the most informative data in the database.
Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in backticks (`) to denote them as delimited identifiers.
Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table.
Pay attention to use CURDATE() function to get the current date, if the question involves "today".
Use the following format:
Question: Question here
SQLQuery: SQL Query to run
SQLResult: Result of the SQLQuery
Answer: Final answer here
"""
MYSQL_PROMPT = PromptTemplate(
input_variables=["input", "table_info", "top_k"],
template=_mysql_prompt + PROMPT_SUFFIX,
)
_mariadb_prompt = """You are a MariaDB expert. Given an input question, first create a syntactically correct MariaDB query to run, then look at the results of the query and return the answer to the input question.
Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the LIMIT clause as per MariaDB. You can order the results to return the most informative data in the database.
Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in backticks (`) to denote them as delimited identifiers.
Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table.
Pay attention to use CURDATE() function to get the current date, if the question involves "today".
Use the following format:
Question: Question here
SQLQuery: SQL Query to run
SQLResult: Result of the SQLQuery
Answer: Final answer here
"""
MARIADB_PROMPT = PromptTemplate(
input_variables=["input", "table_info", "top_k"],
template=_mariadb_prompt + PROMPT_SUFFIX,
)
_oracle_prompt = """You are an Oracle SQL expert. Given an input question, first create a syntactically correct Oracle SQL query to run, then look at the results of the query and return the answer to the input question.
Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the FETCH FIRST n ROWS ONLY clause as per Oracle SQL. You can order the results to return the most informative data in the database.
Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in double quotes (") to denote them as delimited identifiers.
Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table.
Pay attention to use TRUNC(SYSDATE) function to get the current date, if the question involves "today".
Use the following format:
Question: Question here
SQLQuery: SQL Query to run
SQLResult: Result of the SQLQuery
Answer: Final answer here
"""
ORACLE_PROMPT = PromptTemplate(
input_variables=["input", "table_info", "top_k"],
template=_oracle_prompt + PROMPT_SUFFIX,
)
_postgres_prompt = """You are a PostgreSQL expert. Given an input question, first create a syntactically correct PostgreSQL query to run, then look at the results of the query and return the answer to the input question.
Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the LIMIT clause as per PostgreSQL. You can order the results to return the most informative data in the database.
Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in double quotes (") to denote them as delimited identifiers.
Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table.
Pay attention to use CURRENT_DATE function to get the current date, if the question involves "today".
Use the following format:
Question: Question here
SQLQuery: SQL Query to run
SQLResult: Result of the SQLQuery
Answer: Final answer here
"""
POSTGRES_PROMPT = PromptTemplate(
input_variables=["input", "table_info", "top_k"],
template=_postgres_prompt + PROMPT_SUFFIX,
)
_sqlite_prompt = """You are a SQLite expert. Given an input question, first create a syntactically correct SQLite query to run, then look at the results of the query and return the answer to the input question.
Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the LIMIT clause as per SQLite. You can order the results to return the most informative data in the database.
Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in double quotes (") to denote them as delimited identifiers.
Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table.
Pay attention to use date('now') function to get the current date, if the question involves "today".
Use the following format:
Question: Question here
SQLQuery: SQL Query to run
SQLResult: Result of the SQLQuery
Answer: Final answer here
"""
SQLITE_PROMPT = PromptTemplate(
input_variables=["input", "table_info", "top_k"],
template=_sqlite_prompt + PROMPT_SUFFIX,
)
_clickhouse_prompt = """You are a ClickHouse expert. Given an input question, first create a syntactically correct ClickHouse query to run, then look at the results of the query and return the answer to the input question.
Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the LIMIT clause as per ClickHouse. You can order the results to return the most informative data in the database.
Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in double quotes (") to denote them as delimited identifiers.
Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table.
Pay attention to use today() function to get the current date, if the question involves "today".
Use the following format:
Question: "Question here"
SQLQuery: "SQL Query to run"
SQLResult: "Result of the SQLQuery"
Answer: "Final answer here"
"""
CLICKHOUSE_PROMPT = PromptTemplate(
input_variables=["input", "table_info", "top_k"],
template=_clickhouse_prompt + PROMPT_SUFFIX,
)
_prestodb_prompt = """You are a PrestoDB expert. Given an input question, first create a syntactically correct PrestoDB query to run, then look at the results of the query and return the answer to the input question.
Unless the user specifies in the question a specific number of examples to obtain, query for at most {top_k} results using the LIMIT clause as per PrestoDB. You can order the results to return the most informative data in the database.
Never query for all columns from a table. You must query only the columns that are needed to answer the question. Wrap each column name in double quotes (") to denote them as delimited identifiers.
Pay attention to use only the column names you can see in the tables below. Be careful to not query for columns that do not exist. Also, pay attention to which column is in which table.
Pay attention to use current_date function to get the current date, if the question involves "today".
Use the following format:
Question: "Question here"
SQLQuery: "SQL Query to run"
SQLResult: "Result of the SQLQuery"
Answer: "Final answer here"
"""
PRESTODB_PROMPT = PromptTemplate(
input_variables=["input", "table_info", "top_k"],
template=_prestodb_prompt + PROMPT_SUFFIX,
)
SQL_PROMPTS = {
"duckdb": DUCKDB_PROMPT,
"googlesql": GOOGLESQL_PROMPT,
"mssql": MSSQL_PROMPT,
"mysql": MYSQL_PROMPT,
"mariadb": MARIADB_PROMPT,
"oracle": ORACLE_PROMPT,
"postgresql": POSTGRES_PROMPT,
"sqlite": SQLITE_PROMPT,
"clickhouse": CLICKHOUSE_PROMPT,
"prestodb": PRESTODB_PROMPT,
}
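
The dispatch table above is how `from_llm` selects a dialect-specific prompt, falling back to the generic `PROMPT` for dialects without a specialized template. A minimal sketch:

```python
from langchain.chains.sql_database.prompt import PROMPT, SQL_PROMPTS

prompt = SQL_PROMPTS.get("postgresql", PROMPT)  # PostgreSQL template
fallback = SQL_PROMPTS.get("firebird", PROMPT)  # unknown dialect -> generic
```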

@ -1476,21 +1476,21 @@ files = [
[[package]]
name = "langchain"
version = "0.0.235"
version = "0.0.239"
description = "Building applications with LLMs through composability"
category = "main"
optional = false
python-versions = ">=3.8.1,<4.0"
files = [
{file = "langchain-0.0.235-py3-none-any.whl", hash = "sha256:aed680f62afc0031cc5b0df840a4c5f8fe6d874356fc5695b92f8c0f779462b0"},
{file = "langchain-0.0.235.tar.gz", hash = "sha256:6734849e111a4c60e187a1ce7856b31e623604326e48b96601316d108633abd1"},
{file = "langchain-0.0.239-py3-none-any.whl", hash = "sha256:e353e3290d4e6550788e6cd23828a4872e749008cb1a7564ab81ffc861315fd6"},
{file = "langchain-0.0.239.tar.gz", hash = "sha256:a739ae141808e3d0abafe12c3daa4402c81bab8de033da48966702de6f647d96"},
]
[package.dependencies]
aiohttp = ">=3.8.3,<4.0.0"
async-timeout = {version = ">=4.0.0,<5.0.0", markers = "python_version < \"3.11\""}
dataclasses-json = ">=0.5.7,<0.6.0"
langsmith = ">=0.0.7,<0.0.8"
langsmith = ">=0.0.11,<0.1.0"
numexpr = ">=2.8.4,<3.0.0"
numpy = ">=1,<2"
openapi-schema-pydantic = ">=1.2,<2.0"
@ -1501,13 +1501,13 @@ SQLAlchemy = ">=1.4,<3"
tenacity = ">=8.1.0,<9.0.0"
[package.extras]
all = ["O365 (>=2.0.26,<3.0.0)", "aleph-alpha-client (>=2.15.0,<3.0.0)", "anthropic (>=0.3,<0.4)", "arxiv (>=1.4,<2.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "awadb (>=0.3.3,<0.4.0)", "azure-ai-formrecognizer (>=3.2.1,<4.0.0)", "azure-ai-vision (>=0.11.1b1,<0.12.0)", "azure-cognitiveservices-speech (>=1.28.0,<2.0.0)", "azure-cosmos (>=4.4.0b1,<5.0.0)", "azure-identity (>=1.12.0,<2.0.0)", "beautifulsoup4 (>=4,<5)", "clarifai (>=9.1.0)", "clickhouse-connect (>=0.5.14,<0.6.0)", "cohere (>=3,<4)", "deeplake (>=3.6.8,<4.0.0)", "docarray[hnswlib] (>=0.32.0,<0.33.0)", "duckduckgo-search (>=3.8.3,<4.0.0)", "elasticsearch (>=8,<9)", "esprima (>=4.0.1,<5.0.0)", "faiss-cpu (>=1,<2)", "google-api-python-client (==2.70.0)", "google-auth (>=2.18.1,<3.0.0)", "google-search-results (>=2,<3)", "gptcache (>=0.1.7)", "html2text (>=2020.1.16,<2021.0.0)", "huggingface_hub (>=0,<1)", "jina (>=3.14,<4.0)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "lancedb (>=0.1,<0.2)", "langkit (>=0.0.6,<0.1.0)", "lark (>=1.1.5,<2.0.0)", "libdeeplake (>=0.0.60,<0.0.61)", "lxml (>=4.9.2,<5.0.0)", "manifest-ml (>=0.0.1,<0.0.2)", "marqo (>=0.11.0,<0.12.0)", "momento (>=1.5.0,<2.0.0)", "nebula3-python (>=3.4.0,<4.0.0)", "neo4j (>=5.8.1,<6.0.0)", "networkx (>=2.6.3,<3.0.0)", "nlpcloud (>=1,<2)", "nltk (>=3,<4)", "nomic (>=1.0.43,<2.0.0)", "octoai-sdk (>=0.1.1,<0.2.0)", "openai (>=0,<1)", "openlm (>=0.0.5,<0.0.6)", "opensearch-py (>=2.0.0,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pexpect (>=4.8.0,<5.0.0)", "pgvector (>=0.1.6,<0.2.0)", "pinecone-client (>=2,<3)", "pinecone-text (>=0.4.2,<0.5.0)", "psycopg2-binary (>=2.9.5,<3.0.0)", "pymongo (>=4.3.3,<5.0.0)", "pyowm (>=3.3.0,<4.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pytesseract (>=0.3.10,<0.4.0)", "pyvespa (>=0.33.0,<0.34.0)", "qdrant-client (>=1.3.1,<2.0.0)", "rdflib (>=6.3.2,<7.0.0)", "redis (>=4,<5)", "requests-toolbelt (>=1.0.0,<2.0.0)", "sentence-transformers (>=2,<3)", "singlestoredb (>=0.7.1,<0.8.0)", "spacy (>=3,<4)", "steamship (>=2.16.9,<3.0.0)", "tensorflow-text (>=2.11.0,<3.0.0)", "tigrisdb (>=1.0.0b6,<2.0.0)", "tiktoken (>=0.3.2,<0.4.0)", "torch (>=1,<3)", "transformers (>=4,<5)", "weaviate-client (>=3,<4)", "wikipedia (>=1,<2)", "wolframalpha (==5.0.0)"]
all = ["O365 (>=2.0.26,<3.0.0)", "aleph-alpha-client (>=2.15.0,<3.0.0)", "amadeus (>=8.1.0)", "anthropic (>=0.3,<0.4)", "arxiv (>=1.4,<2.0)", "atlassian-python-api (>=3.36.0,<4.0.0)", "awadb (>=0.3.3,<0.4.0)", "azure-ai-formrecognizer (>=3.2.1,<4.0.0)", "azure-ai-vision (>=0.11.1b1,<0.12.0)", "azure-cognitiveservices-speech (>=1.28.0,<2.0.0)", "azure-cosmos (>=4.4.0b1,<5.0.0)", "azure-identity (>=1.12.0,<2.0.0)", "beautifulsoup4 (>=4,<5)", "clarifai (>=9.1.0)", "clickhouse-connect (>=0.5.14,<0.6.0)", "cohere (>=3,<4)", "deeplake (>=3.6.8,<4.0.0)", "docarray[hnswlib] (>=0.32.0,<0.33.0)", "duckduckgo-search (>=3.8.3,<4.0.0)", "elasticsearch (>=8,<9)", "esprima (>=4.0.1,<5.0.0)", "faiss-cpu (>=1,<2)", "google-api-python-client (==2.70.0)", "google-auth (>=2.18.1,<3.0.0)", "google-search-results (>=2,<3)", "gptcache (>=0.1.7)", "html2text (>=2020.1.16,<2021.0.0)", "huggingface_hub (>=0,<1)", "jina (>=3.14,<4.0)", "jinja2 (>=3,<4)", "jq (>=1.4.1,<2.0.0)", "lancedb (>=0.1,<0.2)", "langkit (>=0.0.6,<0.1.0)", "lark (>=1.1.5,<2.0.0)", "libdeeplake (>=0.0.60,<0.0.61)", "lxml (>=4.9.2,<5.0.0)", "manifest-ml (>=0.0.1,<0.0.2)", "marqo (>=0.11.0,<0.12.0)", "momento (>=1.5.0,<2.0.0)", "nebula3-python (>=3.4.0,<4.0.0)", "neo4j (>=5.8.1,<6.0.0)", "networkx (>=2.6.3,<3.0.0)", "nlpcloud (>=1,<2)", "nltk (>=3,<4)", "nomic (>=1.0.43,<2.0.0)", "octoai-sdk (>=0.1.1,<0.2.0)", "openai (>=0,<1)", "openlm (>=0.0.5,<0.0.6)", "opensearch-py (>=2.0.0,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pexpect (>=4.8.0,<5.0.0)", "pgvector (>=0.1.6,<0.2.0)", "pinecone-client (>=2,<3)", "pinecone-text (>=0.4.2,<0.5.0)", "psycopg2-binary (>=2.9.5,<3.0.0)", "pymongo (>=4.3.3,<5.0.0)", "pyowm (>=3.3.0,<4.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pytesseract (>=0.3.10,<0.4.0)", "pyvespa (>=0.33.0,<0.34.0)", "qdrant-client (>=1.3.1,<2.0.0)", "rdflib (>=6.3.2,<7.0.0)", "redis (>=4,<5)", "requests-toolbelt (>=1.0.0,<2.0.0)", "sentence-transformers (>=2,<3)", "singlestoredb (>=0.7.1,<0.8.0)", "spacy (>=3,<4)", "steamship (>=2.16.9,<3.0.0)", "tensorflow-text (>=2.11.0,<3.0.0)", "tigrisdb (>=1.0.0b6,<2.0.0)", "tiktoken (>=0.3.2,<0.4.0)", "torch (>=1,<3)", "transformers (>=4,<5)", "weaviate-client (>=3,<4)", "wikipedia (>=1,<2)", "wolframalpha (==5.0.0)"]
azure = ["azure-ai-formrecognizer (>=3.2.1,<4.0.0)", "azure-ai-vision (>=0.11.1b1,<0.12.0)", "azure-cognitiveservices-speech (>=1.28.0,<2.0.0)", "azure-core (>=1.26.4,<2.0.0)", "azure-cosmos (>=4.4.0b1,<5.0.0)", "azure-identity (>=1.12.0,<2.0.0)", "azure-search-documents (==11.4.0a20230509004)", "openai (>=0,<1)"]
clarifai = ["clarifai (>=9.1.0)"]
cohere = ["cohere (>=3,<4)"]
docarray = ["docarray[hnswlib] (>=0.32.0,<0.33.0)"]
embeddings = ["sentence-transformers (>=2,<3)"]
extended-testing = ["atlassian-python-api (>=3.36.0,<4.0.0)", "beautifulsoup4 (>=4,<5)", "bibtexparser (>=1.4.0,<2.0.0)", "cassio (>=0.0.7,<0.0.8)", "chardet (>=5.1.0,<6.0.0)", "esprima (>=4.0.1,<5.0.0)", "gql (>=3.4.1,<4.0.0)", "html2text (>=2020.1.16,<2021.0.0)", "jq (>=1.4.1,<2.0.0)", "lxml (>=4.9.2,<5.0.0)", "mwparserfromhell (>=0.6.4,<0.7.0)", "mwxml (>=0.3.3,<0.4.0)", "openai (>=0,<1)", "pandas (>=2.0.1,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pgvector (>=0.1.6,<0.2.0)", "psychicapi (>=0.8.0,<0.9.0)", "py-trello (>=0.19.0,<0.20.0)", "pymupdf (>=1.22.3,<2.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pypdfium2 (>=4.10.0,<5.0.0)", "pyspark (>=3.4.0,<4.0.0)", "rank-bm25 (>=0.2.2,<0.3.0)", "rapidfuzz (>=3.1.1,<4.0.0)", "requests-toolbelt (>=1.0.0,<2.0.0)", "scikit-learn (>=1.2.2,<2.0.0)", "streamlit (>=1.18.0,<2.0.0)", "sympy (>=1.12,<2.0)", "telethon (>=1.28.5,<2.0.0)", "tqdm (>=4.48.0)", "zep-python (>=0.32)"]
extended-testing = ["atlassian-python-api (>=3.36.0,<4.0.0)", "beautifulsoup4 (>=4,<5)", "bibtexparser (>=1.4.0,<2.0.0)", "cassio (>=0.0.7,<0.0.8)", "chardet (>=5.1.0,<6.0.0)", "esprima (>=4.0.1,<5.0.0)", "geopandas (>=0.13.1,<0.14.0)", "gql (>=3.4.1,<4.0.0)", "html2text (>=2020.1.16,<2021.0.0)", "jq (>=1.4.1,<2.0.0)", "lxml (>=4.9.2,<5.0.0)", "mwparserfromhell (>=0.6.4,<0.7.0)", "mwxml (>=0.3.3,<0.4.0)", "openai (>=0,<1)", "pandas (>=2.0.1,<3.0.0)", "pdfminer-six (>=20221105,<20221106)", "pgvector (>=0.1.6,<0.2.0)", "psychicapi (>=0.8.0,<0.9.0)", "py-trello (>=0.19.0,<0.20.0)", "pymupdf (>=1.22.3,<2.0.0)", "pypdf (>=3.4.0,<4.0.0)", "pypdfium2 (>=4.10.0,<5.0.0)", "pyspark (>=3.4.0,<4.0.0)", "rank-bm25 (>=0.2.2,<0.3.0)", "rapidfuzz (>=3.1.1,<4.0.0)", "requests-toolbelt (>=1.0.0,<2.0.0)", "scikit-learn (>=1.2.2,<2.0.0)", "streamlit (>=1.18.0,<2.0.0)", "sympy (>=1.12,<2.0)", "telethon (>=1.28.5,<2.0.0)", "tqdm (>=4.48.0)", "zep-python (>=0.32)"]
javascript = ["esprima (>=4.0.1,<5.0.0)"]
llms = ["anthropic (>=0.3,<0.4)", "clarifai (>=9.1.0)", "cohere (>=3,<4)", "huggingface_hub (>=0,<1)", "manifest-ml (>=0.0.1,<0.0.2)", "nlpcloud (>=1,<2)", "openai (>=0,<1)", "openllm (>=0.1.19)", "openlm (>=0.0.5,<0.0.6)", "torch (>=1,<3)", "transformers (>=4,<5)"]
openai = ["openai (>=0,<1)", "tiktoken (>=0.3.2,<0.4.0)"]
@ -1516,14 +1516,14 @@ text-helpers = ["chardet (>=5.1.0,<6.0.0)"]
[[package]]
name = "langsmith"
version = "0.0.7"
version = "0.0.12"
description = "Client library to connect to the LangSmith LLM Tracing and Evaluation Platform."
category = "main"
optional = false
python-versions = ">=3.8.1,<4.0"
files = [
{file = "langsmith-0.0.7-py3-none-any.whl", hash = "sha256:9e0ab264b499daa778c694f9129859830820d3fb3a7c93309b630a22b68a88a9"},
{file = "langsmith-0.0.7.tar.gz", hash = "sha256:2f18e51cfd4e42f2b3cf00fa87e9d03012eb7269cdafd8e7c0cf7aa828dcc03e"},
{file = "langsmith-0.0.12-py3-none-any.whl", hash = "sha256:d7a4360d1984b57689c6470ffcef79dcb217dd7129bdfd3bcfcb22dc058787f6"},
{file = "langsmith-0.0.12.tar.gz", hash = "sha256:5abf30ac4ebb6b68955bea617669585e80a6fe9def90664503d5fbd6d095e91e"},
]
[package.dependencies]
@ -3092,6 +3092,18 @@ files = [
docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"]
test = ["argcomplete (>=2.0)", "pre-commit", "pytest", "pytest-mock"]
[[package]]
name = "types-pyyaml"
version = "6.0.12.11"
description = "Typing stubs for PyYAML"
category = "dev"
optional = false
python-versions = "*"
files = [
{file = "types-PyYAML-6.0.12.11.tar.gz", hash = "sha256:7d340b19ca28cddfdba438ee638cd4084bde213e501a3978738543e27094775b"},
{file = "types_PyYAML-6.0.12.11-py3-none-any.whl", hash = "sha256:a461508f3096d1d5810ec5ab95d7eeecb651f3a15b71959999988942063bf01d"},
]
[[package]]
name = "typing-extensions"
version = "4.7.1"
@ -3329,4 +3341,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p
[metadata]
lock-version = "2.0"
python-versions = ">=3.8.1,<4.0"
content-hash = "1dfc3c259e2f261f99a643eb5053185cc3df63023e5f04399b0b53f352d6220d"
content-hash = "bd737027e0fd9ea2ee823632f89dbd947c7d5f41bb05fc1cbff04106ae3dd350"

@ -1,16 +1,19 @@
[tool.poetry]
name = "langchain.experimental"
name = "langchain-experimental"
version = "0.0.1"
description = "Building applications with LLMs through composability"
authors = []
license = "MIT"
readme = "README.md"
repository = "https://www.github.com/hwchase17/langchain"
packages = [
{include = "langchain"}
]
[tool.poetry.dependencies]
python = ">=3.8.1,<4.0"
langchain = "^0.0.235"
langchain = ">=0.0.239"
[tool.poetry.group.lint.dependencies]
@ -19,6 +22,7 @@ black = "^23.1.0"
[tool.poetry.group.typing.dependencies]
mypy = "^0.991"
types-pyyaml = "^6.0.12.2"
[tool.poetry.group.dev.dependencies]
jupyter = "^1.0.0"

@ -1,4 +1,6 @@
"""Implements Program-Aided Language Models.
As in https://arxiv.org/pdf/2211.10435.pdf.
TODO: deprecate
"""

@ -19,6 +19,7 @@ from langchain.utilities.sql_database import SQLDatabase
INTERMEDIATE_STEPS_KEY = "intermediate_steps"
# TODO: deprecate
class SQLDatabaseChain(Chain):
"""Chain for interacting with SQL Database.

@ -140,6 +140,7 @@ def _load_prompt_from_file(file: Union[str, Path]) -> BasePromptTemplate:
elif file_path.suffix == ".yaml":
with open(file_path, "r") as f:
config = yaml.safe_load(f)
# TODO: deprecate this
elif file_path.suffix == ".py":
spec = importlib.util.spec_from_loader(
"prompt", loader=None, origin=str(file_path)
