Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
39 changes: 22 additions & 17 deletions libs/core/langchain_core/prompts/chat.py
Original file line number Diff line number Diff line change
Expand Up @@ -903,23 +903,28 @@ def __init__(
5. A string which is shorthand for `("human", template)`; e.g.,
`"{user_input}"`
template_format: Format of the template.
input_variables: A list of the names of the variables whose values are
required as inputs to the prompt.
optional_variables: A list of the names of the variables for placeholder
or MessagePlaceholder that are optional.

These variables are auto-inferred from the prompt, and the user need not
provide them.
partial_variables: A dictionary of the partial variables the prompt
template carries.

Partial variables populate the template so that you don't need to pass
them in every time you call the prompt.
validate_template: Whether to validate the template.
input_types: A dictionary of the types of the variables the prompt template
expects.

If not provided, all variables are assumed to be strings.
**kwargs: Additional keyword arguments passed to `BasePromptTemplate`,
including (but not limited to):

- `input_variables`: A list of the names of the variables whose values
are required as inputs to the prompt.
- `optional_variables`: A list of the names of the variables for
placeholder or `MessagePlaceholder` that are optional.

These variables are auto-inferred from the prompt, and the user need not
provide them.

- `partial_variables`: A dictionary of the partial variables the prompt
template carries.

Partial variables populate the template so that you don't need to
pass them in every time you call the prompt.

- `validate_template`: Whether to validate the template.
- `input_types`: A dictionary of the types of the variables the prompt
template expects.

If not provided, all variables are assumed to be strings.

Examples:
Instantiation from a list of message templates:
Expand Down
4 changes: 2 additions & 2 deletions libs/core/uv.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,6 @@ def __init__(
total: The total number of items to be processed.
ncols: The character width of the progress bar.
end_with: Last string to print after progress bar reaches end.
**kwargs: Additional keyword arguments.
"""
self.total = total
self.ncols = ncols
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -295,11 +295,7 @@ def _get_prompt(inputs: dict[str, Any]) -> str:


class ChatModelInput(TypedDict):
"""Input for a chat model.

Args:
messages: List of chat messages.
"""
"""Input for a chat model."""

messages: list[BaseMessage]

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -108,8 +108,8 @@ def serialize_outputs(self, outputs: dict) -> str:
The serialized output text from the first generation.

Raises:
ValueError: If no generations are found in the outputs,
or if the generations are empty.
ValueError: If no generations are found in the outputs or if the generations
are empty.
"""
if not outputs.get("generations"):
msg = "Cannot evaluate LLM Run without generations."
Expand Down Expand Up @@ -436,8 +436,8 @@ def from_run_and_data_type(
The instantiated evaluation chain.

Raises:
If the run type is not supported, or if the evaluator requires a
reference from the dataset but the reference key is not provided.
ValueError: If the run type is not supported, or if the evaluator requires a
reference from the dataset but the reference key is not provided.

"""
# Configure how run inputs/predictions are passed to the evaluator
Expand Down
45 changes: 0 additions & 45 deletions libs/partners/openai/langchain_openai/chat_models/azure.py
Original file line number Diff line number Diff line change
Expand Up @@ -905,51 +905,6 @@ def with_structured_output(
!!! note
`strict` can only be non-null if `method` is `'json_schema'`
or `'function_calling'`.
tools:
A list of tool-like objects to bind to the chat model. Requires that:

- `method` is `'json_schema'` (default).
- `strict=True`
- `include_raw=True`

If a model elects to call a
tool, the resulting `AIMessage` in `'raw'` will include tool calls.

??? example

```python
from langchain.chat_models import init_chat_model
from pydantic import BaseModel


class ResponseSchema(BaseModel):
response: str


def get_weather(location: str) -> str:
\"\"\"Get weather at a location.\"\"\"
pass

model = init_chat_model("openai:gpt-4o-mini")

structured_model = model.with_structured_output(
ResponseSchema,
tools=[get_weather],
strict=True,
include_raw=True,
)

structured_model.invoke("What's the weather in Boston?")
```

```python
{
"raw": AIMessage(content="", tool_calls=[...], ...),
"parsing_error": None,
"parsed": None,
}
```

kwargs: Additional keyword args are passed through to the model.

Returns:
Expand Down
Loading