|
1 | 1 | import os |
2 | 2 | from typing import List |
3 | 3 |
|
4 | | -import bs4 # BeautifulSoup to parse HTML |
| 4 | +import bs4 # BeautifulSoup to parse HTML |
| 5 | +from langchain.chat_models import init_chat_model |
5 | 6 | from langchain_community.document_loaders import WebBaseLoader |
6 | 7 | from langchain_core.documents import Document |
7 | 8 | from langchain_core.messages import SystemMessage |
8 | 9 | from langchain_core.tools import tool |
9 | 10 | from langchain_openai import OpenAIEmbeddings |
10 | 11 | from langchain_text_splitters import RecursiveCharacterTextSplitter |
11 | | -from typing_extensions import TypedDict |
12 | | - |
13 | | -from langchain import hub |
14 | | -from langchain.chat_models import init_chat_model |
15 | 12 | from langgraph.checkpoint.memory import MemorySaver |
16 | 13 | from langgraph.graph import END, MessagesState, StateGraph |
17 | 14 | from langgraph.prebuilt import ToolNode, create_react_agent, tools_condition |
| 15 | +from typing_extensions import TypedDict |
| 16 | + |
| 17 | +from langchain import hub |
18 | 18 |
|
import getpass

# Prompt for the OpenAI API key without echoing it to the terminal.
# input() would display the secret on screen and leave it in scrollback;
# getpass.getpass() suppresses the echo (stdlib, no new dependency).
os.environ["OPENAI_API_KEY"] = getpass.getpass("OpenAI API key: ")
20 | 20 |
|
|
56 | 56 |
|
57 | 57 |
|
58 | 58 | # Turn the retrieve step into a tool call option for the LLM |
59 | | -@tool(response_format="content_and_artifact") # make the model see only the first output |
| 59 | +@tool( |
| 60 | + response_format="content_and_artifact" |
| 61 | +) # make the model see only the first output |
60 | 62 | def retrieve(query: str): |
61 | 63 | """Retrieve information related to a query.""" |
62 | 64 | retrieved_docs = vector_store.similarity_search(query, k=2) |
|
0 commit comments