-
-
Notifications
You must be signed in to change notification settings - Fork 1.6k
/
Copy pathxml_scraper_graph.py
98 lines (79 loc) · 3.18 KB
/
xml_scraper_graph.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
"""
XMLScraperGraph Module
"""
from typing import Optional, Type
from pydantic import BaseModel
from ..nodes import FetchNode, GenerateAnswerNode
from .abstract_graph import AbstractGraph
from .base_graph import BaseGraph
class XMLScraperGraph(AbstractGraph):
    """
    XMLScraperGraph is a scraping pipeline that extracts information from XML files
    using a natural language model to interpret and answer prompts.

    Attributes:
        prompt (str): The prompt for the graph.
        source (str): The source of the graph (a single ``.xml`` file or a
            directory containing XML files).
        config (dict): Configuration parameters for the graph.
        schema (BaseModel): The schema for the graph output.
        llm_model: An instance of a language model client, configured for
            generating answers.
        embedder_model: An instance of an embedding model client, configured
            for generating embeddings.
        verbose (bool): A flag indicating whether to show print statements
            during execution.
        headless (bool): A flag indicating whether to run the graph in
            headless mode.
        model_token (int): The token limit for the language model.

    Args:
        prompt (str): The prompt for the graph.
        source (str): The source of the graph.
        config (dict): Configuration parameters for the graph.
        schema (Optional[Type[BaseModel]]): The schema for the graph output.

    Example:
        >>> xml_scraper = XMLScraperGraph(
        ...     "List me all the attractions in Chioggia.",
        ...     "data/chioggia.xml",
        ...     {"llm": {"model": "openai/gpt-3.5-turbo"}}
        ... )
        >>> result = xml_scraper.run()
    """

    def __init__(
        self,
        prompt: str,
        source: str,
        config: dict,
        schema: Optional[Type[BaseModel]] = None,
    ):
        # NOTE: AbstractGraph takes (prompt, config, source, schema) — the
        # argument order differs from this constructor's signature.
        super().__init__(prompt, config, source, schema)
        # A source is a single XML file only when it carries the ".xml"
        # extension; anything else is treated as a directory of XML files.
        # The previous check, endswith("xml"), also matched paths that merely
        # end in the letters "xml" (e.g. a directory named "data_xml").
        self.input_key = "xml" if source.endswith(".xml") else "xml_dir"

    def _create_graph(self) -> BaseGraph:
        """
        Creates the graph of nodes representing the workflow for web scraping.

        Returns:
            BaseGraph: A graph instance representing the web scraping workflow.
        """
        # Fetches the raw XML document(s) from either input key into "doc".
        fetch_node = FetchNode(input="xml | xml_dir", output=["doc"])

        # Answers the user prompt from the fetched document (or pre-chunked
        # relevant text, when a prior node has produced "relevant_chunks").
        generate_answer_node = GenerateAnswerNode(
            input="user_prompt & (relevant_chunks | doc)",
            output=["answer"],
            node_config={
                "llm_model": self.llm_model,
                "additional_info": self.config.get("additional_info"),
                "schema": self.schema,
            },
        )

        return BaseGraph(
            nodes=[
                fetch_node,
                generate_answer_node,
            ],
            edges=[(fetch_node, generate_answer_node)],
            entry_point=fetch_node,
            graph_name=self.__class__.__name__,
        )

    def run(self) -> str:
        """
        Executes the web scraping process and returns the answer to the prompt.

        Returns:
            str: The answer to the prompt, or "No answer found." when the
            final state contains no "answer" key.
        """
        inputs = {"user_prompt": self.prompt, self.input_key: self.source}
        self.final_state, self.execution_info = self.graph.execute(inputs)
        return self.final_state.get("answer", "No answer found.")