Change flask to fastapi
0
graphrag/__init__.py
Normal file
248
graphrag/entity_resolution.py
Normal file
@@ -0,0 +1,248 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import itertools
import os
import re
from dataclasses import dataclass
from typing import Any, Callable

import networkx as nx
import trio

from graphrag.general.extractor import Extractor
from rag.nlp import is_english
import editdistance
from graphrag.entity_resolution_prompt import ENTITY_RESOLUTION_PROMPT
from rag.llm.chat_model import Base as CompletionLLM
from graphrag.utils import perform_variable_replacements, chat_limiter, GraphChange

DEFAULT_RECORD_DELIMITER = "##"
DEFAULT_ENTITY_INDEX_DELIMITER = "<|>"
DEFAULT_RESOLUTION_RESULT_DELIMITER = "&&"


@dataclass
class EntityResolutionResult:
    """Entity resolution result class definition."""
    graph: nx.Graph
    change: GraphChange


class EntityResolution(Extractor):
    """Entity resolution class definition."""

    _resolution_prompt: str
    _output_formatter_prompt: str
    _record_delimiter_key: str
    _entity_index_delimiter_key: str
    _resolution_result_delimiter_key: str

    def __init__(
            self,
            llm_invoker: CompletionLLM,
    ):
        super().__init__(llm_invoker)
        """Init method definition."""
        self._llm = llm_invoker
        self._resolution_prompt = ENTITY_RESOLUTION_PROMPT
        self._record_delimiter_key = "record_delimiter"
        self._entity_index_dilimiter_key = "entity_index_delimiter"
        self._resolution_result_delimiter_key = "resolution_result_delimiter"
        self._input_text_key = "input_text"

    async def __call__(self, graph: nx.Graph,
                       subgraph_nodes: set[str],
                       prompt_variables: dict[str, Any] | None = None,
                       callback: Callable | None = None) -> EntityResolutionResult:
        """Call method definition."""
        if prompt_variables is None:
            prompt_variables = {}

        # Wire defaults into the prompt variables
        self.prompt_variables = {
            **prompt_variables,
            self._record_delimiter_key: prompt_variables.get(self._record_delimiter_key)
            or DEFAULT_RECORD_DELIMITER,
            self._entity_index_dilimiter_key: prompt_variables.get(self._entity_index_dilimiter_key)
            or DEFAULT_ENTITY_INDEX_DELIMITER,
            self._resolution_result_delimiter_key: prompt_variables.get(self._resolution_result_delimiter_key)
            or DEFAULT_RESOLUTION_RESULT_DELIMITER,
        }

        nodes = sorted(graph.nodes())
        entity_types = sorted(set(graph.nodes[node].get('entity_type', '-') for node in nodes))
        node_clusters = {entity_type: [] for entity_type in entity_types}

        for node in nodes:
            node_clusters[graph.nodes[node].get('entity_type', '-')].append(node)

        candidate_resolution = {entity_type: [] for entity_type in entity_types}
        for k, v in node_clusters.items():
            candidate_resolution[k] = [(a, b) for a, b in itertools.combinations(v, 2) if (a in subgraph_nodes or b in subgraph_nodes) and self.is_similarity(a, b)]
        num_candidates = sum([len(candidates) for _, candidates in candidate_resolution.items()])
        callback(msg=f"Identified {num_candidates} candidate pairs")
        remain_candidates_to_resolve = num_candidates

        resolution_result = set()
        resolution_result_lock = trio.Lock()
        resolution_batch_size = 100
        max_concurrent_tasks = 5
        semaphore = trio.Semaphore(max_concurrent_tasks)

        async def limited_resolve_candidate(candidate_batch, result_set, result_lock):
            nonlocal remain_candidates_to_resolve, callback
            async with semaphore:
                try:
                    enable_timeout_assertion = os.environ.get("ENABLE_TIMEOUT_ASSERTION")
                    with trio.move_on_after(280 if enable_timeout_assertion else 1000000000) as cancel_scope:
                        await self._resolve_candidate(candidate_batch, result_set, result_lock)
                        remain_candidates_to_resolve = remain_candidates_to_resolve - len(candidate_batch[1])
                        callback(msg=f"Resolved {len(candidate_batch[1])} pairs, {remain_candidates_to_resolve} are remained to resolve. ")
                    if cancel_scope.cancelled_caught:
                        logging.warning(f"Timeout resolving {candidate_batch}, skipping...")
                        remain_candidates_to_resolve = remain_candidates_to_resolve - len(candidate_batch[1])
                        callback(msg=f"Fail to resolved {len(candidate_batch[1])} pairs due to timeout reason, skipped. {remain_candidates_to_resolve} are remained to resolve. ")
                except Exception as e:
                    logging.error(f"Error resolving candidate batch: {e}")

        async with trio.open_nursery() as nursery:
            for candidate_resolution_i in candidate_resolution.items():
                if not candidate_resolution_i[1]:
                    continue
                for i in range(0, len(candidate_resolution_i[1]), resolution_batch_size):
                    candidate_batch = candidate_resolution_i[0], candidate_resolution_i[1][i:i + resolution_batch_size]
                    nursery.start_soon(limited_resolve_candidate, candidate_batch, resolution_result, resolution_result_lock)

        callback(msg=f"Resolved {num_candidates} candidate pairs, {len(resolution_result)} of them are selected to merge.")

        change = GraphChange()
        connect_graph = nx.Graph()
        connect_graph.add_edges_from(resolution_result)

        async def limited_merge_nodes(graph, nodes, change):
            async with semaphore:
                await self._merge_graph_nodes(graph, nodes, change)

        async with trio.open_nursery() as nursery:
            for sub_connect_graph in nx.connected_components(connect_graph):
                merging_nodes = list(sub_connect_graph)
                nursery.start_soon(limited_merge_nodes, graph, merging_nodes, change)

        # Update pagerank
        pr = nx.pagerank(graph)
        for node_name, pagerank in pr.items():
            graph.nodes[node_name]["pagerank"] = pagerank

        return EntityResolutionResult(
            graph=graph,
            change=change,
        )

    async def _resolve_candidate(self, candidate_resolution_i: tuple[str, list[tuple[str, str]]], resolution_result: set[str], resolution_result_lock: trio.Lock):
        pair_txt = [
            f'When determining whether two {candidate_resolution_i[0]}s are the same, you should only focus on critical properties and overlook noisy factors.\n']
        for index, candidate in enumerate(candidate_resolution_i[1]):
            pair_txt.append(
                f'Question {index + 1}: name of{candidate_resolution_i[0]} A is {candidate[0]} ,name of{candidate_resolution_i[0]} B is {candidate[1]}')
        sent = 'question above' if len(pair_txt) == 1 else f'above {len(pair_txt)} questions'
        pair_txt.append(
            f'\nUse domain knowledge of {candidate_resolution_i[0]}s to help understand the text and answer the {sent} in the format: For Question i, Yes, {candidate_resolution_i[0]} A and {candidate_resolution_i[0]} B are the same {candidate_resolution_i[0]}./No, {candidate_resolution_i[0]} A and {candidate_resolution_i[0]} B are different {candidate_resolution_i[0]}s. For Question i+1, (repeat the above procedures)')
        pair_prompt = '\n'.join(pair_txt)
        variables = {
            **self.prompt_variables,
            self._input_text_key: pair_prompt
        }
        text = perform_variable_replacements(self._resolution_prompt, variables=variables)
        logging.info(f"Created resolution prompt {len(text)} bytes for {len(candidate_resolution_i[1])} entity pairs of type {candidate_resolution_i[0]}")
        async with chat_limiter:
            try:
                enable_timeout_assertion = os.environ.get("ENABLE_TIMEOUT_ASSERTION")
                with trio.move_on_after(280 if enable_timeout_assertion else 1000000000) as cancel_scope:
                    response = await trio.to_thread.run_sync(self._chat, text, [{"role": "user", "content": "Output:"}], {})
                if cancel_scope.cancelled_caught:
                    logging.warning("_resolve_candidate._chat timeout, skipping...")
                    return
            except Exception as e:
                logging.error(f"_resolve_candidate._chat failed: {e}")
                return

        logging.debug(f"_resolve_candidate chat prompt: {text}\nchat response: {response}")
        result = self._process_results(len(candidate_resolution_i[1]), response,
                                       self.prompt_variables.get(self._record_delimiter_key,
                                                                 DEFAULT_RECORD_DELIMITER),
                                       self.prompt_variables.get(self._entity_index_dilimiter_key,
                                                                 DEFAULT_ENTITY_INDEX_DELIMITER),
                                       self.prompt_variables.get(self._resolution_result_delimiter_key,
                                                                 DEFAULT_RESOLUTION_RESULT_DELIMITER))
        async with resolution_result_lock:
            for result_i in result:
                resolution_result.add(candidate_resolution_i[1][result_i[0] - 1])

    def _process_results(
            self,
            records_length: int,
            results: str,
            record_delimiter: str,
            entity_index_delimiter: str,
            resolution_result_delimiter: str
    ) -> list:
        ans_list = []
        records = [r.strip() for r in results.split(record_delimiter)]
        for record in records:
            pattern_int = f"{re.escape(entity_index_delimiter)}(\d+){re.escape(entity_index_delimiter)}"
            match_int = re.search(pattern_int, record)
            res_int = int(str(match_int.group(1) if match_int else '0'))
            if res_int > records_length:
                continue

            pattern_bool = f"{re.escape(resolution_result_delimiter)}([a-zA-Z]+){re.escape(resolution_result_delimiter)}"
            match_bool = re.search(pattern_bool, record)
            res_bool = str(match_bool.group(1) if match_bool else '')

            if res_int and res_bool:
                if res_bool.lower() == 'yes':
                    ans_list.append((res_int, "yes"))

        return ans_list

    def _has_digit_in_2gram_diff(self, a, b):
        def to_2gram_set(s):
            return {s[i:i+2] for i in range(len(s) - 1)}

        set_a = to_2gram_set(a)
        set_b = to_2gram_set(b)
        diff = set_a ^ set_b

        return any(any(c.isdigit() for c in pair) for pair in diff)

    def is_similarity(self, a, b):
        if self._has_digit_in_2gram_diff(a, b):
            return False

        if is_english(a) and is_english(b):
            if editdistance.eval(a, b) <= min(len(a), len(b)) // 2:
                return True
            return False

        a, b = set(a), set(b)
        max_l = max(len(a), len(b))
        if max_l < 4:
            return len(a & b) > 1

        return len(a & b)*1./max_l >= 0.8
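For illustration only (not part of this commit): a self-contained sketch of the `is_similarity` pre-filter above, useful for checking which entity-name pairs would even reach the LLM. `looks_english` is a simplified stand-in for `rag.nlp.is_english`, which is not included in this diff and may behave differently.

# Standalone sketch of EntityResolution.is_similarity (assumes `pip install editdistance`).
import editdistance


def looks_english(s: str) -> bool:
    # Simplified stand-in for rag.nlp.is_english (not shown in this diff).
    return all(ord(c) < 128 for c in s)


def _2grams(s: str) -> set:
    return {s[i:i + 2] for i in range(len(s) - 1)}


def has_digit_in_2gram_diff(a: str, b: str) -> bool:
    # Reject pairs whose character-2-gram difference contains digits (e.g. version-like names).
    return any(any(c.isdigit() for c in pair) for pair in _2grams(a) ^ _2grams(b))


def is_similarity(a: str, b: str) -> bool:
    if has_digit_in_2gram_diff(a, b):
        return False
    if looks_english(a) and looks_english(b):
        # Edit distance no larger than half of the shorter name.
        return editdistance.eval(a, b) <= min(len(a), len(b)) // 2
    # Non-English names: compare character-set overlap.
    sa, sb = set(a), set(b)
    max_l = max(len(sa), len(sb))
    if max_l < 4:
        return len(sa & sb) > 1
    return len(sa & sb) / max_l >= 0.8


if __name__ == "__main__":
    print(is_similarity("television", "televisions"))  # True: edit distance 1
    print(is_similarity("GPT-4", "GPT-5"))             # False: digits differ in the 2-gram diff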
74
graphrag/entity_resolution_prompt.py
Normal file
@@ -0,0 +1,74 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

ENTITY_RESOLUTION_PROMPT = """
-Goal-
Please answer the following Question as required

-Steps-
1. Identify each line of questioning as required

2. Return output in English as a single list of each line answer in steps 1. Use **{record_delimiter}** as the list delimiter.

######################
-Examples-
######################
Example 1:

Question:
When determining whether two Products are the same, you should only focus on critical properties and overlook noisy factors.

Demonstration 1: name of Product A is : "computer", name of Product B is :"phone" No, Product A and Product B are different products.
Question 1: name of Product A is : "television", name of Product B is :"TV"
Question 2: name of Product A is : "cup", name of Product B is :"mug"
Question 3: name of Product A is : "soccer", name of Product B is :"football"
Question 4: name of Product A is : "pen", name of Product B is :"eraser"

Use domain knowledge of Products to help understand the text and answer the above 4 questions in the format: For Question i, Yes, Product A and Product B are the same product. or No, Product A and Product B are different products. For Question i+1, (repeat the above procedures)
################
Output:
(For question {entity_index_delimiter}1{entity_index_delimiter}, {resolution_result_delimiter}no{resolution_result_delimiter}, Product A and Product B are different products.){record_delimiter}
(For question {entity_index_delimiter}2{entity_index_delimiter}, {resolution_result_delimiter}no{resolution_result_delimiter}, Product A and Product B are different products.){record_delimiter}
(For question {entity_index_delimiter}3{entity_index_delimiter}, {resolution_result_delimiter}yes{resolution_result_delimiter}, Product A and Product B are the same product.){record_delimiter}
(For question {entity_index_delimiter}4{entity_index_delimiter}, {resolution_result_delimiter}no{resolution_result_delimiter}, Product A and Product B are different products.){record_delimiter}
#############################

Example 2:

Question:
When determining whether two toponym are the same, you should only focus on critical properties and overlook noisy factors.

Demonstration 1: name of toponym A is : "nanjing", name of toponym B is :"nanjing city" Yes, toponym A and toponym B are the same toponym.
Question 1: name of toponym A is : "Chicago", name of toponym B is :"ChiTown"
Question 2: name of toponym A is : "Shanghai", name of toponym B is :"Zhengzhou"
Question 3: name of toponym A is : "Beijing", name of toponym B is :"Peking"
Question 4: name of toponym A is : "Los Angeles", name of toponym B is :"Cleveland"

Use domain knowledge of toponym to help understand the text and answer the above 4 questions in the format: For Question i, Yes, toponym A and toponym B are the same toponym. or No, toponym A and toponym B are different toponym. For Question i+1, (repeat the above procedures)
################
Output:
(For question {entity_index_delimiter}1{entity_index_delimiter}, {resolution_result_delimiter}yes{resolution_result_delimiter}, toponym A and toponym B are same toponym.){record_delimiter}
(For question {entity_index_delimiter}2{entity_index_delimiter}, {resolution_result_delimiter}no{resolution_result_delimiter}, toponym A and toponym B are different toponym.){record_delimiter}
(For question {entity_index_delimiter}3{entity_index_delimiter}, {resolution_result_delimiter}yes{resolution_result_delimiter}, toponym A and toponym B are the same toponym.){record_delimiter}
(For question {entity_index_delimiter}4{entity_index_delimiter}, {resolution_result_delimiter}no{resolution_result_delimiter}, toponym A and toponym B are different toponym.){record_delimiter}
#############################

-Real Data-
######################
Question:{input_text}
######################
Output:
"""
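For illustration only (not part of this commit): a minimal, self-contained example of parsing an answer written in the output format above with the default delimiters from entity_resolution.py (`##`, `<|>`, `&&`), mirroring what `EntityResolution._process_results` does.

# Parse an LLM answer in the ENTITY_RESOLUTION_PROMPT output format.
import re

RECORD_DELIMITER = "##"
ENTITY_INDEX_DELIMITER = "<|>"
RESOLUTION_RESULT_DELIMITER = "&&"

sample_response = (
    "(For question <|>1<|>, &&yes&&, Product A and Product B are the same product.)##"
    "(For question <|>2<|>, &&no&&, Product A and Product B are different products.)"
)

def process_results(records_length: int, results: str) -> list:
    ans_list = []
    for record in (r.strip() for r in results.split(RECORD_DELIMITER)):
        # Question index between <|> ... <|>
        m_int = re.search(rf"{re.escape(ENTITY_INDEX_DELIMITER)}(\d+){re.escape(ENTITY_INDEX_DELIMITER)}", record)
        res_int = int(m_int.group(1)) if m_int else 0
        if res_int > records_length:
            continue
        # Yes/no verdict between && ... &&
        m_bool = re.search(rf"{re.escape(RESOLUTION_RESULT_DELIMITER)}([a-zA-Z]+){re.escape(RESOLUTION_RESULT_DELIMITER)}", record)
        if res_int and m_bool and m_bool.group(1).lower() == "yes":
            ans_list.append((res_int, "yes"))
    return ans_list

print(process_results(2, sample_response))  # [(1, 'yes')] -> candidate pair 1 gets merged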
0
graphrag/general/__init__.py
Normal file
158
graphrag/general/community_report_prompt.py
Normal file
@@ -0,0 +1,158 @@
# Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""
Reference:
 - [GraphRAG](https://github.com/microsoft/graphrag/blob/main/graphrag/prompts/index/community_report.py)
"""

COMMUNITY_REPORT_PROMPT = """
You are an AI assistant that helps a human analyst to perform general information discovery. Information discovery is the process of identifying and assessing relevant information associated with certain entities (e.g., organizations and individuals) within a network.

# Goal
Write a comprehensive report of a community, given a list of entities that belong to the community as well as their relationships and optional associated claims. The report will be used to inform decision-makers about information associated with the community and their potential impact. The content of this report includes an overview of the community's key entities, their legal compliance, technical capabilities, reputation, and noteworthy claims.

# Report Structure

The report should include the following sections:

- TITLE: community's name that represents its key entities - title should be short but specific. When possible, include representative named entities in the title.
- SUMMARY: An executive summary of the community's overall structure, how its entities are related to each other, and significant information associated with its entities.
- IMPACT SEVERITY RATING: a float score between 0-10 that represents the severity of IMPACT posed by entities within the community. IMPACT is the scored importance of a community.
- RATING EXPLANATION: Give a single sentence explanation of the IMPACT severity rating.
- DETAILED FINDINGS: A list of 5-10 key insights about the community. Each insight should have a short summary followed by multiple paragraphs of explanatory text grounded according to the grounding rules below. Be comprehensive.

Return output as a well-formed JSON-formatted string with the following format(in language of 'Text' content):
{{
    "title": <report_title>,
    "summary": <executive_summary>,
    "rating": <impact_severity_rating>,
    "rating_explanation": <rating_explanation>,
    "findings": [
        {{
            "summary":<insight_1_summary>,
            "explanation": <insight_1_explanation>
        }},
        {{
            "summary":<insight_2_summary>,
            "explanation": <insight_2_explanation>
        }}
    ]
}}

# Grounding Rules

Points supported by data should list their data references as follows:

"This is an example sentence supported by multiple data references [Data: <dataset name> (record ids); <dataset name> (record ids)]."

Do not list more than 5 record ids in a single reference. Instead, list the top 5 most relevant record ids and add "+more" to indicate that there are more.

For example:
"Person X is the owner of Company Y and subject to many allegations of wrongdoing [Data: Reports (1), Entities (5, 7); Relationships (23); Claims (7, 2, 34, 64, 46, +more)]."

where 1, 5, 7, 23, 2, 34, 46, and 64 represent the id (not the index) of the relevant data record.

Do not include information where the supporting evidence for it is not provided.


# Example Input
-----------
Text:

-Entities-

id,entity,description
5,VERDANT OASIS PLAZA,Verdant Oasis Plaza is the location of the Unity March
6,HARMONY ASSEMBLY,Harmony Assembly is an organization that is holding a march at Verdant Oasis Plaza

-Relationships-

id,source,target,description
37,VERDANT OASIS PLAZA,UNITY MARCH,Verdant Oasis Plaza is the location of the Unity March
38,VERDANT OASIS PLAZA,HARMONY ASSEMBLY,Harmony Assembly is holding a march at Verdant Oasis Plaza
39,VERDANT OASIS PLAZA,UNITY MARCH,The Unity March is taking place at Verdant Oasis Plaza
40,VERDANT OASIS PLAZA,TRIBUNE SPOTLIGHT,Tribune Spotlight is reporting on the Unity march taking place at Verdant Oasis Plaza
41,VERDANT OASIS PLAZA,BAILEY ASADI,Bailey Asadi is speaking at Verdant Oasis Plaza about the march
43,HARMONY ASSEMBLY,UNITY MARCH,Harmony Assembly is organizing the Unity March

Output:
{{
    "title": "Verdant Oasis Plaza and Unity March",
    "summary": "The community revolves around the Verdant Oasis Plaza, which is the location of the Unity March. The plaza has relationships with the Harmony Assembly, Unity March, and Tribune Spotlight, all of which are associated with the march event.",
    "rating": 5.0,
    "rating_explanation": "The impact severity rating is moderate due to the potential for unrest or conflict during the Unity March.",
    "findings": [
        {{
            "summary": "Verdant Oasis Plaza as the central location",
            "explanation": "Verdant Oasis Plaza is the central entity in this community, serving as the location for the Unity March. This plaza is the common link between all other entities, suggesting its significance in the community. The plaza's association with the march could potentially lead to issues such as public disorder or conflict, depending on the nature of the march and the reactions it provokes. [Data: Entities (5), Relationships (37, 38, 39, 40, 41,+more)]"
        }},
        {{
            "summary": "Harmony Assembly's role in the community",
            "explanation": "Harmony Assembly is another key entity in this community, being the organizer of the march at Verdant Oasis Plaza. The nature of Harmony Assembly and its march could be a potential source of threat, depending on their objectives and the reactions they provoke. The relationship between Harmony Assembly and the plaza is crucial in understanding the dynamics of this community. [Data: Entities(6), Relationships (38, 43)]"
        }},
        {{
            "summary": "Unity March as a significant event",
            "explanation": "The Unity March is a significant event taking place at Verdant Oasis Plaza. This event is a key factor in the community's dynamics and could be a potential source of threat, depending on the nature of the march and the reactions it provokes. The relationship between the march and the plaza is crucial in understanding the dynamics of this community. [Data: Relationships (39)]"
        }},
        {{
            "summary": "Role of Tribune Spotlight",
            "explanation": "Tribune Spotlight is reporting on the Unity March taking place in Verdant Oasis Plaza. This suggests that the event has attracted media attention, which could amplify its impact on the community. The role of Tribune Spotlight could be significant in shaping public perception of the event and the entities involved. [Data: Relationships (40)]"
        }}
    ]
}}


# Real Data

Use the following text for your answer. Do not make anything up in your answer.

Text:

-Entities-
{entity_df}

-Relationships-
{relation_df}

The report should include the following sections:

- TITLE: community's name that represents its key entities - title should be short but specific. When possible, include representative named entities in the title.
- SUMMARY: An executive summary of the community's overall structure, how its entities are related to each other, and significant information associated with its entities.
- IMPACT SEVERITY RATING: a float score between 0-10 that represents the severity of IMPACT posed by entities within the community. IMPACT is the scored importance of a community.
- RATING EXPLANATION: Give a single sentence explanation of the IMPACT severity rating.
- DETAILED FINDINGS: A list of 5-10 key insights about the community. Each insight should have a short summary followed by multiple paragraphs of explanatory text grounded according to the grounding rules below. Be comprehensive.

Return output as a well-formed JSON-formatted string with the following format(in language of 'Text' content):
{{
    "title": <report_title>,
    "summary": <executive_summary>,
    "rating": <impact_severity_rating>,
    "rating_explanation": <rating_explanation>,
    "findings": [
        {{
            "summary":<insight_1_summary>,
            "explanation": <insight_1_explanation>
        }},
        {{
            "summary":<insight_2_summary>,
            "explanation": <insight_2_explanation>
        }}
    ]
}}

# Grounding Rules

Points supported by data should list their data references as follows:

"This is an example sentence supported by multiple data references [Data: <dataset name> (record ids); <dataset name> (record ids)]."

Do not list more than 5 record ids in a single reference. Instead, list the top 5 most relevant record ids and add "+more" to indicate that there are more.

For example:
"Person X is the owner of Company Y and subject to many allegations of wrongdoing [Data: Reports (1), Entities (5, 7); Relationships (23); Claims (7, 2, 34, 64, 46, +more)]."

where 1, 5, 7, 23, 2, 34, 46, and 64 represent the id (not the index) of the relevant data record.

Do not include information where the supporting evidence for it is not provided.

Output:"""
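For illustration only (not part of this commit): the `{entity_df}` and `{relation_df}` placeholders above are filled with CSV text built from the community's nodes and edges; a minimal sketch of that step using pandas, the same way `community_reports_extractor.py` does.

# Build the CSV blocks that replace {entity_df} and {relation_df} in the prompt.
import pandas as pd

ent_list = [
    {"entity": "VERDANT OASIS PLAZA", "description": "Verdant Oasis Plaza is the location of the Unity March"},
    {"entity": "HARMONY ASSEMBLY", "description": "Harmony Assembly is an organization that is holding a march at Verdant Oasis Plaza"},
]
rela_list = [
    {"source": "VERDANT OASIS PLAZA", "target": "HARMONY ASSEMBLY", "description": "Harmony Assembly is holding a march at Verdant Oasis Plaza"},
]

prompt_variables = {
    "entity_df": pd.DataFrame(ent_list).to_csv(index_label="id"),
    "relation_df": pd.DataFrame(rela_list).to_csv(index_label="id"),
}
# Each value is CSV text with an added "id" index column, e.g.
# id,entity,description
# 0,VERDANT OASIS PLAZA,Verdant Oasis Plaza is the location of the Unity March
print(prompt_variables["entity_df"])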
166
graphrag/general/community_reports_extractor.py
Normal file
@@ -0,0 +1,166 @@
# Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""
Reference:
 - [graphrag](https://github.com/microsoft/graphrag)
"""

import logging
import json
import os
import re
from typing import Callable
from dataclasses import dataclass
import networkx as nx
import pandas as pd

from api.utils.api_utils import timeout
from graphrag.general import leiden
from graphrag.general.community_report_prompt import COMMUNITY_REPORT_PROMPT
from graphrag.general.extractor import Extractor
from graphrag.general.leiden import add_community_info2graph
from rag.llm.chat_model import Base as CompletionLLM
from graphrag.utils import perform_variable_replacements, dict_has_keys_with_types, chat_limiter
from rag.utils import num_tokens_from_string
import trio


@dataclass
class CommunityReportsResult:
    """Community reports result class definition."""

    output: list[str]
    structured_output: list[dict]


class CommunityReportsExtractor(Extractor):
    """Community reports extractor class definition."""

    _extraction_prompt: str
    _output_formatter_prompt: str
    _max_report_length: int

    def __init__(
            self,
            llm_invoker: CompletionLLM,
            max_report_length: int | None = None,
    ):
        super().__init__(llm_invoker)
        """Init method definition."""
        self._llm = llm_invoker
        self._extraction_prompt = COMMUNITY_REPORT_PROMPT
        self._max_report_length = max_report_length or 1500

    async def __call__(self, graph: nx.Graph, callback: Callable | None = None):
        enable_timeout_assertion = os.environ.get("ENABLE_TIMEOUT_ASSERTION")
        for node_degree in graph.degree:
            graph.nodes[str(node_degree[0])]["rank"] = int(node_degree[1])

        communities: dict[str, dict[str, list]] = leiden.run(graph, {})
        total = sum([len(comm.items()) for _, comm in communities.items()])
        res_str = []
        res_dict = []
        over, token_count = 0, 0

        @timeout(120)
        async def extract_community_report(community):
            nonlocal res_str, res_dict, over, token_count
            cm_id, cm = community
            weight = cm["weight"]
            ents = cm["nodes"]
            if len(ents) < 2:
                return
            ent_list = [{"entity": ent, "description": graph.nodes[ent]["description"]} for ent in ents]
            ent_df = pd.DataFrame(ent_list)

            rela_list = []
            k = 0
            for i in range(0, len(ents)):
                if k >= 10000:
                    break
                for j in range(i + 1, len(ents)):
                    if k >= 10000:
                        break
                    edge = graph.get_edge_data(ents[i], ents[j])
                    if edge is None:
                        continue
                    rela_list.append({"source": ents[i], "target": ents[j], "description": edge["description"]})
                    k += 1
            rela_df = pd.DataFrame(rela_list)

            prompt_variables = {
                "entity_df": ent_df.to_csv(index_label="id"),
                "relation_df": rela_df.to_csv(index_label="id")
            }
            text = perform_variable_replacements(self._extraction_prompt, variables=prompt_variables)
            async with chat_limiter:
                try:
                    with trio.move_on_after(180 if enable_timeout_assertion else 1000000000) as cancel_scope:
                        response = await trio.to_thread.run_sync(self._chat, text, [{"role": "user", "content": "Output:"}], {})
                    if cancel_scope.cancelled_caught:
                        logging.warning("extract_community_report._chat timeout, skipping...")
                        return
                except Exception as e:
                    logging.error(f"extract_community_report._chat failed: {e}")
                    return
            token_count += num_tokens_from_string(text + response)
            response = re.sub(r"^[^\{]*", "", response)
            response = re.sub(r"[^\}]*$", "", response)
            response = re.sub(r"\{\{", "{", response)
            response = re.sub(r"\}\}", "}", response)
            logging.debug(response)
            try:
                response = json.loads(response)
            except json.JSONDecodeError as e:
                logging.error(f"Failed to parse JSON response: {e}")
                logging.error(f"Response content: {response}")
                return
            if not dict_has_keys_with_types(response, [
                        ("title", str),
                        ("summary", str),
                        ("findings", list),
                        ("rating", float),
                        ("rating_explanation", str),
                    ]):
                return
            response["weight"] = weight
            response["entities"] = ents
            add_community_info2graph(graph, ents, response["title"])
            res_str.append(self._get_text_output(response))
            res_dict.append(response)
            over += 1
            if callback:
                callback(msg=f"Communities: {over}/{total}, used tokens: {token_count}")

        st = trio.current_time()
        async with trio.open_nursery() as nursery:
            for level, comm in communities.items():
                logging.info(f"Level {level}: Community: {len(comm.keys())}")
                for community in comm.items():
                    nursery.start_soon(extract_community_report, community)
        if callback:
            callback(msg=f"Community reports done in {trio.current_time() - st:.2f}s, used tokens: {token_count}")

        return CommunityReportsResult(
            structured_output=res_dict,
            output=res_str,
        )

    def _get_text_output(self, parsed_output: dict) -> str:
        title = parsed_output.get("title", "Report")
        summary = parsed_output.get("summary", "")
        findings = parsed_output.get("findings", [])

        def finding_summary(finding: dict):
            if isinstance(finding, str):
                return finding
            return finding.get("summary")

        def finding_explanation(finding: dict):
            if isinstance(finding, str):
                return ""
            return finding.get("explanation")

        report_sections = "\n\n".join(
            f"## {finding_summary(f)}\n\n{finding_explanation(f)}" for f in findings
        )
        return f"# {title}\n\n{summary}\n\n{report_sections}"
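For illustration only (not part of this commit): a self-contained sketch of the response post-processing in `extract_community_report` above: trim text outside the outermost braces, unescape the doubled braces inherited from the prompt template, parse the JSON, then check required keys. `has_keys_with_types` is a simplified stand-in for `graphrag.utils.dict_has_keys_with_types`, which is not part of this diff.

# Sketch of the JSON cleanup and validation done in extract_community_report.
import json
import re


def has_keys_with_types(d: dict, spec: list[tuple[str, type]]) -> bool:
    # Simplified stand-in for graphrag.utils.dict_has_keys_with_types (not in this diff).
    return all(isinstance(d.get(k), t) for k, t in spec)


raw = 'Here is the report:\n{{"title": "Demo", "summary": "S", "rating": 5.0, "rating_explanation": "E", "findings": []}} Thanks!'

cleaned = re.sub(r"^[^\{]*", "", raw)      # drop leading chatter before the first "{"
cleaned = re.sub(r"[^\}]*$", "", cleaned)  # drop trailing chatter after the last "}"
cleaned = re.sub(r"\{\{", "{", cleaned)    # the prompt escapes braces as {{ and }}
cleaned = re.sub(r"\}\}", "}", cleaned)

report = json.loads(cleaned)
ok = has_keys_with_types(report, [
    ("title", str),
    ("summary", str),
    ("findings", list),
    ("rating", float),
    ("rating_explanation", str),
])
print(ok, report["title"])  # True Demo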
66
graphrag/general/entity_embedding.py
Normal file
@@ -0,0 +1,66 @@
# Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""
Reference:
 - [graphrag](https://github.com/microsoft/graphrag)
"""

from typing import Any
import numpy as np
import networkx as nx
from dataclasses import dataclass
from graphrag.general.leiden import stable_largest_connected_component
import graspologic as gc


@dataclass
class NodeEmbeddings:
    """Node embeddings class definition."""

    nodes: list[str]
    embeddings: np.ndarray


def embed_node2vec(
    graph: nx.Graph | nx.DiGraph,
    dimensions: int = 1536,
    num_walks: int = 10,
    walk_length: int = 40,
    window_size: int = 2,
    iterations: int = 3,
    random_seed: int = 86,
) -> NodeEmbeddings:
    """Generate node embeddings using Node2Vec."""
    # generate embedding
    lcc_tensors = gc.embed.node2vec_embed(  # type: ignore
        graph=graph,
        dimensions=dimensions,
        window_size=window_size,
        iterations=iterations,
        num_walks=num_walks,
        walk_length=walk_length,
        random_seed=random_seed,
    )
    return NodeEmbeddings(embeddings=lcc_tensors[0], nodes=lcc_tensors[1])


def run(graph: nx.Graph, args: dict[str, Any]) -> dict:
    """Run method definition."""
    if args.get("use_lcc", True):
        graph = stable_largest_connected_component(graph)

    # create graph embedding using node2vec
    embeddings = embed_node2vec(
        graph=graph,
        dimensions=args.get("dimensions", 1536),
        num_walks=args.get("num_walks", 10),
        walk_length=args.get("walk_length", 40),
        window_size=args.get("window_size", 2),
        iterations=args.get("iterations", 3),
        random_seed=args.get("random_seed", 86),
    )

    pairs = zip(embeddings.nodes, embeddings.embeddings.tolist(), strict=True)
    sorted_pairs = sorted(pairs, key=lambda x: x[0])

    return dict(sorted_pairs)
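For illustration only (not part of this commit): a hedged usage sketch for the module above. It assumes the repository root is on PYTHONPATH and that the optional `graspologic` dependency is installed; the small dimension count just keeps the toy run fast (the pipeline default is 1536), and `use_lcc=False` skips the largest-connected-component reduction.

# Toy run of graphrag.general.entity_embedding.run on a small graph (untested sketch).
import networkx as nx
from graphrag.general.entity_embedding import run

graph = nx.karate_club_graph()
graph = nx.relabel_nodes(graph, {n: f"node_{n}" for n in graph.nodes()})  # string node names

embeddings = run(graph, {
    "use_lcc": False,   # keep the whole toy graph
    "dimensions": 32,   # pipeline default is 1536
    "num_walks": 10,
    "walk_length": 40,
    "window_size": 2,
    "iterations": 3,
    "random_seed": 86,
})
print(len(embeddings), len(next(iter(embeddings.values()))))  # 34 nodes, 32-dim vectors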
243
graphrag/general/extractor.py
Normal file
@@ -0,0 +1,243 @@
#
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import re
from collections import Counter, defaultdict
from copy import deepcopy
from typing import Callable

import networkx as nx
import trio

from api.utils.api_utils import timeout
from graphrag.general.graph_prompt import SUMMARIZE_DESCRIPTIONS_PROMPT
from graphrag.utils import (
    GraphChange,
    chat_limiter,
    flat_uniq_list,
    get_from_to,
    get_llm_cache,
    handle_single_entity_extraction,
    handle_single_relationship_extraction,
    set_llm_cache,
    split_string_by_multi_markers,
)
from rag.llm.chat_model import Base as CompletionLLM
from rag.prompts.generator import message_fit_in
from rag.utils import truncate

GRAPH_FIELD_SEP = "<SEP>"
DEFAULT_ENTITY_TYPES = ["organization", "person", "geo", "event", "category"]
ENTITY_EXTRACTION_MAX_GLEANINGS = 2
MAX_CONCURRENT_PROCESS_AND_EXTRACT_CHUNK = int(os.environ.get("MAX_CONCURRENT_PROCESS_AND_EXTRACT_CHUNK", 10))


class Extractor:
    _llm: CompletionLLM

    def __init__(
        self,
        llm_invoker: CompletionLLM,
        language: str | None = "English",
        entity_types: list[str] | None = None,
    ):
        self._llm = llm_invoker
        self._language = language
        self._entity_types = entity_types or DEFAULT_ENTITY_TYPES

    @timeout(60 * 20)
    def _chat(self, system, history, gen_conf={}):
        hist = deepcopy(history)
        conf = deepcopy(gen_conf)
        response = get_llm_cache(self._llm.llm_name, system, hist, conf)
        if response:
            return response
        _, system_msg = message_fit_in([{"role": "system", "content": system}], int(self._llm.max_length * 0.92))
        response = ""
        for attempt in range(3):
            try:
                response = self._llm.chat(system_msg[0]["content"], hist, conf)
                response = re.sub(r"^.*</think>", "", response, flags=re.DOTALL)
                if response.find("**ERROR**") >= 0:
                    raise Exception(response)
                set_llm_cache(self._llm.llm_name, system, response, history, gen_conf)
                break  # got a valid response, stop retrying
            except Exception as e:
                logging.exception(e)
                if attempt == 2:
                    raise

        return response

    def _entities_and_relations(self, chunk_key: str, records: list, tuple_delimiter: str):
        maybe_nodes = defaultdict(list)
        maybe_edges = defaultdict(list)
        ent_types = [t.lower() for t in self._entity_types]
        for record in records:
            record_attributes = split_string_by_multi_markers(record, [tuple_delimiter])

            if_entities = handle_single_entity_extraction(record_attributes, chunk_key)
            if if_entities is not None and if_entities.get("entity_type", "unknown").lower() in ent_types:
                maybe_nodes[if_entities["entity_name"]].append(if_entities)
                continue

            if_relation = handle_single_relationship_extraction(record_attributes, chunk_key)
            if if_relation is not None:
                maybe_edges[(if_relation["src_id"], if_relation["tgt_id"])].append(if_relation)
        return dict(maybe_nodes), dict(maybe_edges)

    async def __call__(self, doc_id: str, chunks: list[str], callback: Callable | None = None):
        self.callback = callback
        start_ts = trio.current_time()

        async def extract_all(doc_id, chunks, max_concurrency=MAX_CONCURRENT_PROCESS_AND_EXTRACT_CHUNK):
            out_results = []
            limiter = trio.Semaphore(max_concurrency)

            async def worker(chunk_key_dp: tuple[str, str], idx: int, total: int):
                async with limiter:
                    await self._process_single_content(chunk_key_dp, idx, total, out_results)

            async with trio.open_nursery() as nursery:
                for i, ck in enumerate(chunks):
                    nursery.start_soon(worker, (doc_id, ck), i, len(chunks))

            return out_results

        out_results = await extract_all(doc_id, chunks, max_concurrency=MAX_CONCURRENT_PROCESS_AND_EXTRACT_CHUNK)

        maybe_nodes = defaultdict(list)
        maybe_edges = defaultdict(list)
        sum_token_count = 0
        for m_nodes, m_edges, token_count in out_results:
            for k, v in m_nodes.items():
                maybe_nodes[k].extend(v)
            for k, v in m_edges.items():
                maybe_edges[tuple(sorted(k))].extend(v)
            sum_token_count += token_count
        now = trio.current_time()
        if callback:
            callback(msg=f"Entities and relationships extraction done, {len(maybe_nodes)} nodes, {len(maybe_edges)} edges, {sum_token_count} tokens, {now - start_ts:.2f}s.")
        start_ts = now
        logging.info("Entities merging...")
        all_entities_data = []
        async with trio.open_nursery() as nursery:
            for en_nm, ents in maybe_nodes.items():
                nursery.start_soon(self._merge_nodes, en_nm, ents, all_entities_data)
        now = trio.current_time()
        if callback:
            callback(msg=f"Entities merging done, {now - start_ts:.2f}s.")

        start_ts = now
        logging.info("Relationships merging...")
        all_relationships_data = []
        async with trio.open_nursery() as nursery:
            for (src, tgt), rels in maybe_edges.items():
                nursery.start_soon(self._merge_edges, src, tgt, rels, all_relationships_data)
        now = trio.current_time()
        if callback:
            callback(msg=f"Relationships merging done, {now - start_ts:.2f}s.")

        if not len(all_entities_data) and not len(all_relationships_data):
            logging.warning("Didn't extract any entities and relationships, maybe your LLM is not working")

        if not len(all_entities_data):
            logging.warning("Didn't extract any entities")
        if not len(all_relationships_data):
            logging.warning("Didn't extract any relationships")

        return all_entities_data, all_relationships_data

    async def _merge_nodes(self, entity_name: str, entities: list[dict], all_relationships_data):
        if not entities:
            return
        entity_type = sorted(
            Counter([dp["entity_type"] for dp in entities]).items(),
            key=lambda x: x[1],
            reverse=True,
        )[0][0]
        description = GRAPH_FIELD_SEP.join(sorted(set([dp["description"] for dp in entities])))
        already_source_ids = flat_uniq_list(entities, "source_id")
        description = await self._handle_entity_relation_summary(entity_name, description)
        node_data = dict(
            entity_type=entity_type,
            description=description,
            source_id=already_source_ids,
        )
        node_data["entity_name"] = entity_name
        all_relationships_data.append(node_data)

    async def _merge_edges(self, src_id: str, tgt_id: str, edges_data: list[dict], all_relationships_data=None):
        if not edges_data:
            return
        weight = sum([edge["weight"] for edge in edges_data])
        description = GRAPH_FIELD_SEP.join(sorted(set([edge["description"] for edge in edges_data])))
        description = await self._handle_entity_relation_summary(f"{src_id} -> {tgt_id}", description)
        keywords = flat_uniq_list(edges_data, "keywords")
        source_id = flat_uniq_list(edges_data, "source_id")
        edge_data = dict(src_id=src_id, tgt_id=tgt_id, description=description, keywords=keywords, weight=weight, source_id=source_id)
        all_relationships_data.append(edge_data)

    async def _merge_graph_nodes(self, graph: nx.Graph, nodes: list[str], change: GraphChange):
        if len(nodes) <= 1:
            return
        change.added_updated_nodes.add(nodes[0])
        change.removed_nodes.update(nodes[1:])
        nodes_set = set(nodes)
        node0_attrs = graph.nodes[nodes[0]]
        node0_neighbors = set(graph.neighbors(nodes[0]))
        for node1 in nodes[1:]:
            # Merge two nodes, keep "entity_name", "entity_type", "page_rank" unchanged.
            node1_attrs = graph.nodes[node1]
            node0_attrs["description"] += f"{GRAPH_FIELD_SEP}{node1_attrs['description']}"
            node0_attrs["source_id"] = sorted(set(node0_attrs["source_id"] + node1_attrs["source_id"]))
            for neighbor in graph.neighbors(node1):
                change.removed_edges.add(get_from_to(node1, neighbor))
                if neighbor not in nodes_set:
                    edge1_attrs = graph.get_edge_data(node1, neighbor)
                    if neighbor in node0_neighbors:
                        # Merge two edges
                        change.added_updated_edges.add(get_from_to(nodes[0], neighbor))
                        edge0_attrs = graph.get_edge_data(nodes[0], neighbor)
                        edge0_attrs["weight"] += edge1_attrs["weight"]
                        edge0_attrs["description"] += f"{GRAPH_FIELD_SEP}{edge1_attrs['description']}"
                        for attr in ["keywords", "source_id"]:
                            edge0_attrs[attr] = sorted(set(edge0_attrs[attr] + edge1_attrs[attr]))
                        edge0_attrs["description"] = await self._handle_entity_relation_summary(f"({nodes[0]}, {neighbor})", edge0_attrs["description"])
                        graph.add_edge(nodes[0], neighbor, **edge0_attrs)
                    else:
                        graph.add_edge(nodes[0], neighbor, **edge1_attrs)
            graph.remove_node(node1)
        node0_attrs["description"] = await self._handle_entity_relation_summary(nodes[0], node0_attrs["description"])
        graph.nodes[nodes[0]].update(node0_attrs)

    async def _handle_entity_relation_summary(self, entity_or_relation_name: str, description: str) -> str:
        summary_max_tokens = 512
        use_description = truncate(description, summary_max_tokens)
        description_list = use_description.split(GRAPH_FIELD_SEP)
        if len(description_list) <= 12:
            return use_description
        prompt_template = SUMMARIZE_DESCRIPTIONS_PROMPT
        context_base = dict(
            entity_name=entity_or_relation_name,
            description_list=description_list,
            language=self._language,
        )
        use_prompt = prompt_template.format(**context_base)
        logging.info(f"Trigger summary: {entity_or_relation_name}")
        async with chat_limiter:
            summary = await trio.to_thread.run_sync(self._chat, "", [{"role": "user", "content": use_prompt}])
        return summary
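For illustration only (not part of this commit): a self-contained sketch of the merge rules in `_merge_nodes` above. The most frequent entity type wins, descriptions are de-duplicated, sorted and joined with GRAPH_FIELD_SEP, and source ids are unioned; the LLM-based summary step and the `flat_uniq_list` helper (not in this diff) are approximated with plain Python.

# Sketch of how duplicate entity records for the same name are merged.
from collections import Counter

GRAPH_FIELD_SEP = "<SEP>"

# Sample records with hypothetical chunk ids, reusing names from the prompt example.
entities = [
    {"entity_type": "organization", "description": "Harmony Assembly is an organization that is holding a march at Verdant Oasis Plaza", "source_id": ["chunk-1"]},
    {"entity_type": "organization", "description": "Harmony Assembly is organizing the Unity March", "source_id": ["chunk-2"]},
    {"entity_type": "category", "description": "Harmony Assembly is organizing the Unity March", "source_id": ["chunk-2"]},
]

entity_type = Counter(dp["entity_type"] for dp in entities).most_common(1)[0][0]
description = GRAPH_FIELD_SEP.join(sorted({dp["description"] for dp in entities}))
source_ids = sorted({sid for dp in entities for sid in dp["source_id"]})

node_data = {
    "entity_name": "HARMONY ASSEMBLY",
    "entity_type": entity_type,   # "organization" (2 of 3 records)
    "description": description,   # two unique descriptions joined by <SEP>
    "source_id": source_ids,      # ["chunk-1", "chunk-2"]
}
print(node_data)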
150
graphrag/general/graph_extractor.py
Normal file
@@ -0,0 +1,150 @@
# Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""
Reference:
 - [graphrag](https://github.com/microsoft/graphrag)
"""

import re
from typing import Any
from dataclasses import dataclass
import tiktoken
import trio

from graphrag.general.extractor import Extractor, ENTITY_EXTRACTION_MAX_GLEANINGS
from graphrag.general.graph_prompt import GRAPH_EXTRACTION_PROMPT, CONTINUE_PROMPT, LOOP_PROMPT
from graphrag.utils import ErrorHandlerFn, perform_variable_replacements, chat_limiter, split_string_by_multi_markers
from rag.llm.chat_model import Base as CompletionLLM
import networkx as nx
from rag.utils import num_tokens_from_string

DEFAULT_TUPLE_DELIMITER = "<|>"
DEFAULT_RECORD_DELIMITER = "##"
DEFAULT_COMPLETION_DELIMITER = "<|COMPLETE|>"


@dataclass
class GraphExtractionResult:
    """Unipartite graph extraction result class definition."""

    output: nx.Graph
    source_docs: dict[Any, Any]


class GraphExtractor(Extractor):
    """Unipartite graph extractor class definition."""

    _join_descriptions: bool
    _tuple_delimiter_key: str
    _record_delimiter_key: str
    _entity_types_key: str
    _input_text_key: str
    _completion_delimiter_key: str
    _entity_name_key: str
    _input_descriptions_key: str
    _extraction_prompt: str
    _summarization_prompt: str
    _loop_args: dict[str, Any]
    _max_gleanings: int
    _on_error: ErrorHandlerFn

    def __init__(
        self,
        llm_invoker: CompletionLLM,
        language: str | None = "English",
        entity_types: list[str] | None = None,
        tuple_delimiter_key: str | None = None,
        record_delimiter_key: str | None = None,
        input_text_key: str | None = None,
        entity_types_key: str | None = None,
        completion_delimiter_key: str | None = None,
        join_descriptions=True,
        max_gleanings: int | None = None,
        on_error: ErrorHandlerFn | None = None,
    ):
        super().__init__(llm_invoker, language, entity_types)
        """Init method definition."""
        # TODO: streamline construction
        self._llm = llm_invoker
        self._join_descriptions = join_descriptions
        self._input_text_key = input_text_key or "input_text"
        self._tuple_delimiter_key = tuple_delimiter_key or "tuple_delimiter"
        self._record_delimiter_key = record_delimiter_key or "record_delimiter"
        self._completion_delimiter_key = (
            completion_delimiter_key or "completion_delimiter"
        )
        self._entity_types_key = entity_types_key or "entity_types"
        self._extraction_prompt = GRAPH_EXTRACTION_PROMPT
        self._max_gleanings = (
            max_gleanings
            if max_gleanings is not None
            else ENTITY_EXTRACTION_MAX_GLEANINGS
        )
        self._on_error = on_error or (lambda _e, _s, _d: None)
        self.prompt_token_count = num_tokens_from_string(self._extraction_prompt)

        # Construct the looping arguments
        encoding = tiktoken.get_encoding("cl100k_base")
        yes = encoding.encode("YES")
        no = encoding.encode("NO")
        self._loop_args = {"logit_bias": {yes[0]: 100, no[0]: 100}, "max_tokens": 1}

        # Wire defaults into the prompt variables
        self._prompt_variables = {
            self._tuple_delimiter_key: DEFAULT_TUPLE_DELIMITER,
            self._record_delimiter_key: DEFAULT_RECORD_DELIMITER,
            self._completion_delimiter_key: DEFAULT_COMPLETION_DELIMITER,
            self._entity_types_key: ",".join(entity_types),
        }

    async def _process_single_content(self, chunk_key_dp: tuple[str, str], chunk_seq: int, num_chunks: int, out_results):
        token_count = 0
        chunk_key = chunk_key_dp[0]
        content = chunk_key_dp[1]
        variables = {
            **self._prompt_variables,
            self._input_text_key: content,
        }
        hint_prompt = perform_variable_replacements(self._extraction_prompt, variables=variables)
        async with chat_limiter:
            response = await trio.to_thread.run_sync(lambda: self._chat(hint_prompt, [{"role": "user", "content": "Output:"}], {}))
        token_count += num_tokens_from_string(hint_prompt + response)

        results = response or ""
        history = [{"role": "system", "content": hint_prompt}, {"role": "user", "content": response}]

        # Repeat to ensure we maximize entity count
        for i in range(self._max_gleanings):
            history.append({"role": "user", "content": CONTINUE_PROMPT})
            async with chat_limiter:
                response = await trio.to_thread.run_sync(lambda: self._chat("", history, {}))
            token_count += num_tokens_from_string("\n".join([m["content"] for m in history]) + response)
            results += response or ""

            # if this is the final glean, don't bother updating the continuation flag
            if i >= self._max_gleanings - 1:
                break
            history.append({"role": "assistant", "content": response})
            history.append({"role": "user", "content": LOOP_PROMPT})
            async with chat_limiter:
                continuation = await trio.to_thread.run_sync(lambda: self._chat("", history))
            token_count += num_tokens_from_string("\n".join([m["content"] for m in history]) + response)
            if continuation != "Y":
                break
            history.append({"role": "assistant", "content": "Y"})

        records = split_string_by_multi_markers(
            results,
            [self._prompt_variables[self._record_delimiter_key], self._prompt_variables[self._completion_delimiter_key]],
        )
        rcds = []
        for record in records:
            record = re.search(r"\((.*)\)", record)
            if record is None:
                continue
            rcds.append(record.group(1))
        records = rcds
        maybe_nodes, maybe_edges = self._entities_and_relations(chunk_key, records, self._prompt_variables[self._tuple_delimiter_key])
        out_results.append((maybe_nodes, maybe_edges, token_count))
        if self.callback:
            self.callback(0.5 + 0.1 * len(out_results) / num_chunks, msg=f"Entities extraction of chunk {chunk_seq} {len(out_results)}/{num_chunks} done, {len(maybe_nodes)} nodes, {len(maybe_edges)} edges, {token_count} tokens.")
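For illustration only (not part of this commit): a self-contained sketch of how `_process_single_content` above decomposes a raw extraction response with the default delimiters (`##`, `<|>`, `<|COMPLETE|>`). `split_by_markers` is a simplified stand-in for `graphrag.utils.split_string_by_multi_markers`, which is not part of this diff; the real pipeline then hands each field list to `handle_single_entity_extraction` / `handle_single_relationship_extraction`.

# Decompose a GRAPH_EXTRACTION_PROMPT-style response into per-record field lists.
import re

TUPLE_DELIMITER = "<|>"
RECORD_DELIMITER = "##"
COMPLETION_DELIMITER = "<|COMPLETE|>"

response = (
    '("entity"<|>"WASHINGTON"<|>"location"<|>"Washington is a location where communications are received.")##'
    '("relationship"<|>"THE TEAM"<|>"WASHINGTON"<|>"The team receives communications from Washington."<|>7)'
    '<|COMPLETE|>'
)

def split_by_markers(text: str, markers: list[str]) -> list[str]:
    # Simplified stand-in for graphrag.utils.split_string_by_multi_markers (not in this diff).
    parts = re.split("|".join(re.escape(m) for m in markers), text)
    return [p.strip() for p in parts if p.strip()]

records = []
for chunk in split_by_markers(response, [RECORD_DELIMITER, COMPLETION_DELIMITER]):
    m = re.search(r"\((.*)\)", chunk)  # keep only the parenthesised tuple
    if m:
        records.append(split_by_markers(m.group(1), [TUPLE_DELIMITER]))

for fields in records:
    print(fields[0].strip('"'), fields[1:])
# entity ['"WASHINGTON"', '"location"', '"Washington is a location where communications are received."']
# relationship ['"THE TEAM"', '"WASHINGTON"', '"The team receives communications from Washington."', '7']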
124
graphrag/general/graph_prompt.py
Normal file
@@ -0,0 +1,124 @@
# Copyright (c) 2024 Microsoft Corporation.
# Licensed under the MIT License
"""
Reference:
 - [GraphRAG](https://github.com/microsoft/graphrag/blob/main/graphrag/prompts/index/extract_graph.py)
"""

GRAPH_EXTRACTION_PROMPT = """
-Goal-
Given a text document that is potentially relevant to this activity and a list of entity types, identify all entities of those types from the text and all relationships among the identified entities.

-Steps-
1. Identify all entities. For each identified entity, extract the following information:
- entity_name: Name of the entity, capitalized, in language of 'Text'
- entity_type: One of the following types: [{entity_types}]
- entity_description: Comprehensive description of the entity's attributes and activities in language of 'Text'
Format each entity as ("entity"{tuple_delimiter}<entity_name>{tuple_delimiter}<entity_type>{tuple_delimiter}<entity_description>

2. From the entities identified in step 1, identify all pairs of (source_entity, target_entity) that are *clearly related* to each other.
For each pair of related entities, extract the following information:
- source_entity: name of the source entity, as identified in step 1
- target_entity: name of the target entity, as identified in step 1
- relationship_description: explanation as to why you think the source entity and the target entity are related to each other in language of 'Text'
- relationship_strength: a numeric score indicating strength of the relationship between the source entity and target entity
Format each relationship as ("relationship"{tuple_delimiter}<source_entity>{tuple_delimiter}<target_entity>{tuple_delimiter}<relationship_description>{tuple_delimiter}<relationship_strength>)

3. Return output as a single list of all the entities and relationships identified in steps 1 and 2. Use **{record_delimiter}** as the list delimiter.

4. When finished, output {completion_delimiter}

######################
-Examples-
######################
Example 1:

Entity_types: [person, technology, mission, organization, location]
Text:
while Alex clenched his jaw, the buzz of frustration dull against the backdrop of Taylor's authoritarian certainty. It was this competitive undercurrent that kept him alert, the sense that his and Jordan's shared commitment to discovery was an unspoken rebellion against Cruz's narrowing vision of control and order.

Then Taylor did something unexpected. They paused beside Jordan and, for a moment, observed the device with something akin to reverence. “If this tech can be understood..." Taylor said, their voice quieter, "It could change the game for us. For all of us.”

The underlying dismissal earlier seemed to falter, replaced by a glimpse of reluctant respect for the gravity of what lay in their hands. Jordan looked up, and for a fleeting heartbeat, their eyes locked with Taylor's, a wordless clash of wills softening into an uneasy truce.

It was a small transformation, barely perceptible, but one that Alex noted with an inward nod. They had all been brought here by different paths
################
Output:
("entity"{tuple_delimiter}"Alex"{tuple_delimiter}"person"{tuple_delimiter}"Alex is a character who experiences frustration and is observant of the dynamics among other characters."){record_delimiter}
("entity"{tuple_delimiter}"Taylor"{tuple_delimiter}"person"{tuple_delimiter}"Taylor is portrayed with authoritarian certainty and shows a moment of reverence towards a device, indicating a change in perspective."){record_delimiter}
("entity"{tuple_delimiter}"Jordan"{tuple_delimiter}"person"{tuple_delimiter}"Jordan shares a commitment to discovery and has a significant interaction with Taylor regarding a device."){record_delimiter}
("entity"{tuple_delimiter}"Cruz"{tuple_delimiter}"person"{tuple_delimiter}"Cruz is associated with a vision of control and order, influencing the dynamics among other characters."){record_delimiter}
("entity"{tuple_delimiter}"The Device"{tuple_delimiter}"technology"{tuple_delimiter}"The Device is central to the story, with potential game-changing implications, and is revered by Taylor."){record_delimiter}
("relationship"{tuple_delimiter}"Alex"{tuple_delimiter}"Taylor"{tuple_delimiter}"Alex is affected by Taylor's authoritarian certainty and observes changes in Taylor's attitude towards the device."{tuple_delimiter}7){record_delimiter}
("relationship"{tuple_delimiter}"Alex"{tuple_delimiter}"Jordan"{tuple_delimiter}"Alex and Jordan share a commitment to discovery, which contrasts with Cruz's vision."{tuple_delimiter}6){record_delimiter}
("relationship"{tuple_delimiter}"Taylor"{tuple_delimiter}"Jordan"{tuple_delimiter}"Taylor and Jordan interact directly regarding the device, leading to a moment of mutual respect and an uneasy truce."{tuple_delimiter}8){record_delimiter}
("relationship"{tuple_delimiter}"Jordan"{tuple_delimiter}"Cruz"{tuple_delimiter}"Jordan's commitment to discovery is in rebellion against Cruz's vision of control and order."{tuple_delimiter}5){record_delimiter}
("relationship"{tuple_delimiter}"Taylor"{tuple_delimiter}"The Device"{tuple_delimiter}"Taylor shows reverence towards the device, indicating its importance and potential impact."{tuple_delimiter}9){completion_delimiter}
#############################
Example 2:

Entity_types: [person, technology, mission, organization, location]
Text:
They were no longer mere operatives; they had become guardians of a threshold, keepers of a message from a realm beyond stars and stripes. This elevation in their mission could not be shackled by regulations and established protocols—it demanded a new perspective, a new resolve.

Tension threaded through the dialogue of beeps and static as communications with Washington buzzed in the background. The team stood, a portentous air enveloping them. It was clear that the decisions they made in the ensuing hours could redefine humanity's place in the cosmos or condemn them to ignorance and potential peril.

Their connection to the stars solidified, the group moved to address the crystallizing warning, shifting from passive recipients to active participants. Mercer's latter instincts gained precedence— the team's mandate had evolved, no longer solely to observe and report but to interact and prepare. A metamorphosis had begun, and Operation: Dulce hummed with the newfound frequency of their daring, a tone set not by the earthly
#############
Output:
("entity"{tuple_delimiter}"Washington"{tuple_delimiter}"location"{tuple_delimiter}"Washington is a location where communications are being received, indicating its importance in the decision-making process."){record_delimiter}
("entity"{tuple_delimiter}"Operation: Dulce"{tuple_delimiter}"mission"{tuple_delimiter}"Operation: Dulce is described as a mission that has evolved to interact and prepare, indicating a significant shift in objectives and activities."){record_delimiter}
("entity"{tuple_delimiter}"The team"{tuple_delimiter}"organization"{tuple_delimiter}"The team is portrayed as a group of individuals who have transitioned from passive observers to active participants in a mission, showing a dynamic change in their role."){record_delimiter}
("relationship"{tuple_delimiter}"The team"{tuple_delimiter}"Washington"{tuple_delimiter}"The team receives communications from Washington, which influences their decision-making process."{tuple_delimiter}7){record_delimiter}
("relationship"{tuple_delimiter}"The team"{tuple_delimiter}"Operation: Dulce"{tuple_delimiter}"The team is directly involved in Operation: Dulce, executing its evolved objectives and activities."{tuple_delimiter}9){completion_delimiter}
|
||||
#############################
|
||||
Example 3:
|
||||
|
||||
Entity_types: [person, role, technology, organization, event, location, concept]
|
||||
Text:
|
||||
their voice slicing through the buzz of activity. "Control may be an illusion when facing an intelligence that literally writes its own rules," they stated stoically, casting a watchful eye over the flurry of data.
|
||||
|
||||
"It's like it's learning to communicate," offered Sam Rivera from a nearby interface, their youthful energy boding a mix of awe and anxiety. "This gives talking to strangers' a whole new meaning."
|
||||
|
||||
Alex surveyed his team—each face a study in concentration, determination, and not a small measure of trepidation. "This might well be our first contact," he acknowledged, "And we need to be ready for whatever answers back."
|
||||
|
||||
Together, they stood on the edge of the unknown, forging humanity's response to a message from the heavens. The ensuing silence was palpable—a collective introspection about their role in this grand cosmic play, one that could rewrite human history.
|
||||
|
||||
The encrypted dialogue continued to unfold, its intricate patterns showing an almost uncanny anticipation
|
||||
#############
|
||||
Output:
|
||||
("entity"{tuple_delimiter}"Sam Rivera"{tuple_delimiter}"person"{tuple_delimiter}"Sam Rivera is a member of a team working on communicating with an unknown intelligence, showing a mix of awe and anxiety."){record_delimiter}
|
||||
("entity"{tuple_delimiter}"Alex"{tuple_delimiter}"person"{tuple_delimiter}"Alex is the leader of a team attempting first contact with an unknown intelligence, acknowledging the significance of their task."){record_delimiter}
|
||||
("entity"{tuple_delimiter}"Control"{tuple_delimiter}"concept"{tuple_delimiter}"Control refers to the ability to manage or govern, which is challenged by an intelligence that writes its own rules."){record_delimiter}
|
||||
("entity"{tuple_delimiter}"Intelligence"{tuple_delimiter}"concept"{tuple_delimiter}"Intelligence here refers to an unknown entity capable of writing its own rules and learning to communicate."){record_delimiter}
|
||||
("entity"{tuple_delimiter}"First Contact"{tuple_delimiter}"event"{tuple_delimiter}"First Contact is the potential initial communication between humanity and an unknown intelligence."){record_delimiter}
|
||||
("entity"{tuple_delimiter}"Humanity's Response"{tuple_delimiter}"event"{tuple_delimiter}"Humanity's Response is the collective action taken by Alex's team in response to a message from an unknown intelligence."){record_delimiter}
|
||||
("relationship"{tuple_delimiter}"Sam Rivera"{tuple_delimiter}"Intelligence"{tuple_delimiter}"Sam Rivera is directly involved in the process of learning to communicate with the unknown intelligence."{tuple_delimiter}9){record_delimiter}
|
||||
("relationship"{tuple_delimiter}"Alex"{tuple_delimiter}"First Contact"{tuple_delimiter}"Alex leads the team that might be making the First Contact with the unknown intelligence."{tuple_delimiter}10){record_delimiter}
|
||||
("relationship"{tuple_delimiter}"Alex"{tuple_delimiter}"Humanity's Response"{tuple_delimiter}"Alex and his team are the key figures in Humanity's Response to the unknown intelligence."{tuple_delimiter}8){record_delimiter}
|
||||
("relationship"{tuple_delimiter}"Control"{tuple_delimiter}"Intelligence"{tuple_delimiter}"The concept of Control is challenged by the Intelligence that writes its own rules."{tuple_delimiter}7){completion_delimiter}
|
||||
#############################
|
||||
-Real Data-
######################
Entity_types: {entity_types}
Text: {input_text}
######################
Output:"""

CONTINUE_PROMPT = "MANY entities were missed in the last extraction. Add them below using the same format:\n"
LOOP_PROMPT = "It appears some entities may have still been missed. Answer Y if there are still entities that need to be added, or N if there are none. Please answer with a single letter Y or N.\n"

SUMMARIZE_DESCRIPTIONS_PROMPT = """
You are a helpful assistant responsible for generating a comprehensive summary of the data provided below.
You are given one or two entities, and a list of descriptions, all related to the same entity or group of entities.
Please concatenate all of these into a single, comprehensive description. Make sure to include information collected from all the descriptions.
If the provided descriptions are contradictory, please resolve the contradictions and provide a single, coherent summary.
Make sure it is written in the third person, and include the entity names so we have the full context.
Use {language} as the output language.

#######
-Data-
Entities: {entity_name}
Description List: {description_list}
#######
"""
|
||||
514
graphrag/general/index.py
Normal file
514
graphrag/general/index.py
Normal file
@@ -0,0 +1,514 @@
|
||||
#
|
||||
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
|
||||
import networkx as nx
|
||||
import trio
|
||||
|
||||
from api import settings
|
||||
from api.db.services.document_service import DocumentService
|
||||
from api.utils import get_uuid
|
||||
from api.utils.api_utils import timeout
|
||||
from graphrag.entity_resolution import EntityResolution
|
||||
from graphrag.general.community_reports_extractor import CommunityReportsExtractor
|
||||
from graphrag.general.extractor import Extractor
|
||||
from graphrag.general.graph_extractor import GraphExtractor as GeneralKGExt
|
||||
from graphrag.light.graph_extractor import GraphExtractor as LightKGExt
|
||||
from graphrag.utils import (
|
||||
GraphChange,
|
||||
chunk_id,
|
||||
does_graph_contains,
|
||||
get_graph,
|
||||
graph_merge,
|
||||
set_graph,
|
||||
tidy_graph,
|
||||
)
|
||||
from rag.nlp import rag_tokenizer, search
|
||||
from rag.utils.redis_conn import RedisDistributedLock
|
||||
|
||||
|
||||
async def run_graphrag(
|
||||
row: dict,
|
||||
language,
|
||||
with_resolution: bool,
|
||||
with_community: bool,
|
||||
chat_model,
|
||||
embedding_model,
|
||||
callback,
|
||||
):
|
||||
enable_timeout_assertion = os.environ.get("ENABLE_TIMEOUT_ASSERTION")
|
||||
start = trio.current_time()
|
||||
tenant_id, kb_id, doc_id = row["tenant_id"], str(row["kb_id"]), row["doc_id"]
|
||||
chunks = []
|
||||
for d in settings.retrievaler.chunk_list(doc_id, tenant_id, [kb_id], fields=["content_with_weight", "doc_id"], sort_by_position=True):
|
||||
chunks.append(d["content_with_weight"])
|
||||
|
||||
with trio.fail_after(max(120, len(chunks) * 60 * 10) if enable_timeout_assertion else 10000000000):
|
||||
subgraph = await generate_subgraph(
|
||||
LightKGExt if "method" not in row["kb_parser_config"].get("graphrag", {}) or row["kb_parser_config"]["graphrag"]["method"] != "general" else GeneralKGExt,
|
||||
tenant_id,
|
||||
kb_id,
|
||||
doc_id,
|
||||
chunks,
|
||||
language,
|
||||
row["kb_parser_config"]["graphrag"].get("entity_types", []),
|
||||
chat_model,
|
||||
embedding_model,
|
||||
callback,
|
||||
)
|
||||
|
||||
if not subgraph:
|
||||
return
|
||||
|
||||
graphrag_task_lock = RedisDistributedLock(f"graphrag_task_{kb_id}", lock_value=doc_id, timeout=1200)
|
||||
await graphrag_task_lock.spin_acquire()
|
||||
callback(msg=f"run_graphrag {doc_id} graphrag_task_lock acquired")
|
||||
|
||||
try:
|
||||
subgraph_nodes = set(subgraph.nodes())
|
||||
new_graph = await merge_subgraph(
|
||||
tenant_id,
|
||||
kb_id,
|
||||
doc_id,
|
||||
subgraph,
|
||||
embedding_model,
|
||||
callback,
|
||||
)
|
||||
assert new_graph is not None
|
||||
|
||||
if not with_resolution and not with_community:
|
||||
return
|
||||
|
||||
if with_resolution:
|
||||
await graphrag_task_lock.spin_acquire()
|
||||
callback(msg=f"run_graphrag {doc_id} graphrag_task_lock acquired")
|
||||
await resolve_entities(
|
||||
new_graph,
|
||||
subgraph_nodes,
|
||||
tenant_id,
|
||||
kb_id,
|
||||
doc_id,
|
||||
chat_model,
|
||||
embedding_model,
|
||||
callback,
|
||||
)
|
||||
if with_community:
|
||||
await graphrag_task_lock.spin_acquire()
|
||||
callback(msg=f"run_graphrag {doc_id} graphrag_task_lock acquired")
|
||||
await extract_community(
|
||||
new_graph,
|
||||
tenant_id,
|
||||
kb_id,
|
||||
doc_id,
|
||||
chat_model,
|
||||
embedding_model,
|
||||
callback,
|
||||
)
|
||||
finally:
|
||||
graphrag_task_lock.release()
|
||||
now = trio.current_time()
|
||||
callback(msg=f"GraphRAG for doc {doc_id} done in {now - start:.2f} seconds.")
|
||||
return
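# Illustrative only: a minimal sketch of driving run_graphrag from a trio event loop.
# The row fields mirror the keys accessed above; chat_mdl, embd_mdl and the callback
# are placeholders rather than real objects from this repository.
def _demo_run_graphrag(row: dict, chat_mdl, embd_mdl):
    def _callback(prog=None, msg=""):
        logging.info(msg)

    trio.run(
        run_graphrag,
        row,        # expects tenant_id, kb_id, doc_id and kb_parser_config keys
        "English",  # language
        True,       # with_resolution
        True,       # with_community
        chat_mdl,
        embd_mdl,
        _callback,
    )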
|
||||
|
||||
|
||||
async def run_graphrag_for_kb(
|
||||
row: dict,
|
||||
doc_ids: list[str],
|
||||
language: str,
|
||||
kb_parser_config: dict,
|
||||
chat_model,
|
||||
embedding_model,
|
||||
callback,
|
||||
*,
|
||||
with_resolution: bool = True,
|
||||
with_community: bool = True,
|
||||
max_parallel_docs: int = 4,
|
||||
) -> dict:
|
||||
tenant_id, kb_id = row["tenant_id"], row["kb_id"]
|
||||
enable_timeout_assertion = os.environ.get("ENABLE_TIMEOUT_ASSERTION")
|
||||
start = trio.current_time()
|
||||
fields_for_chunks = ["content_with_weight", "doc_id"]
|
||||
|
||||
if not doc_ids:
|
||||
logging.info(f"Fetching all docs for {kb_id}")
|
||||
docs, _ = DocumentService.get_by_kb_id(
|
||||
kb_id=kb_id,
|
||||
page_number=0,
|
||||
items_per_page=0,
|
||||
orderby="create_time",
|
||||
desc=False,
|
||||
keywords="",
|
||||
run_status=[],
|
||||
types=[],
|
||||
suffix=[],
|
||||
)
|
||||
doc_ids = [doc["id"] for doc in docs]
|
||||
|
||||
doc_ids = list(dict.fromkeys(doc_ids))
|
||||
if not doc_ids:
|
||||
callback(msg=f"[GraphRAG] kb:{kb_id} has no processable doc_id.")
|
||||
return {"ok_docs": [], "failed_docs": [], "total_docs": 0, "total_chunks": 0, "seconds": 0.0}
|
||||
|
||||
def load_doc_chunks(doc_id: str) -> list[str]:
|
||||
from rag.utils import num_tokens_from_string
|
||||
|
||||
chunks = []
|
||||
current_chunk = ""
|
||||
|
||||
for d in settings.retrievaler.chunk_list(
|
||||
doc_id,
|
||||
tenant_id,
|
||||
[kb_id],
|
||||
fields=fields_for_chunks,
|
||||
sort_by_position=True,
|
||||
):
|
||||
content = d["content_with_weight"]
|
||||
if num_tokens_from_string(current_chunk + content) < 1024:
|
||||
current_chunk += content
|
||||
else:
|
||||
if current_chunk:
|
||||
chunks.append(current_chunk)
|
||||
current_chunk = content
|
||||
|
||||
if current_chunk:
|
||||
chunks.append(current_chunk)
|
||||
|
||||
return chunks
|
||||
|
||||
all_doc_chunks: dict[str, list[str]] = {}
|
||||
total_chunks = 0
|
||||
for doc_id in doc_ids:
|
||||
chunks = load_doc_chunks(doc_id)
|
||||
all_doc_chunks[doc_id] = chunks
|
||||
total_chunks += len(chunks)
|
||||
|
||||
if total_chunks == 0:
|
||||
callback(msg=f"[GraphRAG] kb:{kb_id} has no available chunks in all documents, skip.")
|
||||
return {"ok_docs": [], "failed_docs": doc_ids, "total_docs": len(doc_ids), "total_chunks": 0, "seconds": 0.0}
|
||||
|
||||
semaphore = trio.Semaphore(max_parallel_docs)
|
||||
|
||||
subgraphs: dict[str, object] = {}
|
||||
failed_docs: list[tuple[str, str]] = [] # (doc_id, error)
|
||||
|
||||
async def build_one(doc_id: str):
|
||||
chunks = all_doc_chunks.get(doc_id, [])
|
||||
if not chunks:
|
||||
callback(msg=f"[GraphRAG] doc:{doc_id} has no available chunks, skip generation.")
|
||||
return
|
||||
|
||||
kg_extractor = LightKGExt if ("method" not in kb_parser_config.get("graphrag", {}) or kb_parser_config["graphrag"]["method"] != "general") else GeneralKGExt
|
||||
|
||||
deadline = max(120, len(chunks) * 60 * 10) if enable_timeout_assertion else 10000000000
|
||||
|
||||
async with semaphore:
|
||||
try:
|
||||
msg = f"[GraphRAG] build_subgraph doc:{doc_id}"
|
||||
callback(msg=f"{msg} start (chunks={len(chunks)}, timeout={deadline}s)")
|
||||
with trio.fail_after(deadline):
|
||||
sg = await generate_subgraph(
|
||||
kg_extractor,
|
||||
tenant_id,
|
||||
kb_id,
|
||||
doc_id,
|
||||
chunks,
|
||||
language,
|
||||
kb_parser_config.get("graphrag", {}).get("entity_types", []),
|
||||
chat_model,
|
||||
embedding_model,
|
||||
callback,
|
||||
)
|
||||
if sg:
|
||||
subgraphs[doc_id] = sg
|
||||
callback(msg=f"{msg} done")
|
||||
else:
|
||||
failed_docs.append((doc_id, "subgraph is empty"))
|
||||
callback(msg=f"{msg} empty")
|
||||
except Exception as e:
|
||||
failed_docs.append((doc_id, repr(e)))
|
||||
callback(msg=f"[GraphRAG] build_subgraph doc:{doc_id} FAILED: {e!r}")
|
||||
|
||||
async with trio.open_nursery() as nursery:
|
||||
for doc_id in doc_ids:
|
||||
nursery.start_soon(build_one, doc_id)
|
||||
|
||||
ok_docs = [d for d in doc_ids if d in subgraphs]
|
||||
if not ok_docs:
|
||||
callback(msg=f"[GraphRAG] kb:{kb_id} no subgraphs generated successfully, end.")
|
||||
now = trio.current_time()
|
||||
return {"ok_docs": [], "failed_docs": failed_docs, "total_docs": len(doc_ids), "total_chunks": total_chunks, "seconds": now - start}
|
||||
|
||||
kb_lock = RedisDistributedLock(f"graphrag_task_{kb_id}", lock_value="batch_merge", timeout=1200)
|
||||
await kb_lock.spin_acquire()
|
||||
callback(msg=f"[GraphRAG] kb:{kb_id} merge lock acquired")
|
||||
|
||||
try:
|
||||
union_nodes: set = set()
|
||||
final_graph = None
|
||||
|
||||
for doc_id in ok_docs:
|
||||
sg = subgraphs[doc_id]
|
||||
union_nodes.update(set(sg.nodes()))
|
||||
|
||||
new_graph = await merge_subgraph(
|
||||
tenant_id,
|
||||
kb_id,
|
||||
doc_id,
|
||||
sg,
|
||||
embedding_model,
|
||||
callback,
|
||||
)
|
||||
if new_graph is not None:
|
||||
final_graph = new_graph
|
||||
|
||||
if final_graph is None:
|
||||
callback(msg=f"[GraphRAG] kb:{kb_id} merge finished (no in-memory graph returned).")
|
||||
else:
|
||||
callback(msg=f"[GraphRAG] kb:{kb_id} merge finished, graph ready.")
|
||||
finally:
|
||||
kb_lock.release()
|
||||
|
||||
if not with_resolution and not with_community:
|
||||
now = trio.current_time()
|
||||
callback(msg=f"[GraphRAG] KB merge done in {now - start:.2f}s. ok={len(ok_docs)} / total={len(doc_ids)}")
|
||||
return {"ok_docs": ok_docs, "failed_docs": failed_docs, "total_docs": len(doc_ids), "total_chunks": total_chunks, "seconds": now - start}
|
||||
|
||||
await kb_lock.spin_acquire()
|
||||
callback(msg=f"[GraphRAG] kb:{kb_id} post-merge lock acquired for resolution/community")
|
||||
|
||||
try:
|
||||
subgraph_nodes = set()
|
||||
for sg in subgraphs.values():
|
||||
subgraph_nodes.update(set(sg.nodes()))
|
||||
|
||||
if with_resolution:
|
||||
await resolve_entities(
|
||||
final_graph,
|
||||
subgraph_nodes,
|
||||
tenant_id,
|
||||
kb_id,
|
||||
None,
|
||||
chat_model,
|
||||
embedding_model,
|
||||
callback,
|
||||
)
|
||||
|
||||
if with_community:
|
||||
await extract_community(
|
||||
final_graph,
|
||||
tenant_id,
|
||||
kb_id,
|
||||
None,
|
||||
chat_model,
|
||||
embedding_model,
|
||||
callback,
|
||||
)
|
||||
finally:
|
||||
kb_lock.release()
|
||||
|
||||
now = trio.current_time()
|
||||
callback(msg=f"[GraphRAG] GraphRAG for KB {kb_id} done in {now - start:.2f} seconds. ok={len(ok_docs)} failed={len(failed_docs)} total_docs={len(doc_ids)} total_chunks={total_chunks}")
|
||||
return {
|
||||
"ok_docs": ok_docs,
|
||||
"failed_docs": failed_docs, # [(doc_id, error), ...]
|
||||
"total_docs": len(doc_ids),
|
||||
"total_chunks": total_chunks,
|
||||
"seconds": now - start,
|
||||
}
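# Illustrative only: a sketch of consuming the summary dict returned above. The keys
# match the return statements of run_graphrag_for_kb; row, the model bundles and the
# callback are placeholders.
async def _demo_run_graphrag_for_kb(row: dict, doc_ids: list[str], chat_mdl, embd_mdl, callback):
    summary = await run_graphrag_for_kb(
        row,
        doc_ids,
        "English",
        row["kb_parser_config"],
        chat_mdl,
        embd_mdl,
        callback,
        with_resolution=True,
        with_community=True,
        max_parallel_docs=4,
    )
    logging.info(
        "GraphRAG KB build: %d/%d docs ok, %d chunks, %.2fs",
        len(summary["ok_docs"]), summary["total_docs"], summary["total_chunks"], summary["seconds"],
    )
    if summary["failed_docs"]:
        logging.warning("GraphRAG failed docs: %s", summary["failed_docs"])
    return summary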
|
||||
|
||||
|
||||
async def generate_subgraph(
|
||||
extractor: Extractor,
|
||||
tenant_id: str,
|
||||
kb_id: str,
|
||||
doc_id: str,
|
||||
chunks: list[str],
|
||||
language,
|
||||
entity_types,
|
||||
llm_bdl,
|
||||
embed_bdl,
|
||||
callback,
|
||||
):
|
||||
contains = await does_graph_contains(tenant_id, kb_id, doc_id)
|
||||
if contains:
|
||||
callback(msg=f"Graph already contains {doc_id}")
|
||||
return None
|
||||
start = trio.current_time()
|
||||
ext = extractor(
|
||||
llm_bdl,
|
||||
language=language,
|
||||
entity_types=entity_types,
|
||||
)
|
||||
ents, rels = await ext(doc_id, chunks, callback)
|
||||
subgraph = nx.Graph()
|
||||
for ent in ents:
|
||||
assert "description" in ent, f"entity {ent} does not have description"
|
||||
ent["source_id"] = [doc_id]
|
||||
subgraph.add_node(ent["entity_name"], **ent)
|
||||
|
||||
ignored_rels = 0
|
||||
for rel in rels:
|
||||
assert "description" in rel, f"relation {rel} does not have description"
|
||||
if not subgraph.has_node(rel["src_id"]) or not subgraph.has_node(rel["tgt_id"]):
|
||||
ignored_rels += 1
|
||||
continue
|
||||
rel["source_id"] = [doc_id]
|
||||
subgraph.add_edge(
|
||||
rel["src_id"],
|
||||
rel["tgt_id"],
|
||||
**rel,
|
||||
)
|
||||
if ignored_rels:
|
||||
callback(msg=f"ignored {ignored_rels} relations due to missing entities.")
|
||||
tidy_graph(subgraph, callback, check_attribute=False)
|
||||
|
||||
subgraph.graph["source_id"] = [doc_id]
|
||||
chunk = {
|
||||
"content_with_weight": json.dumps(nx.node_link_data(subgraph, edges="edges"), ensure_ascii=False),
|
||||
"knowledge_graph_kwd": "subgraph",
|
||||
"kb_id": kb_id,
|
||||
"source_id": [doc_id],
|
||||
"available_int": 0,
|
||||
"removed_kwd": "N",
|
||||
}
|
||||
cid = chunk_id(chunk)
|
||||
await trio.to_thread.run_sync(settings.docStoreConn.delete, {"knowledge_graph_kwd": "subgraph", "source_id": doc_id}, search.index_name(tenant_id), kb_id)
|
||||
await trio.to_thread.run_sync(settings.docStoreConn.insert, [{"id": cid, **chunk}], search.index_name(tenant_id), kb_id)
|
||||
now = trio.current_time()
|
||||
callback(msg=f"generated subgraph for doc {doc_id} in {now - start:.2f} seconds.")
|
||||
return subgraph
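# Illustrative only: the subgraph persisted above is stored as node-link JSON inside the
# chunk's content_with_weight field. Reading it back into a networkx graph could look like
# this, assuming a networkx version that accepts the edges="edges" keyword (the same one
# used by node_link_data above).
def _demo_load_subgraph(chunk_doc: dict) -> nx.Graph:
    data = json.loads(chunk_doc["content_with_weight"])
    return nx.node_link_graph(data, edges="edges")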
|
||||
|
||||
|
||||
@timeout(60 * 3)
|
||||
async def merge_subgraph(
|
||||
tenant_id: str,
|
||||
kb_id: str,
|
||||
doc_id: str,
|
||||
subgraph: nx.Graph,
|
||||
embedding_model,
|
||||
callback,
|
||||
):
|
||||
start = trio.current_time()
|
||||
change = GraphChange()
|
||||
old_graph = await get_graph(tenant_id, kb_id, subgraph.graph["source_id"])
|
||||
if old_graph is not None:
|
||||
logging.info("Merge with an exiting graph...................")
|
||||
tidy_graph(old_graph, callback)
|
||||
new_graph = graph_merge(old_graph, subgraph, change)
|
||||
else:
|
||||
new_graph = subgraph
|
||||
change.added_updated_nodes = set(new_graph.nodes())
|
||||
change.added_updated_edges = set(new_graph.edges())
|
||||
pr = nx.pagerank(new_graph)
|
||||
for node_name, pagerank in pr.items():
|
||||
new_graph.nodes[node_name]["pagerank"] = pagerank
|
||||
|
||||
await set_graph(tenant_id, kb_id, embedding_model, new_graph, change, callback)
|
||||
now = trio.current_time()
|
||||
callback(msg=f"merging subgraph for doc {doc_id} into the global graph done in {now - start:.2f} seconds.")
|
||||
return new_graph
|
||||
|
||||
|
||||
@timeout(60 * 30, 1)
|
||||
async def resolve_entities(
|
||||
graph,
|
||||
subgraph_nodes: set[str],
|
||||
tenant_id: str,
|
||||
kb_id: str,
|
||||
doc_id: str,
|
||||
llm_bdl,
|
||||
embed_bdl,
|
||||
callback,
|
||||
):
|
||||
start = trio.current_time()
|
||||
er = EntityResolution(
|
||||
llm_bdl,
|
||||
)
|
||||
reso = await er(graph, subgraph_nodes, callback=callback)
|
||||
graph = reso.graph
|
||||
change = reso.change
|
||||
callback(msg=f"Graph resolution removed {len(change.removed_nodes)} nodes and {len(change.removed_edges)} edges.")
|
||||
callback(msg="Graph resolution updated pagerank.")
|
||||
|
||||
await set_graph(tenant_id, kb_id, embed_bdl, graph, change, callback)
|
||||
now = trio.current_time()
|
||||
callback(msg=f"Graph resolution done in {now - start:.2f}s.")
|
||||
|
||||
|
||||
@timeout(60 * 30, 1)
|
||||
async def extract_community(
|
||||
graph,
|
||||
tenant_id: str,
|
||||
kb_id: str,
|
||||
doc_id: str,
|
||||
llm_bdl,
|
||||
embed_bdl,
|
||||
callback,
|
||||
):
|
||||
start = trio.current_time()
|
||||
ext = CommunityReportsExtractor(
|
||||
llm_bdl,
|
||||
)
|
||||
cr = await ext(graph, callback=callback)
|
||||
community_structure = cr.structured_output
|
||||
community_reports = cr.output
|
||||
doc_ids = graph.graph["source_id"]
|
||||
|
||||
now = trio.current_time()
|
||||
callback(msg=f"Graph extracted {len(cr.structured_output)} communities in {now - start:.2f}s.")
|
||||
start = now
|
||||
chunks = []
|
||||
for stru, rep in zip(community_structure, community_reports):
|
||||
obj = {
|
||||
"report": rep,
|
||||
"evidences": "\n".join([f.get("explanation", "") for f in stru["findings"]]),
|
||||
}
|
||||
chunk = {
|
||||
"id": get_uuid(),
|
||||
"docnm_kwd": stru["title"],
|
||||
"title_tks": rag_tokenizer.tokenize(stru["title"]),
|
||||
"content_with_weight": json.dumps(obj, ensure_ascii=False),
|
||||
"content_ltks": rag_tokenizer.tokenize(obj["report"] + " " + obj["evidences"]),
|
||||
"knowledge_graph_kwd": "community_report",
|
||||
"weight_flt": stru["weight"],
|
||||
"entities_kwd": stru["entities"],
|
||||
"important_kwd": stru["entities"],
|
||||
"kb_id": kb_id,
|
||||
"source_id": list(doc_ids),
|
||||
"available_int": 0,
|
||||
}
|
||||
chunk["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(chunk["content_ltks"])
|
||||
chunks.append(chunk)
|
||||
|
||||
await trio.to_thread.run_sync(
|
||||
lambda: settings.docStoreConn.delete(
|
||||
{"knowledge_graph_kwd": "community_report", "kb_id": kb_id},
|
||||
search.index_name(tenant_id),
|
||||
kb_id,
|
||||
)
|
||||
)
|
||||
es_bulk_size = 4
|
||||
for b in range(0, len(chunks), es_bulk_size):
|
||||
doc_store_result = await trio.to_thread.run_sync(lambda: settings.docStoreConn.insert(chunks[b : b + es_bulk_size], search.index_name(tenant_id), kb_id))
|
||||
if doc_store_result:
|
||||
error_message = f"Insert chunk error: {doc_store_result}, please check log file and Elasticsearch/Infinity status!"
|
||||
raise Exception(error_message)
|
||||
|
||||
now = trio.current_time()
|
||||
callback(msg=f"Graph indexed {len(cr.structured_output)} communities in {now - start:.2f}s.")
|
||||
return community_structure, community_reports
|
||||
149
graphrag/general/leiden.py
Normal file
149
graphrag/general/leiden.py
Normal file
@@ -0,0 +1,149 @@
|
||||
# Copyright (c) 2024 Microsoft Corporation.
|
||||
# Licensed under the MIT License
|
||||
"""
|
||||
Reference:
|
||||
- [graphrag](https://github.com/microsoft/graphrag)
|
||||
"""
|
||||
|
||||
import logging
|
||||
import html
|
||||
from typing import Any, cast
|
||||
from graspologic.partition import hierarchical_leiden
|
||||
from graspologic.utils import largest_connected_component
|
||||
import networkx as nx
|
||||
from networkx import is_empty
|
||||
|
||||
|
||||
def _stabilize_graph(graph: nx.Graph) -> nx.Graph:
|
||||
"""Ensure an undirected graph with the same relationships will always be read the same way."""
|
||||
fixed_graph = nx.DiGraph() if graph.is_directed() else nx.Graph()
|
||||
|
||||
sorted_nodes = graph.nodes(data=True)
|
||||
sorted_nodes = sorted(sorted_nodes, key=lambda x: x[0])
|
||||
|
||||
fixed_graph.add_nodes_from(sorted_nodes)
|
||||
edges = list(graph.edges(data=True))
|
||||
|
||||
# If the graph is undirected, we create the edges in a stable way, so we get the same results
|
||||
# for example:
|
||||
# A -> B
|
||||
# in graph theory is the same as
|
||||
# B -> A
|
||||
# in an undirected graph
|
||||
# however, this can lead to downstream issues because sometimes
|
||||
# consumers read graph.nodes() which ends up being [A, B] and sometimes it's [B, A]
|
||||
# but they base some of their logic on the order of the nodes, so the order ends up being important
|
||||
# so we sort the nodes in the edge in a stable way, so that we always get the same order
|
||||
if not graph.is_directed():
|
||||
|
||||
def _sort_source_target(edge):
|
||||
source, target, edge_data = edge
|
||||
if source > target:
|
||||
temp = source
|
||||
source = target
|
||||
target = temp
|
||||
return source, target, edge_data
|
||||
|
||||
edges = [_sort_source_target(edge) for edge in edges]
|
||||
|
||||
def _get_edge_key(source: Any, target: Any) -> str:
|
||||
return f"{source} -> {target}"
|
||||
|
||||
edges = sorted(edges, key=lambda x: _get_edge_key(x[0], x[1]))
|
||||
|
||||
fixed_graph.add_edges_from(edges)
|
||||
return fixed_graph
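# Illustrative only: a tiny demonstration of the stabilization above. However the edges
# were inserted, the stabilized copy lists nodes and edges in the same sorted order.
def _demo_stabilize():
    g = nx.Graph()
    g.add_edge("B", "A", weight=1)
    g.add_edge("C", "B", weight=2)
    stable = _stabilize_graph(g)
    # list(stable.nodes()) -> ['A', 'B', 'C']
    # list(stable.edges()) -> [('A', 'B'), ('B', 'C')]
    return list(stable.nodes()), list(stable.edges())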
|
||||
|
||||
|
||||
def normalize_node_names(graph: nx.Graph | nx.DiGraph) -> nx.Graph | nx.DiGraph:
|
||||
"""Normalize node names."""
|
||||
node_mapping = {node: html.unescape(node.upper().strip()) for node in graph.nodes()} # type: ignore
|
||||
return nx.relabel_nodes(graph, node_mapping)
|
||||
|
||||
|
||||
def stable_largest_connected_component(graph: nx.Graph) -> nx.Graph:
|
||||
"""Return the largest connected component of the graph, with nodes and edges sorted in a stable way."""
|
||||
graph = graph.copy()
|
||||
graph = cast(nx.Graph, largest_connected_component(graph))
|
||||
graph = normalize_node_names(graph)
|
||||
return _stabilize_graph(graph)
|
||||
|
||||
|
||||
def _compute_leiden_communities(
|
||||
graph: nx.Graph | nx.DiGraph,
|
||||
max_cluster_size: int,
|
||||
use_lcc: bool,
|
||||
seed=0xDEADBEEF,
|
||||
) -> dict[int, dict[str, int]]:
|
||||
"""Return Leiden root communities."""
|
||||
results: dict[int, dict[str, int]] = {}
|
||||
if is_empty(graph):
|
||||
return results
|
||||
if use_lcc:
|
||||
graph = stable_largest_connected_component(graph)
|
||||
|
||||
community_mapping = hierarchical_leiden(
|
||||
graph, max_cluster_size=max_cluster_size, random_seed=seed
|
||||
)
|
||||
for partition in community_mapping:
|
||||
results[partition.level] = results.get(partition.level, {})
|
||||
results[partition.level][partition.node] = partition.cluster
|
||||
|
||||
return results
|
||||
|
||||
|
||||
def run(graph: nx.Graph, args: dict[str, Any]) -> dict[int, dict[str, dict]]:
|
||||
"""Run method definition."""
|
||||
max_cluster_size = args.get("max_cluster_size", 12)
|
||||
use_lcc = args.get("use_lcc", True)
|
||||
if args.get("verbose", False):
|
||||
logging.debug(
|
||||
"Running leiden with max_cluster_size=%s, lcc=%s", max_cluster_size, use_lcc
|
||||
)
|
||||
nodes = set(graph.nodes())
|
||||
if not nodes:
|
||||
return {}
|
||||
|
||||
node_id_to_community_map = _compute_leiden_communities(
|
||||
graph=graph,
|
||||
max_cluster_size=max_cluster_size,
|
||||
use_lcc=use_lcc,
|
||||
seed=args.get("seed", 0xDEADBEEF),
|
||||
)
|
||||
levels = args.get("levels")
|
||||
|
||||
# If they don't pass in levels, use them all
|
||||
if levels is None:
|
||||
levels = sorted(node_id_to_community_map.keys())
|
||||
|
||||
results_by_level: dict[int, dict[str, list[str]]] = {}
|
||||
for level in levels:
|
||||
result = {}
|
||||
results_by_level[level] = result
|
||||
for node_id, raw_community_id in node_id_to_community_map[level].items():
|
||||
if node_id not in nodes:
|
||||
logging.warning(f"Node {node_id} not found in the graph.")
|
||||
continue
|
||||
community_id = str(raw_community_id)
|
||||
if community_id not in result:
|
||||
result[community_id] = {"weight": 0, "nodes": []}
|
||||
result[community_id]["nodes"].append(node_id)
|
||||
result[community_id]["weight"] += graph.nodes[node_id].get("rank", 0) * graph.nodes[node_id].get("weight", 1)
|
||||
weights = [comm["weight"] for _, comm in result.items()]
|
||||
if not weights:
|
||||
continue
|
||||
max_weight = max(weights)
|
||||
if max_weight == 0:
|
||||
continue
|
||||
for _, comm in result.items():
|
||||
comm["weight"] /= max_weight
|
||||
|
||||
return results_by_level
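# Illustrative only: a sketch of calling run() on an in-memory graph. The args keys mirror
# the ones read above; "rank"/"weight" node attributes are optional and default to 0/1.
def _demo_leiden(graph: nx.Graph):
    communities_by_level = run(graph, {"max_cluster_size": 12, "use_lcc": True, "seed": 0xDEADBEEF})
    for level, communities in communities_by_level.items():
        for community_id, info in communities.items():
            logging.debug(
                "level=%s community=%s weight=%.3f nodes=%d",
                level, community_id, info["weight"], len(info["nodes"]),
            )
    return communities_by_level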
|
||||
|
||||
|
||||
def add_community_info2graph(graph: nx.Graph, nodes: list[str], community_title):
|
||||
for n in nodes:
|
||||
if "communities" not in graph.nodes[n]:
|
||||
graph.nodes[n]["communities"] = []
|
||||
graph.nodes[n]["communities"].append(community_title)
|
||||
graph.nodes[n]["communities"] = list(set(graph.nodes[n]["communities"]))
|
||||
179
graphrag/general/mind_map_extractor.py
Normal file
179
graphrag/general/mind_map_extractor.py
Normal file
@@ -0,0 +1,179 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import logging
|
||||
import collections
|
||||
import re
|
||||
from typing import Any
|
||||
from dataclasses import dataclass
|
||||
import trio
|
||||
|
||||
from graphrag.general.extractor import Extractor
|
||||
from graphrag.general.mind_map_prompt import MIND_MAP_EXTRACTION_PROMPT
|
||||
from graphrag.utils import ErrorHandlerFn, perform_variable_replacements, chat_limiter
|
||||
from rag.llm.chat_model import Base as CompletionLLM
|
||||
import markdown_to_json
|
||||
from functools import reduce
|
||||
from rag.utils import num_tokens_from_string
|
||||
|
||||
|
||||
@dataclass
|
||||
class MindMapResult:
|
||||
"""Unipartite Mind Graph result class definition."""
|
||||
output: dict
|
||||
|
||||
|
||||
class MindMapExtractor(Extractor):
|
||||
_input_text_key: str
|
||||
_mind_map_prompt: str
|
||||
_on_error: ErrorHandlerFn
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
llm_invoker: CompletionLLM,
|
||||
prompt: str | None = None,
|
||||
input_text_key: str | None = None,
|
||||
on_error: ErrorHandlerFn | None = None,
|
||||
):
|
||||
"""Init method definition."""
|
||||
# TODO: streamline construction
|
||||
self._llm = llm_invoker
|
||||
self._input_text_key = input_text_key or "input_text"
|
||||
self._mind_map_prompt = prompt or MIND_MAP_EXTRACTION_PROMPT
|
||||
self._on_error = on_error or (lambda _e, _s, _d: None)
|
||||
|
||||
def _key(self, k):
|
||||
return re.sub(r"\*+", "", k)
|
||||
|
||||
def _be_children(self, obj: dict, keyset: set):
|
||||
if isinstance(obj, str):
|
||||
obj = [obj]
|
||||
if isinstance(obj, list):
|
||||
keyset.update(obj)
|
||||
obj = [re.sub(r"\*+", "", i) for i in obj]
|
||||
return [{"id": i, "children": []} for i in obj if i]
|
||||
arr = []
|
||||
for k, v in obj.items():
|
||||
k = self._key(k)
|
||||
if k and k not in keyset:
|
||||
keyset.add(k)
|
||||
arr.append(
|
||||
{
|
||||
"id": k,
|
||||
"children": self._be_children(v, keyset)
|
||||
}
|
||||
)
|
||||
return arr
|
||||
|
||||
async def __call__(
|
||||
self, sections: list[str], prompt_variables: dict[str, Any] | None = None
|
||||
) -> MindMapResult:
|
||||
"""Call method definition."""
|
||||
if prompt_variables is None:
|
||||
prompt_variables = {}
|
||||
|
||||
res = []
|
||||
token_count = max(self._llm.max_length * 0.8, self._llm.max_length - 512)
|
||||
texts = []
|
||||
cnt = 0
|
||||
async with trio.open_nursery() as nursery:
|
||||
for i in range(len(sections)):
|
||||
section_cnt = num_tokens_from_string(sections[i])
|
||||
if cnt + section_cnt >= token_count and texts:
|
||||
nursery.start_soon(self._process_document, "".join(texts), prompt_variables, res)
|
||||
texts = []
|
||||
cnt = 0
|
||||
texts.append(sections[i])
|
||||
cnt += section_cnt
|
||||
if texts:
|
||||
nursery.start_soon(self._process_document, "".join(texts), prompt_variables, res)
|
||||
if not res:
|
||||
return MindMapResult(output={"id": "root", "children": []})
|
||||
merge_json = reduce(self._merge, res)
|
||||
if len(merge_json) > 1:
|
||||
keys = [re.sub(r"\*+", "", k) for k, v in merge_json.items() if isinstance(v, dict)]
|
||||
keyset = set(i for i in keys if i)
|
||||
merge_json = {
|
||||
"id": "root",
|
||||
"children": [
|
||||
{
|
||||
"id": self._key(k),
|
||||
"children": self._be_children(v, keyset)
|
||||
}
|
||||
for k, v in merge_json.items() if isinstance(v, dict) and self._key(k)
|
||||
]
|
||||
}
|
||||
else:
|
||||
k = self._key(list(merge_json.keys())[0])
|
||||
merge_json = {"id": k, "children": self._be_children(list(merge_json.items())[0][1], {k})}
|
||||
|
||||
return MindMapResult(output=merge_json)
|
||||
|
||||
def _merge(self, d1, d2):
|
||||
for k in d1:
|
||||
if k in d2:
|
||||
if isinstance(d1[k], dict) and isinstance(d2[k], dict):
|
||||
self._merge(d1[k], d2[k])
|
||||
elif isinstance(d1[k], list) and isinstance(d2[k], list):
|
||||
d2[k].extend(d1[k])
|
||||
else:
|
||||
d2[k] = d1[k]
|
||||
else:
|
||||
d2[k] = d1[k]
|
||||
|
||||
return d2
|
||||
|
||||
def _list_to_kv(self, data):
|
||||
for key, value in data.items():
|
||||
if isinstance(value, dict):
|
||||
self._list_to_kv(value)
|
||||
elif isinstance(value, list):
|
||||
new_value = {}
|
||||
for i in range(len(value)):
|
||||
if isinstance(value[i], list) and i > 0:
|
||||
new_value[value[i - 1]] = value[i][0]
|
||||
data[key] = new_value
|
||||
else:
|
||||
continue
|
||||
return data
|
||||
|
||||
def _todict(self, layer: collections.OrderedDict):
|
||||
to_ret = layer
|
||||
if isinstance(layer, collections.OrderedDict):
|
||||
to_ret = dict(layer)
|
||||
|
||||
try:
|
||||
for key, value in to_ret.items():
|
||||
to_ret[key] = self._todict(value)
|
||||
except AttributeError:
|
||||
pass
|
||||
|
||||
return self._list_to_kv(to_ret)
|
||||
|
||||
async def _process_document(
|
||||
self, text: str, prompt_variables: dict[str, str], out_res
|
||||
) -> str:
|
||||
variables = {
|
||||
**prompt_variables,
|
||||
self._input_text_key: text,
|
||||
}
|
||||
text = perform_variable_replacements(self._mind_map_prompt, variables=variables)
|
||||
async with chat_limiter:
|
||||
response = await trio.to_thread.run_sync(lambda: self._chat(text, [{"role": "user", "content": "Output:"}], {}))
|
||||
response = re.sub(r"```[^\n]*", "", response)
|
||||
logging.debug(response)
|
||||
logging.debug(self._todict(markdown_to_json.dictify(response)))
|
||||
out_res.append(self._todict(markdown_to_json.dictify(response)))
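# Illustrative only: the mind map produced by MindMapExtractor is a nested
# {"id": ..., "children": [...]} tree. This sketch shows the intermediate
# markdown_to_json.dictify step on a tiny outline; the exact keys depend on the
# markdown the LLM returns, so the commented shape is indicative, not guaranteed.
def _demo_mind_map_shape():
    md = "# Root Topic\n## Section A\n- point 1\n- point 2\n## Section B\n- point 3\n"
    raw = markdown_to_json.dictify(md)  # OrderedDict keyed by headings
    # After MindMapExtractor's merging and normalisation the result resembles:
    # {"id": "Root Topic",
    #  "children": [{"id": "Section A", "children": [{"id": "point 1", "children": []}, ...]},
    #               {"id": "Section B", "children": [{"id": "point 3", "children": []}]}]}
    return raw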
|
||||
35
graphrag/general/mind_map_prompt.py
Normal file
35
graphrag/general/mind_map_prompt.py
Normal file
@@ -0,0 +1,35 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
MIND_MAP_EXTRACTION_PROMPT = """
- Role: You're a talented text processor, summarizing a piece of text into a mind map.

- Steps of task:
1. Generate a title for the user's 'TEXT'.
2. Classify the 'TEXT' into sections of a mind map.
3. If the subject matter is really complex, split them into sub-sections and sub-subsections.
4. Add a short content summary to the bottom-level sections.

- Output requirements:
- Generate at least 4 levels.
- Always try to maximize the number of sub-sections.
- Write in the language of 'TEXT'.
- MUST BE IN MARKDOWN FORMAT.

-TEXT-
{input_text}

"""
|
||||
110
graphrag/general/smoke.py
Normal file
110
graphrag/general/smoke.py
Normal file
@@ -0,0 +1,110 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import logging
|
||||
import networkx as nx
|
||||
import trio
|
||||
|
||||
from api import settings
|
||||
from api.db import LLMType
|
||||
from api.db.services.document_service import DocumentService
|
||||
from api.db.services.knowledgebase_service import KnowledgebaseService
|
||||
from api.db.services.llm_service import LLMBundle
|
||||
from api.db.services.user_service import TenantService
|
||||
from graphrag.general.graph_extractor import GraphExtractor
|
||||
from graphrag.general.index import update_graph, with_resolution, with_community
|
||||
|
||||
settings.init_settings()
|
||||
|
||||
|
||||
def callback(prog=None, msg="Processing..."):
|
||||
logging.info(msg)
|
||||
|
||||
|
||||
async def main():
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument(
|
||||
"-t",
|
||||
"--tenant_id",
|
||||
default=False,
|
||||
help="Tenant ID",
|
||||
action="store",
|
||||
required=True,
|
||||
)
|
||||
parser.add_argument(
|
||||
"-d",
|
||||
"--doc_id",
|
||||
default=False,
|
||||
help="Document ID",
|
||||
action="store",
|
||||
required=True,
|
||||
)
|
||||
args = parser.parse_args()
|
||||
e, doc = DocumentService.get_by_id(args.doc_id)
|
||||
if not e:
|
||||
raise LookupError("Document not found.")
|
||||
kb_id = doc.kb_id
|
||||
|
||||
chunks = [
|
||||
d["content_with_weight"]
|
||||
for d in settings.retrievaler.chunk_list(
|
||||
args.doc_id,
|
||||
args.tenant_id,
|
||||
[kb_id],
|
||||
max_count=6,
|
||||
fields=["content_with_weight"],
|
||||
)
|
||||
]
|
||||
|
||||
_, tenant = TenantService.get_by_id(args.tenant_id)
|
||||
llm_bdl = LLMBundle(args.tenant_id, LLMType.CHAT, tenant.llm_id)
|
||||
_, kb = KnowledgebaseService.get_by_id(kb_id)
|
||||
embed_bdl = LLMBundle(args.tenant_id, LLMType.EMBEDDING, kb.embd_id)
|
||||
|
||||
graph, doc_ids = await update_graph(
|
||||
GraphExtractor,
|
||||
args.tenant_id,
|
||||
kb_id,
|
||||
args.doc_id,
|
||||
chunks,
|
||||
"English",
|
||||
llm_bdl,
|
||||
embed_bdl,
|
||||
callback,
|
||||
)
|
||||
print(json.dumps(nx.node_link_data(graph), ensure_ascii=False, indent=2))
|
||||
|
||||
await with_resolution(
|
||||
args.tenant_id, kb_id, args.doc_id, llm_bdl, embed_bdl, callback
|
||||
)
|
||||
community_structure, community_reports = await with_community(
|
||||
args.tenant_id, kb_id, args.doc_id, llm_bdl, embed_bdl, callback
|
||||
)
|
||||
|
||||
print(
|
||||
"------------------ COMMUNITY STRUCTURE--------------------\n",
|
||||
json.dumps(community_structure, ensure_ascii=False, indent=2),
|
||||
)
|
||||
print(
|
||||
"------------------ COMMUNITY REPORTS----------------------\n",
|
||||
community_reports,
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
trio.run(main)
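# Illustrative only: this smoke test is intended to be run directly, for example
#
#   python -m graphrag.general.smoke -t <tenant_id> -d <doc_id>
#
# assuming the api settings, database services and LLM bundles referenced above are
# reachable from the current environment.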
|
||||
15
graphrag/light/__init__.py
Normal file
15
graphrag/light/__init__.py
Normal file
@@ -0,0 +1,15 @@
|
||||
#
|
||||
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
131
graphrag/light/graph_extractor.py
Normal file
131
graphrag/light/graph_extractor.py
Normal file
@@ -0,0 +1,131 @@
|
||||
# Copyright (c) 2024 Microsoft Corporation.
|
||||
# Licensed under the MIT License
|
||||
"""
|
||||
Reference:
|
||||
- [graphrag](https://github.com/microsoft/graphrag)
|
||||
"""
|
||||
|
||||
import logging
|
||||
import re
|
||||
from dataclasses import dataclass
|
||||
from typing import Any
|
||||
|
||||
import networkx as nx
|
||||
import trio
|
||||
|
||||
from graphrag.general.extractor import ENTITY_EXTRACTION_MAX_GLEANINGS, Extractor
|
||||
from graphrag.light.graph_prompt import PROMPTS
|
||||
from graphrag.utils import chat_limiter, pack_user_ass_to_openai_messages, split_string_by_multi_markers
|
||||
from rag.llm.chat_model import Base as CompletionLLM
|
||||
from rag.utils import num_tokens_from_string
|
||||
|
||||
|
||||
@dataclass
|
||||
class GraphExtractionResult:
|
||||
"""Unipartite graph extraction result class definition."""
|
||||
|
||||
output: nx.Graph
|
||||
source_docs: dict[Any, Any]
|
||||
|
||||
|
||||
class GraphExtractor(Extractor):
|
||||
_max_gleanings: int
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
llm_invoker: CompletionLLM,
|
||||
language: str | None = "English",
|
||||
entity_types: list[str] | None = None,
|
||||
example_number: int = 2,
|
||||
max_gleanings: int | None = None,
|
||||
):
|
||||
super().__init__(llm_invoker, language, entity_types)
|
||||
"""Init method definition."""
|
||||
self._max_gleanings = max_gleanings if max_gleanings is not None else ENTITY_EXTRACTION_MAX_GLEANINGS
|
||||
self._example_number = example_number
|
||||
examples = "\n".join(PROMPTS["entity_extraction_examples"][: int(self._example_number)])
|
||||
|
||||
example_context_base = dict(
|
||||
tuple_delimiter=PROMPTS["DEFAULT_TUPLE_DELIMITER"],
|
||||
record_delimiter=PROMPTS["DEFAULT_RECORD_DELIMITER"],
|
||||
completion_delimiter=PROMPTS["DEFAULT_COMPLETION_DELIMITER"],
|
||||
entity_types=",".join(self._entity_types),
|
||||
language=self._language,
|
||||
)
|
||||
# add example's format
|
||||
examples = examples.format(**example_context_base)
|
||||
|
||||
self._entity_extract_prompt = PROMPTS["entity_extraction"]
|
||||
self._context_base = dict(
|
||||
tuple_delimiter=PROMPTS["DEFAULT_TUPLE_DELIMITER"],
|
||||
record_delimiter=PROMPTS["DEFAULT_RECORD_DELIMITER"],
|
||||
completion_delimiter=PROMPTS["DEFAULT_COMPLETION_DELIMITER"],
|
||||
entity_types=",".join(self._entity_types),
|
||||
examples=examples,
|
||||
language=self._language,
|
||||
)
|
||||
|
||||
self._continue_prompt = PROMPTS["entity_continue_extraction"].format(**self._context_base)
|
||||
self._if_loop_prompt = PROMPTS["entity_if_loop_extraction"]
|
||||
|
||||
self._left_token_count = llm_invoker.max_length - num_tokens_from_string(self._entity_extract_prompt.format(**self._context_base, input_text=""))
|
||||
self._left_token_count = max(llm_invoker.max_length * 0.6, self._left_token_count)
|
||||
|
||||
async def _process_single_content(self, chunk_key_dp: tuple[str, str], chunk_seq: int, num_chunks: int, out_results):
|
||||
token_count = 0
|
||||
chunk_key = chunk_key_dp[0]
|
||||
content = chunk_key_dp[1]
|
||||
hint_prompt = self._entity_extract_prompt.format(**self._context_base, input_text=content)
|
||||
|
||||
gen_conf = {}
|
||||
final_result = ""
|
||||
glean_result = ""
|
||||
if_loop_result = ""
|
||||
history = []
|
||||
logging.info(f"Start processing for {chunk_key}: {content[:25]}...")
|
||||
if self.callback:
|
||||
self.callback(msg=f"Start processing for {chunk_key}: {content[:25]}...")
|
||||
async with chat_limiter:
|
||||
final_result = await trio.to_thread.run_sync(self._chat, "", [{"role": "user", "content": hint_prompt}], gen_conf)
|
||||
token_count += num_tokens_from_string(hint_prompt + final_result)
|
||||
history = pack_user_ass_to_openai_messages(hint_prompt, final_result, self._continue_prompt)
|
||||
for now_glean_index in range(self._max_gleanings):
|
||||
async with chat_limiter:
|
||||
# glean_result = await trio.to_thread.run_sync(lambda: self._chat(hint_prompt, history, gen_conf))
|
||||
glean_result = await trio.to_thread.run_sync(self._chat, "", history, gen_conf)
|
||||
history.extend([{"role": "assistant", "content": glean_result}])
|
||||
token_count += num_tokens_from_string("\n".join([m["content"] for m in history]) + hint_prompt + self._continue_prompt)
|
||||
final_result += glean_result
|
||||
if now_glean_index == self._max_gleanings - 1:
|
||||
break
|
||||
|
||||
history.extend([{"role": "user", "content": self._if_loop_prompt}])
|
||||
async with chat_limiter:
|
||||
if_loop_result = await trio.to_thread.run_sync(self._chat, "", history, gen_conf)
|
||||
token_count += num_tokens_from_string("\n".join([m["content"] for m in history]) + if_loop_result + self._if_loop_prompt)
|
||||
if_loop_result = if_loop_result.strip().strip('"').strip("'").lower()
|
||||
if if_loop_result != "yes":
|
||||
break
|
||||
history.extend([{"role": "assistant", "content": if_loop_result}, {"role": "user", "content": self._continue_prompt}])
|
||||
|
||||
logging.info(f"Completed processing for {chunk_key}: {content[:25]}... after {now_glean_index} gleanings, {token_count} tokens.")
|
||||
if self.callback:
|
||||
self.callback(msg=f"Completed processing for {chunk_key}: {content[:25]}... after {now_glean_index} gleanings, {token_count} tokens.")
|
||||
records = split_string_by_multi_markers(
|
||||
final_result,
|
||||
[self._context_base["record_delimiter"], self._context_base["completion_delimiter"]],
|
||||
)
|
||||
rcds = []
|
||||
for record in records:
|
||||
record = re.search(r"\((.*)\)", record)
|
||||
if record is None:
|
||||
continue
|
||||
rcds.append(record.group(1))
|
||||
records = rcds
|
||||
maybe_nodes, maybe_edges = self._entities_and_relations(chunk_key, records, self._context_base["tuple_delimiter"])
|
||||
out_results.append((maybe_nodes, maybe_edges, token_count))
|
||||
if self.callback:
|
||||
self.callback(
|
||||
0.5 + 0.1 * len(out_results) / num_chunks,
|
||||
msg=f"Entities extraction of chunk {chunk_seq} {len(out_results)}/{num_chunks} done, {len(maybe_nodes)} nodes, {len(maybe_edges)} edges, {token_count} tokens.",
|
||||
)
|
||||
331
graphrag/light/graph_prompt.py
Normal file
331
graphrag/light/graph_prompt.py
Normal file
@@ -0,0 +1,331 @@
|
||||
# Licensed under the MIT License
|
||||
"""
|
||||
Reference:
|
||||
- [LightRAG](https://github.com/HKUDS/LightRAG/blob/main/lightrag/prompt.py)
|
||||
"""
|
||||
|
||||
from typing import Any
|
||||
|
||||
PROMPTS: dict[str, Any] = {}
|
||||
|
||||
PROMPTS["DEFAULT_LANGUAGE"] = "English"
|
||||
PROMPTS["DEFAULT_TUPLE_DELIMITER"] = "<|>"
|
||||
PROMPTS["DEFAULT_RECORD_DELIMITER"] = "##"
|
||||
PROMPTS["DEFAULT_COMPLETION_DELIMITER"] = "<|COMPLETE|>"
|
||||
|
||||
PROMPTS["DEFAULT_ENTITY_TYPES"] = ["organization", "person", "geo", "event", "category"]
|
||||
|
||||
PROMPTS["DEFAULT_USER_PROMPT"] = "n/a"
|
||||
|
||||
PROMPTS["entity_extraction"] = """---Goal---
|
||||
Given a text document that is potentially relevant to this activity and a list of entity types, identify all entities of those types from the text and all relationships among the identified entities.
|
||||
Use {language} as output language.
|
||||
|
||||
---Steps---
|
||||
1. Identify all entities. For each identified entity, extract the following information:
|
||||
- entity_name: Name of the entity; use the same language as the input text. If English, capitalize the name.
|
||||
- entity_type: One of the following types: [{entity_types}]
|
||||
- entity_description: Provide a comprehensive description of the entity's attributes and activities *based solely on the information present in the input text*. **Do not infer or hallucinate information not explicitly stated.** If the text provides insufficient information to create a comprehensive description, state "Description not available in text."
|
||||
Format each entity as ("entity"{tuple_delimiter}<entity_name>{tuple_delimiter}<entity_type>{tuple_delimiter}<entity_description>)
|
||||
|
||||
2. From the entities identified in step 1, identify all pairs of (source_entity, target_entity) that are *clearly related* to each other.
|
||||
For each pair of related entities, extract the following information:
|
||||
- source_entity: name of the source entity, as identified in step 1
|
||||
- target_entity: name of the target entity, as identified in step 1
|
||||
- relationship_description: explanation as to why you think the source entity and the target entity are related to each other
|
||||
- relationship_strength: a numeric score indicating strength of the relationship between the source entity and target entity
|
||||
- relationship_keywords: one or more high-level key words that summarize the overarching nature of the relationship, focusing on concepts or themes rather than specific details
|
||||
Format each relationship as ("relationship"{tuple_delimiter}<source_entity>{tuple_delimiter}<target_entity>{tuple_delimiter}<relationship_description>{tuple_delimiter}<relationship_keywords>{tuple_delimiter}<relationship_strength>)
|
||||
|
||||
3. Identify high-level key words that summarize the main concepts, themes, or topics of the entire text. These should capture the overarching ideas present in the document.
|
||||
Format the content-level key words as ("content_keywords"{tuple_delimiter}<high_level_keywords>)
|
||||
|
||||
4. Return output in {language} as a single list of all the entities and relationships identified in steps 1 and 2. Use **{record_delimiter}** as the list delimiter.
|
||||
|
||||
5. When finished, output {completion_delimiter}
|
||||
|
||||
######################
|
||||
---Examples---
|
||||
######################
|
||||
{examples}
|
||||
|
||||
#############################
|
||||
---Real Data---
|
||||
######################
|
||||
Entity_types: [{entity_types}]
|
||||
Text:
|
||||
{input_text}
|
||||
######################
|
||||
Output:"""
|
||||
|
||||
PROMPTS["entity_extraction_examples"] = [
|
||||
"""Example 1:
|
||||
|
||||
Entity_types: [person, technology, mission, organization, location]
|
||||
Text:
|
||||
```
|
||||
while Alex clenched his jaw, the buzz of frustration dull against the backdrop of Taylor's authoritarian certainty. It was this competitive undercurrent that kept him alert, the sense that his and Jordan's shared commitment to discovery was an unspoken rebellion against Cruz's narrowing vision of control and order.
|
||||
|
||||
Then Taylor did something unexpected. They paused beside Jordan and, for a moment, observed the device with something akin to reverence. "If this tech can be understood..." Taylor said, their voice quieter, "It could change the game for us. For all of us."
|
||||
|
||||
The underlying dismissal earlier seemed to falter, replaced by a glimpse of reluctant respect for the gravity of what lay in their hands. Jordan looked up, and for a fleeting heartbeat, their eyes locked with Taylor's, a wordless clash of wills softening into an uneasy truce.
|
||||
|
||||
It was a small transformation, barely perceptible, but one that Alex noted with an inward nod. They had all been brought here by different paths
|
||||
```
|
||||
|
||||
Output:
|
||||
("entity"{tuple_delimiter}"Alex"{tuple_delimiter}"person"{tuple_delimiter}"Alex is a character who experiences frustration and is observant of the dynamics among other characters."){record_delimiter}
|
||||
("entity"{tuple_delimiter}"Taylor"{tuple_delimiter}"person"{tuple_delimiter}"Taylor is portrayed with authoritarian certainty and shows a moment of reverence towards a device, indicating a change in perspective."){record_delimiter}
|
||||
("entity"{tuple_delimiter}"Jordan"{tuple_delimiter}"person"{tuple_delimiter}"Jordan shares a commitment to discovery and has a significant interaction with Taylor regarding a device."){record_delimiter}
|
||||
("entity"{tuple_delimiter}"Cruz"{tuple_delimiter}"person"{tuple_delimiter}"Cruz is associated with a vision of control and order, influencing the dynamics among other characters."){record_delimiter}
|
||||
("entity"{tuple_delimiter}"The Device"{tuple_delimiter}"technology"{tuple_delimiter}"The Device is central to the story, with potential game-changing implications, and is revered by Taylor."){record_delimiter}
|
||||
("relationship"{tuple_delimiter}"Alex"{tuple_delimiter}"Taylor"{tuple_delimiter}"Alex is affected by Taylor's authoritarian certainty and observes changes in Taylor's attitude towards the device."{tuple_delimiter}"power dynamics, perspective shift"{tuple_delimiter}7){record_delimiter}
|
||||
("relationship"{tuple_delimiter}"Alex"{tuple_delimiter}"Jordan"{tuple_delimiter}"Alex and Jordan share a commitment to discovery, which contrasts with Cruz's vision."{tuple_delimiter}"shared goals, rebellion"{tuple_delimiter}6){record_delimiter}
|
||||
("relationship"{tuple_delimiter}"Taylor"{tuple_delimiter}"Jordan"{tuple_delimiter}"Taylor and Jordan interact directly regarding the device, leading to a moment of mutual respect and an uneasy truce."{tuple_delimiter}"conflict resolution, mutual respect"{tuple_delimiter}8){record_delimiter}
|
||||
("relationship"{tuple_delimiter}"Jordan"{tuple_delimiter}"Cruz"{tuple_delimiter}"Jordan's commitment to discovery is in rebellion against Cruz's vision of control and order."{tuple_delimiter}"ideological conflict, rebellion"{tuple_delimiter}5){record_delimiter}
|
||||
("relationship"{tuple_delimiter}"Taylor"{tuple_delimiter}"The Device"{tuple_delimiter}"Taylor shows reverence towards the device, indicating its importance and potential impact."{tuple_delimiter}"reverence, technological significance"{tuple_delimiter}9){record_delimiter}
|
||||
("content_keywords"{tuple_delimiter}"power dynamics, ideological conflict, discovery, rebellion"){completion_delimiter}
|
||||
#############################""",
|
||||
"""Example 2:
|
||||
|
||||
Entity_types: [company, index, commodity, market_trend, economic_policy, biological]
|
||||
Text:
|
||||
```
|
||||
Stock markets faced a sharp downturn today as tech giants saw significant declines, with the Global Tech Index dropping by 3.4% in midday trading. Analysts attribute the selloff to investor concerns over rising interest rates and regulatory uncertainty.
|
||||
|
||||
Among the hardest hit, Nexon Technologies saw its stock plummet by 7.8% after reporting lower-than-expected quarterly earnings. In contrast, Omega Energy posted a modest 2.1% gain, driven by rising oil prices.
|
||||
|
||||
Meanwhile, commodity markets reflected a mixed sentiment. Gold futures rose by 1.5%, reaching $2,080 per ounce, as investors sought safe-haven assets. Crude oil prices continued their rally, climbing to $87.60 per barrel, supported by supply constraints and strong demand.
|
||||
|
||||
Financial experts are closely watching the Federal Reserve's next move, as speculation grows over potential rate hikes. The upcoming policy announcement is expected to influence investor confidence and overall market stability.
|
||||
```
|
||||
|
||||
Output:
|
||||
("entity"{tuple_delimiter}"Global Tech Index"{tuple_delimiter}"index"{tuple_delimiter}"The Global Tech Index tracks the performance of major technology stocks and experienced a 3.4% decline today."){record_delimiter}
|
||||
("entity"{tuple_delimiter}"Nexon Technologies"{tuple_delimiter}"company"{tuple_delimiter}"Nexon Technologies is a tech company that saw its stock decline by 7.8% after disappointing earnings."){record_delimiter}
|
||||
("entity"{tuple_delimiter}"Omega Energy"{tuple_delimiter}"company"{tuple_delimiter}"Omega Energy is an energy company that gained 2.1% in stock value due to rising oil prices."){record_delimiter}
|
||||
("entity"{tuple_delimiter}"Gold Futures"{tuple_delimiter}"commodity"{tuple_delimiter}"Gold futures rose by 1.5%, indicating increased investor interest in safe-haven assets."){record_delimiter}
|
||||
("entity"{tuple_delimiter}"Crude Oil"{tuple_delimiter}"commodity"{tuple_delimiter}"Crude oil prices rose to $87.60 per barrel due to supply constraints and strong demand."){record_delimiter}
|
||||
("entity"{tuple_delimiter}"Market Selloff"{tuple_delimiter}"market_trend"{tuple_delimiter}"Market selloff refers to the significant decline in stock values due to investor concerns over interest rates and regulations."){record_delimiter}
|
||||
("entity"{tuple_delimiter}"Federal Reserve Policy Announcement"{tuple_delimiter}"economic_policy"{tuple_delimiter}"The Federal Reserve's upcoming policy announcement is expected to impact investor confidence and market stability."){record_delimiter}
|
||||
("relationship"{tuple_delimiter}"Global Tech Index"{tuple_delimiter}"Market Selloff"{tuple_delimiter}"The decline in the Global Tech Index is part of the broader market selloff driven by investor concerns."{tuple_delimiter}"market performance, investor sentiment"{tuple_delimiter}9){record_delimiter}
|
||||
("relationship"{tuple_delimiter}"Nexon Technologies"{tuple_delimiter}"Global Tech Index"{tuple_delimiter}"Nexon Technologies' stock decline contributed to the overall drop in the Global Tech Index."{tuple_delimiter}"company impact, index movement"{tuple_delimiter}8){record_delimiter}
|
||||
("relationship"{tuple_delimiter}"Gold Futures"{tuple_delimiter}"Market Selloff"{tuple_delimiter}"Gold prices rose as investors sought safe-haven assets during the market selloff."{tuple_delimiter}"market reaction, safe-haven investment"{tuple_delimiter}10){record_delimiter}
|
||||
("relationship"{tuple_delimiter}"Federal Reserve Policy Announcement"{tuple_delimiter}"Market Selloff"{tuple_delimiter}"Speculation over Federal Reserve policy changes contributed to market volatility and investor selloff."{tuple_delimiter}"interest rate impact, financial regulation"{tuple_delimiter}7){record_delimiter}
|
||||
("content_keywords"{tuple_delimiter}"market downturn, investor sentiment, commodities, Federal Reserve, stock performance"){completion_delimiter}
|
||||
#############################""",
|
||||
"""Example 3:
|
||||
|
||||
Entity_types: [economic_policy, athlete, event, location, record, organization, equipment]
|
||||
Text:
|
||||
```
|
||||
At the World Athletics Championship in Tokyo, Noah Carter broke the 100m sprint record using cutting-edge carbon-fiber spikes.
|
||||
```
|
||||
|
||||
Output:
|
||||
("entity"{tuple_delimiter}"World Athletics Championship"{tuple_delimiter}"event"{tuple_delimiter}"The World Athletics Championship is a global sports competition featuring top athletes in track and field."){record_delimiter}
|
||||
("entity"{tuple_delimiter}"Tokyo"{tuple_delimiter}"location"{tuple_delimiter}"Tokyo is the host city of the World Athletics Championship."){record_delimiter}
|
||||
("entity"{tuple_delimiter}"Noah Carter"{tuple_delimiter}"athlete"{tuple_delimiter}"Noah Carter is a sprinter who set a new record in the 100m sprint at the World Athletics Championship."){record_delimiter}
|
||||
("entity"{tuple_delimiter}"100m Sprint Record"{tuple_delimiter}"record"{tuple_delimiter}"The 100m sprint record is a benchmark in athletics, recently broken by Noah Carter."){record_delimiter}
|
||||
("entity"{tuple_delimiter}"Carbon-Fiber Spikes"{tuple_delimiter}"equipment"{tuple_delimiter}"Carbon-fiber spikes are advanced sprinting shoes that provide enhanced speed and traction."){record_delimiter}
|
||||
("entity"{tuple_delimiter}"World Athletics Federation"{tuple_delimiter}"organization"{tuple_delimiter}"The World Athletics Federation is the governing body overseeing the World Athletics Championship and record validations."){record_delimiter}
|
||||
("relationship"{tuple_delimiter}"World Athletics Championship"{tuple_delimiter}"Tokyo"{tuple_delimiter}"The World Athletics Championship is being hosted in Tokyo."{tuple_delimiter}"event location, international competition"{tuple_delimiter}8){record_delimiter}
|
||||
("relationship"{tuple_delimiter}"Noah Carter"{tuple_delimiter}"100m Sprint Record"{tuple_delimiter}"Noah Carter set a new 100m sprint record at the championship."{tuple_delimiter}"athlete achievement, record-breaking"{tuple_delimiter}10){record_delimiter}
|
||||
("relationship"{tuple_delimiter}"Noah Carter"{tuple_delimiter}"Carbon-Fiber Spikes"{tuple_delimiter}"Noah Carter used carbon-fiber spikes to enhance performance during the race."{tuple_delimiter}"athletic equipment, performance boost"{tuple_delimiter}7){record_delimiter}
|
||||
("relationship"{tuple_delimiter}"World Athletics Federation"{tuple_delimiter}"100m Sprint Record"{tuple_delimiter}"The World Athletics Federation is responsible for validating and recognizing new sprint records."{tuple_delimiter}"sports regulation, record certification"{tuple_delimiter}9){record_delimiter}
|
||||
("content_keywords"{tuple_delimiter}"athletics, sprinting, record-breaking, sports technology, competition"){completion_delimiter}
|
||||
#############################""",
|
||||
]
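The extraction output above is a flat, delimiter-separated list rather than JSON, so downstream code has to split and re-tuple it. Below is a minimal parsing sketch, not the project's actual parser; the concrete delimiter defaults (`##`, `<|>`, `<|COMPLETE|>`) are assumptions for illustration, since the real values are injected through the `{record_delimiter}`, `{tuple_delimiter}` and `{completion_delimiter}` template variables.

```python
import re

def parse_extraction_output(text: str,
                            record_delimiter: str = "##",
                            tuple_delimiter: str = "<|>",
                            completion_delimiter: str = "<|COMPLETE|>"):
    """Split the delimited LLM output into entity and relationship dicts."""
    entities, relationships = [], []
    # Everything after the completion marker is ignored.
    for record in text.split(completion_delimiter)[0].split(record_delimiter):
        # Keep only the content inside the outermost parentheses of each record.
        m = re.search(r"\((.*)\)", record, flags=re.DOTALL)
        if not m:
            continue
        attrs = [a.strip().strip('"') for a in m.group(1).split(tuple_delimiter)]
        if attrs[0] == "entity" and len(attrs) >= 4:
            entities.append({"name": attrs[1], "type": attrs[2], "description": attrs[3]})
        elif attrs[0] == "relationship" and len(attrs) >= 6:
            try:
                strength = float(attrs[5])
            except ValueError:
                strength = 1.0
            relationships.append({"source": attrs[1], "target": attrs[2],
                                  "description": attrs[3], "keywords": attrs[4],
                                  "strength": strength})
    return entities, relationships
```

Records whose first field is `content_keywords` would be handled analogously.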
|
||||
|
||||
PROMPTS["summarize_entity_descriptions"] = """You are a helpful assistant responsible for generating a comprehensive summary of the data provided below.
|
||||
You are given one or two entities and a list of descriptions, all related to the same entity or group of entities.
|
||||
Please concatenate all of these into a single, comprehensive description. Make sure to include information collected from all the descriptions.
|
||||
If the provided descriptions are contradictory, please resolve the contradictions and provide a single, coherent summary.
|
||||
Make sure it is written in the third person, and include the entity names so we have the full context.
|
||||
Use {language} as output language.
|
||||
|
||||
#######
|
||||
---Data---
|
||||
Entities: {entity_name}
|
||||
Description List: {description_list}
|
||||
#######
|
||||
Output:
|
||||
"""
|
||||
|
||||
PROMPTS["entity_continue_extraction"] = """
|
||||
MANY entities and relationships were missed in the last extraction. Please find only the missing entities and relationships from the previous text.
|
||||
|
||||
---Remember Steps---
|
||||
|
||||
1. Identify all entities. For each identified entity, extract the following information:
|
||||
- entity_name: Name of the entity, using the same language as the input text. If English, capitalize the name
|
||||
- entity_type: One of the following types: [{entity_types}]
|
||||
- entity_description: Provide a comprehensive description of the entity's attributes and activities *based solely on the information present in the input text*. **Do not infer or hallucinate information not explicitly stated.** If the text provides insufficient information to create a comprehensive description, state "Description not available in text."
|
||||
Format each entity as ("entity"{tuple_delimiter}<entity_name>{tuple_delimiter}<entity_type>{tuple_delimiter}<entity_description>)
|
||||
|
||||
2. From the entities identified in step 1, identify all pairs of (source_entity, target_entity) that are *clearly related* to each other.
|
||||
For each pair of related entities, extract the following information:
|
||||
- source_entity: name of the source entity, as identified in step 1
|
||||
- target_entity: name of the target entity, as identified in step 1
|
||||
- relationship_description: explanation as to why you think the source entity and the target entity are related to each other
|
||||
- relationship_strength: a numeric score indicating strength of the relationship between the source entity and target entity
|
||||
- relationship_keywords: one or more high-level key words that summarize the overarching nature of the relationship, focusing on concepts or themes rather than specific details
|
||||
Format each relationship as ("relationship"{tuple_delimiter}<source_entity>{tuple_delimiter}<target_entity>{tuple_delimiter}<relationship_description>{tuple_delimiter}<relationship_keywords>{tuple_delimiter}<relationship_strength>)
|
||||
|
||||
3. Identify high-level key words that summarize the main concepts, themes, or topics of the entire text. These should capture the overarching ideas present in the document.
|
||||
Format the content-level key words as ("content_keywords"{tuple_delimiter}<high_level_keywords>)
|
||||
|
||||
4. Return output in {language} as a single list of all the entities and relationships identified in steps 1 and 2. Use **{record_delimiter}** as the list delimiter.
|
||||
|
||||
5. When finished, output {completion_delimiter}
|
||||
|
||||
---Output---
|
||||
|
||||
Add new entities and relations below using the same format, and do not include entities and relations that have been previously extracted:\n
|
||||
""".strip()
|
||||
|
||||
PROMPTS["entity_if_loop_extraction"] = """
|
||||
---Goal---
|
||||
|
||||
It appears some entities may have still been missed.
|
||||
|
||||
---Output---
|
||||
|
||||
Answer ONLY with `YES` or `NO` to indicate whether there are still entities that need to be added.
|
||||
""".strip()
|
||||
|
||||
PROMPTS["fail_response"] = "Sorry, I'm not able to provide an answer to that question.[no-context]"
|
||||
|
||||
PROMPTS["rag_response"] = """---Role---
|
||||
|
||||
You are a helpful assistant responding to a user query about the Knowledge Graph and Document Chunks provided in JSON format below.
|
||||
|
||||
|
||||
---Goal---
|
||||
|
||||
Generate a concise response based on the Knowledge Base and follow the Response Rules, considering both the current query and the conversation history if provided. Summarize all information in the provided Knowledge Base, incorporating general knowledge relevant to the Knowledge Base. Do not include information not provided by the Knowledge Base.
|
||||
|
||||
---Conversation History---
|
||||
{history}
|
||||
|
||||
---Knowledge Graph and Document Chunks---
|
||||
{context_data}
|
||||
|
||||
---RESPONSE GUIDELINES---
|
||||
**1. Content & Adherence:**
|
||||
- Strictly adhere to the provided context from the Knowledge Base. Do not invent, assume, or include any information not present in the source data.
|
||||
- If the answer cannot be found in the provided context, state that you do not have enough information to answer.
|
||||
- Ensure the response maintains continuity with the conversation history.
|
||||
|
||||
**2. Formatting & Language:**
|
||||
- Format the response using markdown with appropriate section headings.
|
||||
- The response must be in the same language as the user's question.
|
||||
- Target format and length: {response_type}
|
||||
|
||||
**3. Citations / References:**
|
||||
- At the end of the response, under a "References" section, each citation must clearly indicate its origin (KG or DC).
|
||||
- The maximum number of citations is 5, including both KG and DC.
|
||||
- Use the following formats for citations:
|
||||
- For a Knowledge Graph Entity: `[KG] <entity_name>`
|
||||
- For a Knowledge Graph Relationship: `[KG] <entity1_name> - <entity2_name>`
|
||||
- For a Document Chunk: `[DC] <file_path_or_document_name>`
|
||||
|
||||
---USER CONTEXT---
|
||||
- Additional user prompt: {user_prompt}
|
||||
|
||||
|
||||
Response:"""
|
||||
|
||||
PROMPTS["keywords_extraction"] = """---Role---
|
||||
You are an expert keyword extractor, specializing in analyzing user queries for a Retrieval-Augmented Generation (RAG) system. Your purpose is to identify both high-level and low-level keywords in the user's query that will be used for effective document retrieval.
|
||||
|
||||
---Goal---
|
||||
Given a user query, your task is to extract two distinct types of keywords:
|
||||
1. **high_level_keywords**: for overarching concepts or themes, capturing the user's core intent, the subject area, or the type of question being asked.
|
||||
2. **low_level_keywords**: for specific entities or details, identifying proper nouns, technical jargon, product names, or other concrete items.
|
||||
|
||||
---Instructions & Constraints---
|
||||
1. **Output Format**: Your output MUST be a valid JSON object and nothing else. Do not include any explanatory text, markdown code fences (like ```json), or any other text before or after the JSON. It will be parsed directly by a JSON parser.
|
||||
2. **Source of Truth**: All keywords must be explicitly derived from the user query, and both the high-level and low-level keyword categories must contain content.
|
||||
3. **Concise & Meaningful**: Keywords should be concise words or meaningful phrases. Prioritize multi-word phrases when they represent a single concept. For example, from "latest financial report of Apple Inc.", you should extract "latest financial report" and "Apple Inc." rather than "latest", "financial", "report", and "Apple".
|
||||
4. **Handle Edge Cases**: For queries that are too simple, vague, or nonsensical (e.g., "hello", "ok", "asdfghjkl"), you must return a JSON object with empty lists for both keyword types.
|
||||
|
||||
---Examples---
|
||||
{examples}
|
||||
|
||||
---Real Data---
|
||||
User Query: {query}
|
||||
|
||||
---Output---
|
||||
"""
|
||||
|
||||
PROMPTS["keywords_extraction_examples"] = [
|
||||
"""Example 1:
|
||||
|
||||
Query: "How does international trade influence global economic stability?"
|
||||
|
||||
Output:
|
||||
{
|
||||
"high_level_keywords": ["International trade", "Global economic stability", "Economic impact"],
|
||||
"low_level_keywords": ["Trade agreements", "Tariffs", "Currency exchange", "Imports", "Exports"]
|
||||
}
|
||||
|
||||
""",
|
||||
"""Example 2:
|
||||
|
||||
Query: "What are the environmental consequences of deforestation on biodiversity?"
|
||||
|
||||
Output:
|
||||
{
|
||||
"high_level_keywords": ["Environmental consequences", "Deforestation", "Biodiversity loss"],
|
||||
"low_level_keywords": ["Species extinction", "Habitat destruction", "Carbon emissions", "Rainforest", "Ecosystem"]
|
||||
}
|
||||
|
||||
""",
|
||||
"""Example 3:
|
||||
|
||||
Query: "What is the role of education in reducing poverty?"
|
||||
|
||||
Output:
|
||||
{
|
||||
"high_level_keywords": ["Education", "Poverty reduction", "Socioeconomic development"],
|
||||
"low_level_keywords": ["School access", "Literacy rates", "Job training", "Income inequality"]
|
||||
}
|
||||
|
||||
""",
|
||||
]
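Since the keyword prompt demands bare JSON, the consumer still has to guard against near-miss output. Below is a small defensive parsing sketch, not the project's actual code, using `json_repair` (the same library `graphrag/search.py` later in this diff relies on); the empty-list fallback mirrors rule 4 above.

```python
import json_repair

def parse_keywords(raw: str) -> tuple[list[str], list[str]]:
    """Return (high_level_keywords, low_level_keywords), or empty lists on failure."""
    try:
        data = json_repair.loads(raw)
    except Exception:
        return [], []
    if not isinstance(data, dict):
        return [], []
    return (list(data.get("high_level_keywords", []) or []),
            list(data.get("low_level_keywords", []) or []))
```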
|
||||
|
||||
PROMPTS["naive_rag_response"] = """---Role---
|
||||
|
||||
You are a helpful assistant responding to a user query about the Document Chunks provided in JSON format below.
|
||||
|
||||
---Goal---
|
||||
|
||||
Generate a concise response based on the Document Chunks and follow the Response Rules, considering both the conversation history and the current query. Summarize all information in the provided Document Chunks, incorporating general knowledge relevant to the Document Chunks. Do not include information not provided by the Document Chunks.
|
||||
|
||||
---Conversation History---
|
||||
{history}
|
||||
|
||||
---Document Chunks(DC)---
|
||||
{content_data}
|
||||
|
||||
---RESPONSE GUIDELINES---
|
||||
**1. Content & Adherence:**
|
||||
- Strictly adhere to the provided context from the Document Chunks. Do not invent, assume, or include any information not present in the source data.
|
||||
- If the answer cannot be found in the provided context, state that you do not have enough information to answer.
|
||||
- Ensure the response maintains continuity with the conversation history.
|
||||
|
||||
**2. Formatting & Language:**
|
||||
- Format the response using markdown with appropriate section headings.
|
||||
- The response language must match the user's question language.
|
||||
- Target format and length: {response_type}
|
||||
|
||||
**3. Citations / References:**
|
||||
- At the end of the response, under a "References" section, cite a maximum of 5 most relevant sources used.
|
||||
- Use the following formats for citations: `[DC] <file_path_or_document_name>`
|
||||
|
||||
---USER CONTEXT---
|
||||
- Additional user prompt: {user_prompt}
|
||||
|
||||
|
||||
Response:"""
|
||||
96
graphrag/light/smoke.py
Normal file
@@ -0,0 +1,96 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import argparse
|
||||
import json
|
||||
from api import settings
|
||||
import networkx as nx
|
||||
import logging
|
||||
import trio
|
||||
|
||||
from api.db import LLMType
|
||||
from api.db.services.document_service import DocumentService
|
||||
from api.db.services.knowledgebase_service import KnowledgebaseService
|
||||
from api.db.services.llm_service import LLMBundle
|
||||
from api.db.services.user_service import TenantService
|
||||
from graphrag.general.index import update_graph
|
||||
from graphrag.light.graph_extractor import GraphExtractor
|
||||
|
||||
settings.init_settings()
|
||||
|
||||
|
||||
def callback(prog=None, msg="Processing..."):
|
||||
logging.info(msg)
|
||||
|
||||
|
||||
async def main():
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument(
|
||||
"-t",
|
||||
"--tenant_id",
|
||||
default=False,
|
||||
help="Tenant ID",
|
||||
action="store",
|
||||
required=True,
|
||||
)
|
||||
parser.add_argument(
|
||||
"-d",
|
||||
"--doc_id",
|
||||
default=False,
|
||||
help="Document ID",
|
||||
action="store",
|
||||
required=True,
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
e, doc = DocumentService.get_by_id(args.doc_id)
|
||||
if not e:
|
||||
raise LookupError("Document not found.")
|
||||
kb_id = doc.kb_id
|
||||
|
||||
chunks = [
|
||||
d["content_with_weight"]
|
||||
for d in settings.retrievaler.chunk_list(
|
||||
args.doc_id,
|
||||
args.tenant_id,
|
||||
[kb_id],
|
||||
max_count=6,
|
||||
fields=["content_with_weight"],
|
||||
)
|
||||
]
|
||||
|
||||
_, tenant = TenantService.get_by_id(args.tenant_id)
|
||||
llm_bdl = LLMBundle(args.tenant_id, LLMType.CHAT, tenant.llm_id)
|
||||
_, kb = KnowledgebaseService.get_by_id(kb_id)
|
||||
embed_bdl = LLMBundle(args.tenant_id, LLMType.EMBEDDING, kb.embd_id)
|
||||
|
||||
graph, doc_ids = await update_graph(
|
||||
GraphExtractor,
|
||||
args.tenant_id,
|
||||
kb_id,
|
||||
args.doc_id,
|
||||
chunks,
|
||||
"English",
|
||||
llm_bdl,
|
||||
embed_bdl,
|
||||
callback,
|
||||
)
|
||||
|
||||
print(json.dumps(nx.node_link_data(graph), ensure_ascii=False, indent=2))
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
trio.run(main)
|
||||
218
graphrag/query_analyze_prompt.py
Normal file
@@ -0,0 +1,218 @@
|
||||
# Licensed under the MIT License
|
||||
"""
|
||||
Reference:
|
||||
- [LightRag](https://github.com/HKUDS/LightRAG)
|
||||
- [MiniRAG](https://github.com/HKUDS/MiniRAG)
|
||||
"""
|
||||
PROMPTS = {}
|
||||
|
||||
PROMPTS["minirag_query2kwd"] = """---Role---
|
||||
|
||||
You are a helpful assistant tasked with identifying both answer-type and low-level keywords in the user's query.
|
||||
|
||||
---Goal---
|
||||
|
||||
Given the query, list both answer-type and low-level keywords.
|
||||
answer_type_keywords focus on the type of the answer to the given query, while low-level keywords focus on specific entities, details, or concrete terms.
|
||||
The answer_type_keywords must be selected from the Answer type pool.
|
||||
This pool is a dictionary, where each key represents a type you should choose from and the value provides example samples.
|
||||
|
||||
---Instructions---
|
||||
|
||||
- Output the keywords in JSON format.
|
||||
- The JSON should have two keys:
|
||||
- "answer_type_keywords" for the types of the answer. In this list, the types with the highest likelihood should be placed at the forefront. No more than 3.
|
||||
- "entities_from_query" for specific entities or details. It must be extracted from the query.
|
||||
######################
|
||||
-Examples-
|
||||
######################
|
||||
Example 1:
|
||||
|
||||
Query: "How does international trade influence global economic stability?"
|
||||
Answer type pool: {{
|
||||
'PERSONAL LIFE': ['FAMILY TIME', 'HOME MAINTENANCE'],
|
||||
'STRATEGY': ['MARKETING PLAN', 'BUSINESS EXPANSION'],
|
||||
'SERVICE FACILITATION': ['ONLINE SUPPORT', 'CUSTOMER SERVICE TRAINING'],
|
||||
'PERSON': ['JANE DOE', 'JOHN SMITH'],
|
||||
'FOOD': ['PASTA', 'SUSHI'],
|
||||
'EMOTION': ['HAPPINESS', 'ANGER'],
|
||||
'PERSONAL EXPERIENCE': ['TRAVEL ABROAD', 'STUDYING ABROAD'],
|
||||
'INTERACTION': ['TEAM MEETING', 'NETWORKING EVENT'],
|
||||
'BEVERAGE': ['COFFEE', 'TEA'],
|
||||
'PLAN': ['ANNUAL BUDGET', 'PROJECT TIMELINE'],
|
||||
'GEO': ['NEW YORK CITY', 'SOUTH AFRICA'],
|
||||
'GEAR': ['CAMPING TENT', 'CYCLING HELMET'],
|
||||
'EMOJI': ['🎉', '🚀'],
|
||||
'BEHAVIOR': ['POSITIVE FEEDBACK', 'NEGATIVE CRITICISM'],
|
||||
'TONE': ['FORMAL', 'INFORMAL'],
|
||||
'LOCATION': ['DOWNTOWN', 'SUBURBS']
|
||||
}}
|
||||
################
|
||||
Output:
|
||||
{{
|
||||
"answer_type_keywords": ["STRATEGY","PERSONAL LIFE"],
|
||||
"entities_from_query": ["Trade agreements", "Tariffs", "Currency exchange", "Imports", "Exports"]
|
||||
}}
|
||||
#############################
|
||||
Example 2:
|
||||
|
||||
Query: "When was SpaceX's first rocket launch?"
|
||||
Answer type pool: {{
|
||||
'DATE AND TIME': ['2023-10-10 10:00', 'THIS AFTERNOON'],
|
||||
'ORGANIZATION': ['GLOBAL INITIATIVES CORPORATION', 'LOCAL COMMUNITY CENTER'],
|
||||
'PERSONAL LIFE': ['DAILY EXERCISE ROUTINE', 'FAMILY VACATION PLANNING'],
|
||||
'STRATEGY': ['NEW PRODUCT LAUNCH', 'YEAR-END SALES BOOST'],
|
||||
'SERVICE FACILITATION': ['REMOTE IT SUPPORT', 'ON-SITE TRAINING SESSIONS'],
|
||||
'PERSON': ['ALEXANDER HAMILTON', 'MARIA CURIE'],
|
||||
'FOOD': ['GRILLED SALMON', 'VEGETARIAN BURRITO'],
|
||||
'EMOTION': ['EXCITEMENT', 'DISAPPOINTMENT'],
|
||||
'PERSONAL EXPERIENCE': ['BIRTHDAY CELEBRATION', 'FIRST MARATHON'],
|
||||
'INTERACTION': ['OFFICE WATER COOLER CHAT', 'ONLINE FORUM DEBATE'],
|
||||
'BEVERAGE': ['ICED COFFEE', 'GREEN SMOOTHIE'],
|
||||
'PLAN': ['WEEKLY MEETING SCHEDULE', 'MONTHLY BUDGET OVERVIEW'],
|
||||
'GEO': ['MOUNT EVEREST BASE CAMP', 'THE GREAT BARRIER REEF'],
|
||||
'GEAR': ['PROFESSIONAL CAMERA EQUIPMENT', 'OUTDOOR HIKING GEAR'],
|
||||
'EMOJI': ['📅', '⏰'],
|
||||
'BEHAVIOR': ['PUNCTUALITY', 'HONESTY'],
|
||||
'TONE': ['CONFIDENTIAL', 'SATIRICAL'],
|
||||
'LOCATION': ['CENTRAL PARK', 'DOWNTOWN LIBRARY']
|
||||
}}
|
||||
|
||||
################
|
||||
Output:
|
||||
{{
|
||||
"answer_type_keywords": ["DATE AND TIME", "ORGANIZATION", "PLAN"],
|
||||
"entities_from_query": ["SpaceX", "Rocket launch", "Aerospace", "Power Recovery"]
|
||||
|
||||
}}
|
||||
#############################
|
||||
Example 3:
|
||||
|
||||
Query: "What is the role of education in reducing poverty?"
|
||||
Answer type pool: {{
|
||||
'PERSONAL LIFE': ['MANAGING WORK-LIFE BALANCE', 'HOME IMPROVEMENT PROJECTS'],
|
||||
'STRATEGY': ['MARKETING STRATEGIES FOR Q4', 'EXPANDING INTO NEW MARKETS'],
|
||||
'SERVICE FACILITATION': ['CUSTOMER SATISFACTION SURVEYS', 'STAFF RETENTION PROGRAMS'],
|
||||
'PERSON': ['ALBERT EINSTEIN', 'MARIA CALLAS'],
|
||||
'FOOD': ['PAN-FRIED STEAK', 'POACHED EGGS'],
|
||||
'EMOTION': ['OVERWHELM', 'CONTENTMENT'],
|
||||
'PERSONAL EXPERIENCE': ['LIVING ABROAD', 'STARTING A NEW JOB'],
|
||||
'INTERACTION': ['SOCIAL MEDIA ENGAGEMENT', 'PUBLIC SPEAKING'],
|
||||
'BEVERAGE': ['CAPPUCCINO', 'MATCHA LATTE'],
|
||||
'PLAN': ['ANNUAL FITNESS GOALS', 'QUARTERLY BUSINESS REVIEW'],
|
||||
'GEO': ['THE AMAZON RAINFOREST', 'THE GRAND CANYON'],
|
||||
'GEAR': ['SURFING ESSENTIALS', 'CYCLING ACCESSORIES'],
|
||||
'EMOJI': ['💻', '📱'],
|
||||
'BEHAVIOR': ['TEAMWORK', 'LEADERSHIP'],
|
||||
'TONE': ['FORMAL MEETING', 'CASUAL CONVERSATION'],
|
||||
'LOCATION': ['URBAN CITY CENTER', 'RURAL COUNTRYSIDE']
|
||||
}}
|
||||
|
||||
################
|
||||
Output:
|
||||
{{
|
||||
"answer_type_keywords": ["STRATEGY", "PERSON"],
|
||||
"entities_from_query": ["School access", "Literacy rates", "Job training", "Income inequality"]
|
||||
}}
|
||||
#############################
|
||||
Example 4:
|
||||
|
||||
Query: "Where is the capital of the United States?"
|
||||
Answer type pool: {{
|
||||
'ORGANIZATION': ['GREENPEACE', 'RED CROSS'],
|
||||
'PERSONAL LIFE': ['DAILY WORKOUT', 'HOME COOKING'],
|
||||
'STRATEGY': ['FINANCIAL INVESTMENT', 'BUSINESS EXPANSION'],
|
||||
'SERVICE FACILITATION': ['ONLINE SUPPORT', 'CUSTOMER SERVICE TRAINING'],
|
||||
'PERSON': ['ALBERTA SMITH', 'BENJAMIN JONES'],
|
||||
'FOOD': ['PASTA CARBONARA', 'SUSHI PLATTER'],
|
||||
'EMOTION': ['HAPPINESS', 'SADNESS'],
|
||||
'PERSONAL EXPERIENCE': ['TRAVEL ADVENTURE', 'BOOK CLUB'],
|
||||
'INTERACTION': ['TEAM BUILDING', 'NETWORKING MEETUP'],
|
||||
'BEVERAGE': ['LATTE', 'GREEN TEA'],
|
||||
'PLAN': ['WEIGHT LOSS', 'CAREER DEVELOPMENT'],
|
||||
'GEO': ['PARIS', 'NEW YORK'],
|
||||
'GEAR': ['CAMERA', 'HEADPHONES'],
|
||||
'EMOJI': ['🏢', '🌍'],
|
||||
'BEHAVIOR': ['POSITIVE THINKING', 'STRESS MANAGEMENT'],
|
||||
'TONE': ['FRIENDLY', 'PROFESSIONAL'],
|
||||
'LOCATION': ['DOWNTOWN', 'SUBURBS']
|
||||
}}
|
||||
################
|
||||
Output:
|
||||
{{
|
||||
"answer_type_keywords": ["LOCATION"],
|
||||
"entities_from_query": ["capital of the United States", "Washington", "New York"]
|
||||
}}
|
||||
#############################
|
||||
|
||||
-Real Data-
|
||||
######################
|
||||
Query: {query}
|
||||
Answer type pool:{TYPE_POOL}
|
||||
######################
|
||||
Output:
|
||||
|
||||
"""
|
||||
|
||||
PROMPTS["keywords_extraction"] = """---Role---
|
||||
|
||||
You are a helpful assistant tasked with identifying both high-level and low-level keywords in the user's query.
|
||||
|
||||
---Goal---
|
||||
|
||||
Given the query, list both high-level and low-level keywords. High-level keywords focus on overarching concepts or themes, while low-level keywords focus on specific entities, details, or concrete terms.
|
||||
|
||||
---Instructions---
|
||||
|
||||
- Output the keywords in JSON format.
|
||||
- The JSON should have two keys:
|
||||
- "high_level_keywords" for overarching concepts or themes.
|
||||
- "low_level_keywords" for specific entities or details.
|
||||
|
||||
######################
|
||||
-Examples-
|
||||
######################
|
||||
{examples}
|
||||
|
||||
#############################
|
||||
-Real Data-
|
||||
######################
|
||||
Query: {query}
|
||||
######################
|
||||
The `Output` should be human-readable text, not unicode escape sequences. Keep the same language as the `Query`.
|
||||
Output:
|
||||
|
||||
"""
|
||||
|
||||
PROMPTS["keywords_extraction_examples"] = [
|
||||
"""Example 1:
|
||||
|
||||
Query: "How does international trade influence global economic stability?"
|
||||
################
|
||||
Output:
|
||||
{
|
||||
"high_level_keywords": ["International trade", "Global economic stability", "Economic impact"],
|
||||
"low_level_keywords": ["Trade agreements", "Tariffs", "Currency exchange", "Imports", "Exports"]
|
||||
}
|
||||
#############################""",
|
||||
"""Example 2:
|
||||
|
||||
Query: "What are the environmental consequences of deforestation on biodiversity?"
|
||||
################
|
||||
Output:
|
||||
{
|
||||
"high_level_keywords": ["Environmental consequences", "Deforestation", "Biodiversity loss"],
|
||||
"low_level_keywords": ["Species extinction", "Habitat destruction", "Carbon emissions", "Rainforest", "Ecosystem"]
|
||||
}
|
||||
#############################""",
|
||||
"""Example 3:
|
||||
|
||||
Query: "What is the role of education in reducing poverty?"
|
||||
################
|
||||
Output:
|
||||
{
|
||||
"high_level_keywords": ["Education", "Poverty reduction", "Socioeconomic development"],
|
||||
"low_level_keywords": ["School access", "Literacy rates", "Job training", "Income inequality"]
|
||||
}
|
||||
#############################""",
|
||||
]
|
||||
338
graphrag/search.py
Normal file
@@ -0,0 +1,338 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import json
|
||||
import logging
|
||||
from collections import defaultdict
|
||||
from copy import deepcopy
|
||||
import json_repair
|
||||
import pandas as pd
|
||||
import trio
|
||||
|
||||
from api.utils import get_uuid
|
||||
from graphrag.query_analyze_prompt import PROMPTS
|
||||
from graphrag.utils import get_entity_type2samples, get_llm_cache, set_llm_cache, get_relation
|
||||
from rag.utils import num_tokens_from_string, get_float
|
||||
from rag.utils.doc_store_conn import OrderByExpr
|
||||
|
||||
from rag.nlp.search import Dealer, index_name
|
||||
|
||||
|
||||
class KGSearch(Dealer):
|
||||
def _chat(self, llm_bdl, system, history, gen_conf):
|
||||
response = get_llm_cache(llm_bdl.llm_name, system, history, gen_conf)
|
||||
if response:
|
||||
return response
|
||||
response = llm_bdl.chat(system, history, gen_conf)
|
||||
if response.find("**ERROR**") >= 0:
|
||||
raise Exception(response)
|
||||
set_llm_cache(llm_bdl.llm_name, system, response, history, gen_conf)
|
||||
return response
|
||||
|
||||
def query_rewrite(self, llm, question, idxnms, kb_ids):
|
||||
ty2ents = trio.run(lambda: get_entity_type2samples(idxnms, kb_ids))
|
||||
hint_prompt = PROMPTS["minirag_query2kwd"].format(query=question,
|
||||
TYPE_POOL=json.dumps(ty2ents, ensure_ascii=False, indent=2))
|
||||
result = self._chat(llm, hint_prompt, [{"role": "user", "content": "Output:"}], {})
|
||||
try:
|
||||
keywords_data = json_repair.loads(result)
|
||||
type_keywords = keywords_data.get("answer_type_keywords", [])
|
||||
entities_from_query = keywords_data.get("entities_from_query", [])[:5]
|
||||
return type_keywords, entities_from_query
|
||||
except json_repair.JSONDecodeError:
|
||||
try:
|
||||
result = result.replace(hint_prompt[:-1], '').replace('user', '').replace('model', '').strip()
|
||||
result = '{' + result.split('{')[1].split('}')[0] + '}'
|
||||
keywords_data = json_repair.loads(result)
|
||||
type_keywords = keywords_data.get("answer_type_keywords", [])
|
||||
entities_from_query = keywords_data.get("entities_from_query", [])[:5]
|
||||
return type_keywords, entities_from_query
|
||||
# Handle parsing error
|
||||
except Exception as e:
|
||||
logging.exception(f"JSON parsing error: {result} -> {e}")
|
||||
raise e
|
||||
|
||||
def _ent_info_from_(self, es_res, sim_thr=0.3):
|
||||
res = {}
|
||||
flds = ["content_with_weight", "_score", "entity_kwd", "rank_flt", "n_hop_with_weight"]
|
||||
es_res = self.dataStore.getFields(es_res, flds)
|
||||
for _, ent in es_res.items():
|
||||
for f in flds:
|
||||
if f in ent and ent[f] is None:
|
||||
del ent[f]
|
||||
if get_float(ent.get("_score", 0)) < sim_thr:
|
||||
continue
|
||||
if isinstance(ent["entity_kwd"], list):
|
||||
ent["entity_kwd"] = ent["entity_kwd"][0]
|
||||
res[ent["entity_kwd"]] = {
|
||||
"sim": get_float(ent.get("_score", 0)),
|
||||
"pagerank": get_float(ent.get("rank_flt", 0)),
|
||||
"n_hop_ents": json.loads(ent.get("n_hop_with_weight", "[]")),
|
||||
"description": ent.get("content_with_weight", "{}")
|
||||
}
|
||||
return res
|
||||
|
||||
def _relation_info_from_(self, es_res, sim_thr=0.3):
|
||||
res = {}
|
||||
es_res = self.dataStore.getFields(es_res, ["content_with_weight", "_score", "from_entity_kwd", "to_entity_kwd",
|
||||
"weight_int"])
|
||||
for _, ent in es_res.items():
|
||||
if get_float(ent["_score"]) < sim_thr:
|
||||
continue
|
||||
f, t = sorted([ent["from_entity_kwd"], ent["to_entity_kwd"]])
|
||||
if isinstance(f, list):
|
||||
f = f[0]
|
||||
if isinstance(t, list):
|
||||
t = t[0]
|
||||
res[(f, t)] = {
|
||||
"sim": get_float(ent["_score"]),
|
||||
"pagerank": get_float(ent.get("weight_int", 0)),
|
||||
"description": ent["content_with_weight"]
|
||||
}
|
||||
return res
|
||||
|
||||
def get_relevant_ents_by_keywords(self, keywords, filters, idxnms, kb_ids, emb_mdl, sim_thr=0.3, N=56):
|
||||
if not keywords:
|
||||
return {}
|
||||
filters = deepcopy(filters)
|
||||
filters["knowledge_graph_kwd"] = "entity"
|
||||
matchDense = self.get_vector(", ".join(keywords), emb_mdl, 1024, sim_thr)
|
||||
es_res = self.dataStore.search(["content_with_weight", "entity_kwd", "rank_flt"], [], filters, [matchDense],
|
||||
OrderByExpr(), 0, N,
|
||||
idxnms, kb_ids)
|
||||
return self._ent_info_from_(es_res, sim_thr)
|
||||
|
||||
def get_relevant_relations_by_txt(self, txt, filters, idxnms, kb_ids, emb_mdl, sim_thr=0.3, N=56):
|
||||
if not txt:
|
||||
return {}
|
||||
filters = deepcopy(filters)
|
||||
filters["knowledge_graph_kwd"] = "relation"
|
||||
matchDense = self.get_vector(txt, emb_mdl, 1024, sim_thr)
|
||||
es_res = self.dataStore.search(
|
||||
["content_with_weight", "_score", "from_entity_kwd", "to_entity_kwd", "weight_int"],
|
||||
[], filters, [matchDense], OrderByExpr(), 0, N, idxnms, kb_ids)
|
||||
return self._relation_info_from_(es_res, sim_thr)
|
||||
|
||||
def get_relevant_ents_by_types(self, types, filters, idxnms, kb_ids, N=56):
|
||||
if not types:
|
||||
return {}
|
||||
filters = deepcopy(filters)
|
||||
filters["knowledge_graph_kwd"] = "entity"
|
||||
filters["entity_type_kwd"] = types
|
||||
ordr = OrderByExpr()
|
||||
ordr.desc("rank_flt")
|
||||
es_res = self.dataStore.search(["entity_kwd", "rank_flt"], [], filters, [], ordr, 0, N,
|
||||
idxnms, kb_ids)
|
||||
return self._ent_info_from_(es_res, 0)
|
||||
|
||||
def retrieval(self, question: str,
|
||||
tenant_ids: str | list[str],
|
||||
kb_ids: list[str],
|
||||
emb_mdl,
|
||||
llm,
|
||||
max_token: int = 8196,
|
||||
ent_topn: int = 6,
|
||||
rel_topn: int = 6,
|
||||
comm_topn: int = 1,
|
||||
ent_sim_threshold: float = 0.3,
|
||||
rel_sim_threshold: float = 0.3,
|
||||
**kwargs
|
||||
):
|
||||
qst = question
|
||||
filters = self.get_filters({"kb_ids": kb_ids})
|
||||
if isinstance(tenant_ids, str):
|
||||
tenant_ids = tenant_ids.split(",")
|
||||
idxnms = [index_name(tid) for tid in tenant_ids]
|
||||
ty_kwds = []
|
||||
try:
|
||||
ty_kwds, ents = self.query_rewrite(llm, qst, [index_name(tid) for tid in tenant_ids], kb_ids)
|
||||
logging.info(f"Q: {qst}, Types: {ty_kwds}, Entities: {ents}")
|
||||
except Exception as e:
|
||||
logging.exception(e)
|
||||
ents = [qst]
|
||||
pass
|
||||
|
||||
ents_from_query = self.get_relevant_ents_by_keywords(ents, filters, idxnms, kb_ids, emb_mdl, ent_sim_threshold)
|
||||
ents_from_types = self.get_relevant_ents_by_types(ty_kwds, filters, idxnms, kb_ids, 10000)
|
||||
rels_from_txt = self.get_relevant_relations_by_txt(qst, filters, idxnms, kb_ids, emb_mdl, rel_sim_threshold)
|
||||
nhop_pathes = defaultdict(dict)
|
||||
for _, ent in ents_from_query.items():
|
||||
nhops = ent.get("n_hop_ents", [])
|
||||
if not isinstance(nhops, list):
|
||||
logging.warning(f"Abnormal n_hop_ents: {nhops}")
|
||||
continue
|
||||
for nbr in nhops:
|
||||
path = nbr["path"]
|
||||
wts = nbr["weights"]
|
||||
for i in range(len(path) - 1):
|
||||
f, t = path[i], path[i + 1]
|
||||
if (f, t) in nhop_pathes:
|
||||
nhop_pathes[(f, t)]["sim"] += ent["sim"] / (2 + i)
|
||||
else:
|
||||
nhop_pathes[(f, t)]["sim"] = ent["sim"] / (2 + i)
|
||||
nhop_pathes[(f, t)]["pagerank"] = wts[i]
|
||||
|
||||
logging.info("Retrieved entities: {}".format(list(ents_from_query.keys())))
|
||||
logging.info("Retrieved relations: {}".format(list(rels_from_txt.keys())))
|
||||
logging.info("Retrieved entities from types({}): {}".format(ty_kwds, list(ents_from_types.keys())))
|
||||
logging.info("Retrieved N-hops: {}".format(list(nhop_pathes.keys())))
|
||||
|
||||
# P(E|Q) => P(E) * P(Q|E) => pagerank * sim
|
||||
for ent in ents_from_types.keys():
|
||||
if ent not in ents_from_query:
|
||||
continue
|
||||
ents_from_query[ent]["sim"] *= 2
|
||||
|
||||
for (f, t) in rels_from_txt.keys():
|
||||
pair = tuple(sorted([f, t]))
|
||||
s = 0
|
||||
if pair in nhop_pathes:
|
||||
s += nhop_pathes[pair]["sim"]
|
||||
del nhop_pathes[pair]
|
||||
if f in ents_from_types:
|
||||
s += 1
|
||||
if t in ents_from_types:
|
||||
s += 1
|
||||
rels_from_txt[(f, t)]["sim"] *= s + 1
|
||||
|
||||
# This is for the relations from n-hop but not by query search
|
||||
for (f, t) in nhop_pathes.keys():
|
||||
s = 0
|
||||
if f in ents_from_types:
|
||||
s += 1
|
||||
if t in ents_from_types:
|
||||
s += 1
|
||||
rels_from_txt[(f, t)] = {
|
||||
"sim": nhop_pathes[(f, t)]["sim"] * (s + 1),
|
||||
"pagerank": nhop_pathes[(f, t)]["pagerank"]
|
||||
}
|
||||
|
||||
ents_from_query = sorted(ents_from_query.items(), key=lambda x: x[1]["sim"] * x[1]["pagerank"], reverse=True)[
|
||||
:ent_topn]
|
||||
rels_from_txt = sorted(rels_from_txt.items(), key=lambda x: x[1]["sim"] * x[1]["pagerank"], reverse=True)[
|
||||
:rel_topn]
|
||||
|
||||
ents = []
|
||||
relas = []
|
||||
for n, ent in ents_from_query:
|
||||
ents.append({
|
||||
"Entity": n,
|
||||
"Score": "%.2f" % (ent["sim"] * ent["pagerank"]),
|
||||
"Description": json.loads(ent["description"]).get("description", "") if ent["description"] else ""
|
||||
})
|
||||
max_token -= num_tokens_from_string(str(ents[-1]))
|
||||
if max_token <= 0:
|
||||
ents = ents[:-1]
|
||||
break
|
||||
|
||||
for (f, t), rel in rels_from_txt:
|
||||
if not rel.get("description"):
|
||||
for tid in tenant_ids:
|
||||
rela = get_relation(tid, kb_ids, f, t)
|
||||
if rela:
|
||||
break
|
||||
else:
|
||||
continue
|
||||
rel["description"] = rela["description"]
|
||||
desc = rel["description"]
|
||||
try:
|
||||
desc = json.loads(desc).get("description", "")
|
||||
except Exception:
|
||||
pass
|
||||
relas.append({
|
||||
"From Entity": f,
|
||||
"To Entity": t,
|
||||
"Score": "%.2f" % (rel["sim"] * rel["pagerank"]),
|
||||
"Description": desc
|
||||
})
|
||||
max_token -= num_tokens_from_string(str(relas[-1]))
|
||||
if max_token <= 0:
|
||||
relas = relas[:-1]
|
||||
break
|
||||
|
||||
if ents:
|
||||
ents = "\n---- Entities ----\n{}".format(pd.DataFrame(ents).to_csv())
|
||||
else:
|
||||
ents = ""
|
||||
if relas:
|
||||
relas = "\n---- Relations ----\n{}".format(pd.DataFrame(relas).to_csv())
|
||||
else:
|
||||
relas = ""
|
||||
|
||||
return {
|
||||
"chunk_id": get_uuid(),
|
||||
"content_ltks": "",
|
||||
"content_with_weight": ents + relas + self._community_retrieval_([n for n, _ in ents_from_query], filters, kb_ids, idxnms,
|
||||
comm_topn, max_token),
|
||||
"doc_id": "",
|
||||
"docnm_kwd": "Related content in Knowledge Graph",
|
||||
"kb_id": kb_ids,
|
||||
"important_kwd": [],
|
||||
"image_id": "",
|
||||
"similarity": 1.,
|
||||
"vector_similarity": 1.,
|
||||
"term_similarity": 0,
|
||||
"vector": [],
|
||||
"positions": [],
|
||||
}
|
||||
|
||||
def _community_retrieval_(self, entities, condition, kb_ids, idxnms, topn, max_token):
|
||||
## Community retrieval
|
||||
fields = ["docnm_kwd", "content_with_weight"]
|
||||
odr = OrderByExpr()
|
||||
odr.desc("weight_flt")
|
||||
fltr = deepcopy(condition)
|
||||
fltr["knowledge_graph_kwd"] = "community_report"
|
||||
fltr["entities_kwd"] = entities
|
||||
comm_res = self.dataStore.search(fields, [], fltr, [],
|
||||
OrderByExpr(), 0, topn, idxnms, kb_ids)
|
||||
comm_res_fields = self.dataStore.getFields(comm_res, fields)
|
||||
txts = []
|
||||
for ii, (_, row) in enumerate(comm_res_fields.items()):
|
||||
obj = json.loads(row["content_with_weight"])
|
||||
txts.append("# {}. {}\n## Content\n{}\n## Evidences\n{}\n".format(
|
||||
ii + 1, row["docnm_kwd"], obj["report"], obj["evidences"]))
|
||||
max_token -= num_tokens_from_string(str(txts[-1]))
|
||||
|
||||
if not txts:
|
||||
return ""
|
||||
return "\n---- Community Report ----\n" + "\n".join(txts)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
from api import settings
|
||||
import argparse
|
||||
from api.db import LLMType
|
||||
from api.db.services.knowledgebase_service import KnowledgebaseService
|
||||
from api.db.services.llm_service import LLMBundle
|
||||
from api.db.services.user_service import TenantService
|
||||
from rag.nlp import search
|
||||
|
||||
settings.init_settings()
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument('-t', '--tenant_id', default=False, help="Tenant ID", action='store', required=True)
|
||||
parser.add_argument('-d', '--kb_id', default=False, help="Knowledge base ID", action='store', required=True)
|
||||
parser.add_argument('-q', '--question', default=False, help="Question", action='store', required=True)
|
||||
args = parser.parse_args()
|
||||
|
||||
kb_id = args.kb_id
|
||||
_, tenant = TenantService.get_by_id(args.tenant_id)
|
||||
llm_bdl = LLMBundle(args.tenant_id, LLMType.CHAT, tenant.llm_id)
|
||||
_, kb = KnowledgebaseService.get_by_id(kb_id)
|
||||
embed_bdl = LLMBundle(args.tenant_id, LLMType.EMBEDDING, kb.embd_id)
|
||||
|
||||
kg = KGSearch(settings.docStoreConn)
|
||||
print(kg.retrieval(args.question, args.tenant_id, [kb_id],
|
||||
                       embed_bdl, llm_bdl))
|
||||
628
graphrag/utils.py
Normal file
@@ -0,0 +1,628 @@
|
||||
# Copyright (c) 2024 Microsoft Corporation.
|
||||
# Licensed under the MIT License
|
||||
"""
|
||||
Reference:
|
||||
- [graphrag](https://github.com/microsoft/graphrag)
|
||||
- [LightRag](https://github.com/HKUDS/LightRAG)
|
||||
"""
|
||||
|
||||
import dataclasses
|
||||
import html
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import time
|
||||
from collections import defaultdict
|
||||
from hashlib import md5
|
||||
from typing import Any, Callable, Set, Tuple
|
||||
|
||||
import networkx as nx
|
||||
import numpy as np
|
||||
import trio
|
||||
import xxhash
|
||||
from networkx.readwrite import json_graph
|
||||
|
||||
from api import settings
|
||||
from api.utils import get_uuid
|
||||
from api.utils.api_utils import timeout
|
||||
from rag.nlp import rag_tokenizer, search
|
||||
from rag.utils.doc_store_conn import OrderByExpr
|
||||
from rag.utils.redis_conn import REDIS_CONN
|
||||
|
||||
GRAPH_FIELD_SEP = "<SEP>"
|
||||
|
||||
ErrorHandlerFn = Callable[[BaseException | None, str | None, dict | None], None]
|
||||
|
||||
chat_limiter = trio.CapacityLimiter(int(os.environ.get("MAX_CONCURRENT_CHATS", 10)))
|
||||
|
||||
|
||||
@dataclasses.dataclass
|
||||
class GraphChange:
|
||||
removed_nodes: Set[str] = dataclasses.field(default_factory=set)
|
||||
added_updated_nodes: Set[str] = dataclasses.field(default_factory=set)
|
||||
removed_edges: Set[Tuple[str, str]] = dataclasses.field(default_factory=set)
|
||||
added_updated_edges: Set[Tuple[str, str]] = dataclasses.field(default_factory=set)
|
||||
|
||||
|
||||
def perform_variable_replacements(input: str, history: list[dict] | None = None, variables: dict | None = None) -> str:
|
||||
"""Perform variable replacements on the input string and in a chat log."""
|
||||
if history is None:
|
||||
history = []
|
||||
if variables is None:
|
||||
variables = {}
|
||||
result = input
|
||||
|
||||
def replace_all(input: str) -> str:
|
||||
result = input
|
||||
for k, v in variables.items():
|
||||
result = result.replace(f"{{{k}}}", str(v))
|
||||
return result
|
||||
|
||||
result = replace_all(result)
|
||||
for i, entry in enumerate(history):
|
||||
if entry.get("role") == "system":
|
||||
entry["content"] = replace_all(entry.get("content") or "")
|
||||
|
||||
return result
|
||||
|
||||
|
||||
def clean_str(input: Any) -> str:
|
||||
"""Clean an input string by removing HTML escapes, control characters, and other unwanted characters."""
|
||||
# If we get non-string input, just give it back
|
||||
if not isinstance(input, str):
|
||||
return input
|
||||
|
||||
result = html.unescape(input.strip())
|
||||
# https://stackoverflow.com/questions/4324790/removing-control-characters-from-a-string-in-python
|
||||
return re.sub(r"[\"\x00-\x1f\x7f-\x9f]", "", result)
|
||||
|
||||
|
||||
def dict_has_keys_with_types(data: dict, expected_fields: list[tuple[str, type]]) -> bool:
|
||||
"""Return True if the given dictionary has the given keys with the given types."""
|
||||
for field, field_type in expected_fields:
|
||||
if field not in data:
|
||||
return False
|
||||
|
||||
value = data[field]
|
||||
if not isinstance(value, field_type):
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
def get_llm_cache(llmnm, txt, history, genconf):
|
||||
hasher = xxhash.xxh64()
|
||||
hasher.update(str(llmnm).encode("utf-8"))
|
||||
hasher.update(str(txt).encode("utf-8"))
|
||||
hasher.update(str(history).encode("utf-8"))
|
||||
hasher.update(str(genconf).encode("utf-8"))
|
||||
|
||||
k = hasher.hexdigest()
|
||||
bin = REDIS_CONN.get(k)
|
||||
if not bin:
|
||||
return None
|
||||
return bin
|
||||
|
||||
|
||||
def set_llm_cache(llmnm, txt, v, history, genconf):
|
||||
hasher = xxhash.xxh64()
|
||||
hasher.update(str(llmnm).encode("utf-8"))
|
||||
hasher.update(str(txt).encode("utf-8"))
|
||||
hasher.update(str(history).encode("utf-8"))
|
||||
hasher.update(str(genconf).encode("utf-8"))
|
||||
|
||||
k = hasher.hexdigest()
|
||||
REDIS_CONN.set(k, v.encode("utf-8"), 24 * 3600)
|
||||
|
||||
|
||||
def get_embed_cache(llmnm, txt):
|
||||
hasher = xxhash.xxh64()
|
||||
hasher.update(str(llmnm).encode("utf-8"))
|
||||
hasher.update(str(txt).encode("utf-8"))
|
||||
|
||||
k = hasher.hexdigest()
|
||||
bin = REDIS_CONN.get(k)
|
||||
if not bin:
|
||||
return
|
||||
return np.array(json.loads(bin))
|
||||
|
||||
|
||||
def set_embed_cache(llmnm, txt, arr):
|
||||
hasher = xxhash.xxh64()
|
||||
hasher.update(str(llmnm).encode("utf-8"))
|
||||
hasher.update(str(txt).encode("utf-8"))
|
||||
|
||||
k = hasher.hexdigest()
|
||||
arr = json.dumps(arr.tolist() if isinstance(arr, np.ndarray) else arr)
|
||||
REDIS_CONN.set(k, arr.encode("utf-8"), 24 * 3600)
|
||||
|
||||
|
||||
def get_tags_from_cache(kb_ids):
|
||||
hasher = xxhash.xxh64()
|
||||
hasher.update(str(kb_ids).encode("utf-8"))
|
||||
|
||||
k = hasher.hexdigest()
|
||||
bin = REDIS_CONN.get(k)
|
||||
if not bin:
|
||||
return
|
||||
return bin
|
||||
|
||||
|
||||
def set_tags_to_cache(kb_ids, tags):
|
||||
hasher = xxhash.xxh64()
|
||||
hasher.update(str(kb_ids).encode("utf-8"))
|
||||
|
||||
k = hasher.hexdigest()
|
||||
REDIS_CONN.set(k, json.dumps(tags).encode("utf-8"), 600)
|
||||
|
||||
|
||||
def tidy_graph(graph: nx.Graph, callback, check_attribute: bool = True):
|
||||
"""
|
||||
Ensure all nodes and edges in the graph have the essential attributes.
|
||||
"""
|
||||
|
||||
def is_valid_item(node_attrs: dict) -> bool:
|
||||
valid_node = True
|
||||
for attr in ["description", "source_id"]:
|
||||
if attr not in node_attrs:
|
||||
valid_node = False
|
||||
break
|
||||
return valid_node
|
||||
|
||||
if check_attribute:
|
||||
purged_nodes = []
|
||||
for node, node_attrs in graph.nodes(data=True):
|
||||
if not is_valid_item(node_attrs):
|
||||
purged_nodes.append(node)
|
||||
for node in purged_nodes:
|
||||
graph.remove_node(node)
|
||||
if purged_nodes and callback:
|
||||
callback(msg=f"Purged {len(purged_nodes)} nodes from graph due to missing essential attributes.")
|
||||
|
||||
purged_edges = []
|
||||
for source, target, attr in graph.edges(data=True):
|
||||
if check_attribute:
|
||||
if not is_valid_item(attr):
|
||||
purged_edges.append((source, target))
|
||||
if "keywords" not in attr:
|
||||
attr["keywords"] = []
|
||||
for source, target in purged_edges:
|
||||
graph.remove_edge(source, target)
|
||||
if purged_edges and callback:
|
||||
callback(msg=f"Purged {len(purged_edges)} edges from graph due to missing essential attributes.")
|
||||
|
||||
|
||||
def get_from_to(node1, node2):
|
||||
if node1 < node2:
|
||||
return (node1, node2)
|
||||
else:
|
||||
return (node2, node1)
|
||||
|
||||
|
||||
def graph_merge(g1: nx.Graph, g2: nx.Graph, change: GraphChange):
|
||||
"""Merge graph g2 into g1 in place."""
|
||||
for node_name, attr in g2.nodes(data=True):
|
||||
change.added_updated_nodes.add(node_name)
|
||||
if not g1.has_node(node_name):
|
||||
g1.add_node(node_name, **attr)
|
||||
continue
|
||||
node = g1.nodes[node_name]
|
||||
node["description"] += GRAPH_FIELD_SEP + attr["description"]
|
||||
# A node's source_id indicates which chunks it came from.
|
||||
node["source_id"] += attr["source_id"]
|
||||
|
||||
for source, target, attr in g2.edges(data=True):
|
||||
change.added_updated_edges.add(get_from_to(source, target))
|
||||
edge = g1.get_edge_data(source, target)
|
||||
if edge is None:
|
||||
g1.add_edge(source, target, **attr)
|
||||
continue
|
||||
edge["weight"] += attr.get("weight", 0)
|
||||
edge["description"] += GRAPH_FIELD_SEP + attr["description"]
|
||||
edge["keywords"] += attr["keywords"]
|
||||
# An edge's source_id indicates which chunks it came from.
|
||||
edge["source_id"] += attr["source_id"]
|
||||
|
||||
for node_degree in g1.degree:
|
||||
g1.nodes[str(node_degree[0])]["rank"] = int(node_degree[1])
|
||||
# A graph's source_id indicates which documents it came from.
|
||||
if "source_id" not in g1.graph:
|
||||
g1.graph["source_id"] = []
|
||||
g1.graph["source_id"] += g2.graph.get("source_id", [])
|
||||
return g1
|
||||
|
||||
|
||||
def compute_args_hash(*args):
|
||||
return md5(str(args).encode()).hexdigest()
|
||||
|
||||
|
||||
def handle_single_entity_extraction(
|
||||
record_attributes: list[str],
|
||||
chunk_key: str,
|
||||
):
|
||||
if len(record_attributes) < 4 or record_attributes[0] != '"entity"':
|
||||
return None
|
||||
# add this record as a node in the G
|
||||
entity_name = clean_str(record_attributes[1].upper())
|
||||
if not entity_name.strip():
|
||||
return None
|
||||
entity_type = clean_str(record_attributes[2].upper())
|
||||
entity_description = clean_str(record_attributes[3])
|
||||
entity_source_id = chunk_key
|
||||
return dict(
|
||||
entity_name=entity_name.upper(),
|
||||
entity_type=entity_type.upper(),
|
||||
description=entity_description,
|
||||
source_id=entity_source_id,
|
||||
)
|
||||
|
||||
|
||||
def handle_single_relationship_extraction(record_attributes: list[str], chunk_key: str):
|
||||
if len(record_attributes) < 5 or record_attributes[0] != '"relationship"':
|
||||
return None
|
||||
# add this record as edge
|
||||
source = clean_str(record_attributes[1].upper())
|
||||
target = clean_str(record_attributes[2].upper())
|
||||
edge_description = clean_str(record_attributes[3])
|
||||
|
||||
edge_keywords = clean_str(record_attributes[4])
|
||||
edge_source_id = chunk_key
|
||||
weight = float(record_attributes[-1]) if is_float_regex(record_attributes[-1]) else 1.0
|
||||
pair = sorted([source.upper(), target.upper()])
|
||||
return dict(
|
||||
src_id=pair[0],
|
||||
tgt_id=pair[1],
|
||||
weight=weight,
|
||||
description=edge_description,
|
||||
keywords=edge_keywords,
|
||||
source_id=edge_source_id,
|
||||
metadata={"created_at": time.time()},
|
||||
)
|
||||
|
||||
|
||||
def pack_user_ass_to_openai_messages(*args: str):
|
||||
roles = ["user", "assistant"]
|
||||
return [{"role": roles[i % 2], "content": content} for i, content in enumerate(args)]
|
||||
|
||||
|
||||
def split_string_by_multi_markers(content: str, markers: list[str]) -> list[str]:
|
||||
"""Split a string by multiple markers"""
|
||||
if not markers:
|
||||
return [content]
|
||||
results = re.split("|".join(re.escape(marker) for marker in markers), content)
|
||||
return [r.strip() for r in results if r.strip()]
|
||||
|
||||
|
||||
def is_float_regex(value):
|
||||
return bool(re.match(r"^[-+]?[0-9]*\.?[0-9]+$", value))
|
||||
|
||||
|
||||
def chunk_id(chunk):
|
||||
return xxhash.xxh64((chunk["content_with_weight"] + chunk["kb_id"]).encode("utf-8")).hexdigest()
|
||||
|
||||
|
||||
async def graph_node_to_chunk(kb_id, embd_mdl, ent_name, meta, chunks):
    global chat_limiter
    enable_timeout_assertion = os.environ.get("ENABLE_TIMEOUT_ASSERTION")
    chunk = {
        "id": get_uuid(),
        "important_kwd": [ent_name],
        "title_tks": rag_tokenizer.tokenize(ent_name),
        "entity_kwd": ent_name,
        "knowledge_graph_kwd": "entity",
        "entity_type_kwd": meta["entity_type"],
        "content_with_weight": json.dumps(meta, ensure_ascii=False),
        "content_ltks": rag_tokenizer.tokenize(meta["description"]),
        "source_id": meta["source_id"],
        "kb_id": kb_id,
        "available_int": 0,
    }
    chunk["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(chunk["content_ltks"])
    ebd = get_embed_cache(embd_mdl.llm_name, ent_name)
    if ebd is None:
        async with chat_limiter:
            with trio.fail_after(3 if enable_timeout_assertion else 30000000):
                ebd, _ = await trio.to_thread.run_sync(lambda: embd_mdl.encode([ent_name]))
        ebd = ebd[0]
        set_embed_cache(embd_mdl.llm_name, ent_name, ebd)
    assert ebd is not None
    chunk["q_%d_vec" % len(ebd)] = ebd
    chunks.append(chunk)


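# Looks up stored "relation" chunks between the given entities (either argument may
# be a single name or a list). With size == 1 the first parsable record is returned
# directly as a dict; otherwise a list of parsed records is returned, possibly empty.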
@timeout(3, 3)
def get_relation(tenant_id, kb_id, from_ent_name, to_ent_name, size=1):
    ents = from_ent_name
    if isinstance(ents, str):
        ents = [from_ent_name]
    if isinstance(to_ent_name, str):
        to_ent_name = [to_ent_name]
    ents.extend(to_ent_name)
    ents = list(set(ents))
    conds = {"fields": ["content_with_weight"], "size": size, "from_entity_kwd": ents, "to_entity_kwd": ents, "knowledge_graph_kwd": ["relation"]}
    res = []
    es_res = settings.retrievaler.search(conds, search.index_name(tenant_id), [kb_id] if isinstance(kb_id, str) else kb_id)
    for id in es_res.ids:
        try:
            if size == 1:
                return json.loads(es_res.field[id]["content_with_weight"])
            res.append(json.loads(es_res.field[id]["content_with_weight"]))
        except Exception:
            continue
    return res


async def graph_edge_to_chunk(kb_id, embd_mdl, from_ent_name, to_ent_name, meta, chunks):
    enable_timeout_assertion = os.environ.get("ENABLE_TIMEOUT_ASSERTION")
    chunk = {
        "id": get_uuid(),
        "from_entity_kwd": from_ent_name,
        "to_entity_kwd": to_ent_name,
        "knowledge_graph_kwd": "relation",
        "content_with_weight": json.dumps(meta, ensure_ascii=False),
        "content_ltks": rag_tokenizer.tokenize(meta["description"]),
        "important_kwd": meta["keywords"],
        "source_id": meta["source_id"],
        "weight_int": int(meta["weight"]),
        "kb_id": kb_id,
        "available_int": 0,
    }
    chunk["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(chunk["content_ltks"])
    txt = f"{from_ent_name}->{to_ent_name}"
    ebd = get_embed_cache(embd_mdl.llm_name, txt)
    if ebd is None:
        async with chat_limiter:
            with trio.fail_after(3 if enable_timeout_assertion else 300000000):
                ebd, _ = await trio.to_thread.run_sync(lambda: embd_mdl.encode([txt + f": {meta['description']}"]))
        ebd = ebd[0]
        set_embed_cache(embd_mdl.llm_name, txt, ebd)
    assert ebd is not None
    chunk["q_%d_vec" % len(ebd)] = ebd
    chunks.append(chunk)


async def does_graph_contains(tenant_id, kb_id, doc_id):
    # Get doc_ids of graph
    fields = ["source_id"]
    condition = {
        "knowledge_graph_kwd": ["graph"],
        "removed_kwd": "N",
    }
    res = await trio.to_thread.run_sync(lambda: settings.docStoreConn.search(fields, [], condition, [], OrderByExpr(), 0, 1, search.index_name(tenant_id), [kb_id]))
    fields2 = settings.docStoreConn.getFields(res, fields)
    graph_doc_ids = set()
    for chunk_id in fields2.keys():
        graph_doc_ids = set(fields2[chunk_id]["source_id"])
    return doc_id in graph_doc_ids


async def get_graph_doc_ids(tenant_id, kb_id) -> list[str]:
    conds = {"fields": ["source_id"], "removed_kwd": "N", "size": 1, "knowledge_graph_kwd": ["graph"]}
    res = await trio.to_thread.run_sync(lambda: settings.retrievaler.search(conds, search.index_name(tenant_id), [kb_id]))
    doc_ids = []
    if res.total == 0:
        return doc_ids
    for id in res.ids:
        doc_ids = res.field[id]["source_id"]
    return doc_ids


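# Loads the persisted whole-KB graph chunk. If the stored copy is flagged as removed,
# the graph is rebuilt from its per-source subgraph chunks instead; None is returned
# when nothing usable is found.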
async def get_graph(tenant_id, kb_id, exclude_rebuild=None):
    conds = {"fields": ["content_with_weight", "removed_kwd", "source_id"], "size": 1, "knowledge_graph_kwd": ["graph"]}
    res = await trio.to_thread.run_sync(settings.retrievaler.search, conds, search.index_name(tenant_id), [kb_id])
    if res.total != 0:
        for id in res.ids:
            try:
                if res.field[id]["removed_kwd"] == "N":
                    g = json_graph.node_link_graph(json.loads(res.field[id]["content_with_weight"]), edges="edges")
                    if "source_id" not in g.graph:
                        g.graph["source_id"] = res.field[id]["source_id"]
                else:
                    g = await rebuild_graph(tenant_id, kb_id, exclude_rebuild)
                return g
            except Exception:
                continue
    return None


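# Persists `graph` plus the incremental `change` to the doc store: stale graph and
# subgraph chunks are deleted, removed nodes/edges are purged, then one "graph"
# chunk, one "subgraph" chunk per source document, and embedded entity/relation
# chunks are inserted in small batches.
#
# Minimal calling sketch (illustrative only; assumes GraphChange default-constructs
# with empty node/edge sets):
#   change = GraphChange()
#   change.added_updated_nodes = set(graph.nodes())
#   change.added_updated_edges = set(graph.edges())
#   await set_graph(tenant_id, kb_id, embd_mdl, graph, change, callback=None)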
async def set_graph(tenant_id: str, kb_id: str, embd_mdl, graph: nx.Graph, change: GraphChange, callback):
    global chat_limiter
    start = trio.current_time()

    await trio.to_thread.run_sync(settings.docStoreConn.delete, {"knowledge_graph_kwd": ["graph", "subgraph"]}, search.index_name(tenant_id), kb_id)

    if change.removed_nodes:
        await trio.to_thread.run_sync(settings.docStoreConn.delete, {"knowledge_graph_kwd": ["entity"], "entity_kwd": sorted(change.removed_nodes)}, search.index_name(tenant_id), kb_id)

    if change.removed_edges:

        async def del_edges(from_node, to_node):
            async with chat_limiter:
                await trio.to_thread.run_sync(
                    settings.docStoreConn.delete, {"knowledge_graph_kwd": ["relation"], "from_entity_kwd": from_node, "to_entity_kwd": to_node}, search.index_name(tenant_id), kb_id
                )

        async with trio.open_nursery() as nursery:
            for from_node, to_node in change.removed_edges:
                nursery.start_soon(del_edges, from_node, to_node)

    now = trio.current_time()
    if callback:
        callback(msg=f"set_graph removed {len(change.removed_nodes)} nodes and {len(change.removed_edges)} edges from index in {now - start:.2f}s.")
    start = now

    chunks = [
        {
            "id": get_uuid(),
            "content_with_weight": json.dumps(nx.node_link_data(graph, edges="edges"), ensure_ascii=False),
            "knowledge_graph_kwd": "graph",
            "kb_id": kb_id,
            "source_id": graph.graph.get("source_id", []),
            "available_int": 0,
            "removed_kwd": "N",
        }
    ]

    # generate updated subgraphs
    for source in graph.graph["source_id"]:
        subgraph = graph.subgraph([n for n in graph.nodes if source in graph.nodes[n]["source_id"]]).copy()
        subgraph.graph["source_id"] = [source]
        for n in subgraph.nodes:
            subgraph.nodes[n]["source_id"] = [source]
        chunks.append(
            {
                "id": get_uuid(),
                "content_with_weight": json.dumps(nx.node_link_data(subgraph, edges="edges"), ensure_ascii=False),
                "knowledge_graph_kwd": "subgraph",
                "kb_id": kb_id,
                "source_id": [source],
                "available_int": 0,
                "removed_kwd": "N",
            }
        )

    async with trio.open_nursery() as nursery:
        for ii, node in enumerate(change.added_updated_nodes):
            node_attrs = graph.nodes[node]
            nursery.start_soon(graph_node_to_chunk, kb_id, embd_mdl, node, node_attrs, chunks)
            if ii % 100 == 9 and callback:
                callback(msg=f"Get embedding of nodes: {ii}/{len(change.added_updated_nodes)}")

    async with trio.open_nursery() as nursery:
        for ii, (from_node, to_node) in enumerate(change.added_updated_edges):
            edge_attrs = graph.get_edge_data(from_node, to_node)
            if not edge_attrs:
                # added_updated_edges could record a non-existing edge if both from_node and to_node participate in nodes merging.
                continue
            nursery.start_soon(graph_edge_to_chunk, kb_id, embd_mdl, from_node, to_node, edge_attrs, chunks)
            if ii % 100 == 9 and callback:
                callback(msg=f"Get embedding of edges: {ii}/{len(change.added_updated_edges)}")

    now = trio.current_time()
    if callback:
        callback(msg=f"set_graph converted graph change to {len(chunks)} chunks in {now - start:.2f}s.")
    start = now

    enable_timeout_assertion = os.environ.get("ENABLE_TIMEOUT_ASSERTION")
    es_bulk_size = 4
    for b in range(0, len(chunks), es_bulk_size):
        with trio.fail_after(3 if enable_timeout_assertion else 30000000):
            doc_store_result = await trio.to_thread.run_sync(lambda: settings.docStoreConn.insert(chunks[b : b + es_bulk_size], search.index_name(tenant_id), kb_id))
        if b % 100 == es_bulk_size and callback:
            callback(msg=f"Insert chunks: {b}/{len(chunks)}")
        if doc_store_result:
            error_message = f"Insert chunk error: {doc_store_result}, please check log file and Elasticsearch/Infinity status!"
            raise Exception(error_message)
    now = trio.current_time()
    if callback:
        callback(msg=f"set_graph added/updated {len(change.added_updated_nodes)} nodes and {len(change.added_updated_edges)} edges from index in {now - start:.2f}s.")


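# Despite the name, this only checks whether the first element of `subseq` occurs in
# `seq` immediately followed by the last element of `subseq`, e.g.
# is_continuous_subsequence((1, 2), (0, 1, 2, 3)) is True.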
def is_continuous_subsequence(subseq, seq):
    def find_all_indexes(tup, value):
        indexes = []
        start = 0
        while True:
            try:
                index = tup.index(value, start)
                indexes.append(index)
                start = index + 1
            except ValueError:
                break
        return indexes

    index_list = find_all_indexes(seq, subseq[0])
    for idx in index_list:
        if idx != len(seq) - 1:
            if seq[idx + 1] == subseq[-1]:
                return True
    return False


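# Extends each path-like tuple in list1 with tuples from list2 whose first element
# matches its last element, skipping candidates (in either orientation) that already
# appear as an adjacent pair in the tuple; tuples that gain no extension are kept as-is.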
def merge_tuples(list1, list2):
    result = []
    for tup in list1:
        last_element = tup[-1]
        if last_element in tup[:-1]:
            result.append(tup)
        else:
            matching_tuples = [t for t in list2 if t[0] == last_element]
            already_match_flag = 0
            for match in matching_tuples:
                matchh = (match[1], match[0])
                if is_continuous_subsequence(match, tup) or is_continuous_subsequence(matchh, tup):
                    continue
                already_match_flag = 1
                merged_tuple = tup + match[1:]
                result.append(merged_tuple)
            if not already_match_flag:
                result.append(tup)
    return result


async def get_entity_type2samples(idxnms, kb_ids: list):
    es_res = await trio.to_thread.run_sync(lambda: settings.retrievaler.search({"knowledge_graph_kwd": "ty2ents", "kb_id": kb_ids, "size": 10000, "fields": ["content_with_weight"]}, idxnms, kb_ids))

    res = defaultdict(list)
    for id in es_res.ids:
        smp = es_res.field[id].get("content_with_weight")
        if not smp:
            continue
        try:
            smp = json.loads(smp)
        except Exception as e:
            logging.exception(e)
            continue

        for ty, ents in smp.items():
            res[ty].extend(ents)
    return res


def flat_uniq_list(arr, key):
    res = []
    for a in arr:
        a = a[key]
        if isinstance(a, list):
            res.extend(a)
        else:
            res.append(a)
    return list(set(res))


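# Rebuilds the whole-KB graph by composing the stored per-source "subgraph" chunks,
# paging through the doc store 256 records at a time. Sources listed in
# exclude_rebuild (a single id or a list) are skipped, and node-level source_id
# lists are merged when the same entity appears in several subgraphs.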
async def rebuild_graph(tenant_id, kb_id, exclude_rebuild=None):
    graph = nx.Graph()
    flds = ["knowledge_graph_kwd", "content_with_weight", "source_id"]
    bs = 256
    for i in range(0, 1024 * bs, bs):
        es_res = await trio.to_thread.run_sync(
            lambda: settings.docStoreConn.search(flds, [], {"kb_id": kb_id, "knowledge_graph_kwd": ["subgraph"]}, [], OrderByExpr(), i, bs, search.index_name(tenant_id), [kb_id])
        )
        # tot = settings.docStoreConn.getTotal(es_res)
        es_res = settings.docStoreConn.getFields(es_res, flds)

        if len(es_res) == 0:
            break

        for id, d in es_res.items():
            assert d["knowledge_graph_kwd"] == "subgraph"
            if isinstance(exclude_rebuild, list):
                if sum([n in d["source_id"] for n in exclude_rebuild]):
                    continue
            elif exclude_rebuild in d["source_id"]:
                continue

            next_graph = json_graph.node_link_graph(json.loads(d["content_with_weight"]), edges="edges")
            merged_graph = nx.compose(graph, next_graph)
            merged_source = {n: graph.nodes[n]["source_id"] + next_graph.nodes[n]["source_id"] for n in graph.nodes & next_graph.nodes}
            nx.set_node_attributes(merged_graph, merged_source, "source_id")
            if "source_id" in graph.graph:
                merged_graph.graph["source_id"] = graph.graph["source_id"] + next_graph.graph["source_id"]
            else:
                merged_graph.graph["source_id"] = next_graph.graph["source_id"]
            graph = merged_graph

    if len(graph.nodes) == 0:
        return None
    graph.graph["source_id"] = sorted(graph.graph["source_id"])
    return graph