Change Flask to FastAPI

commit 88db2539b0
2025-10-13 13:18:03 +08:00
476 changed files with 739,741 additions and 0 deletions

rag/svr/cache_file_svr.py (new file, 60 lines)

@@ -0,0 +1,60 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import time
import traceback

from api.db.db_models import close_connection
from api.db.services.task_service import TaskService
from rag.utils.storage_factory import STORAGE_IMPL
from rag.utils.redis_conn import REDIS_CONN


def collect():
    doc_locations = TaskService.get_ongoing_doc_name()
    logging.debug(doc_locations)
    if len(doc_locations) == 0:
        time.sleep(1)
        return
    return doc_locations


def main():
    locations = collect()
    if not locations:
        return
    logging.info(f"TASKS: {len(locations)}")
    for kb_id, loc in locations:
        try:
            if REDIS_CONN.is_alive():
                try:
                    key = "{}/{}".format(kb_id, loc)
                    if REDIS_CONN.exist(key):
                        continue
                    file_bin = STORAGE_IMPL.get(kb_id, loc)
                    # Cache the file for 12 minutes (12 * 60 seconds).
                    REDIS_CONN.transaction(key, file_bin, 12 * 60)
                    logging.info("CACHE: {}".format(loc))
                except Exception:
                    # traceback.print_stack() takes a frame, not an exception;
                    # print_exc() reports the active exception instead.
                    traceback.print_exc()
        except Exception:
            traceback.print_exc()


if __name__ == "__main__":
    while True:
        main()
        # Release DB connections between polling rounds.
        close_connection()
        time.sleep(1)
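
For context, a downstream worker would consult this cache before hitting object storage. A minimal sketch of that read path, not part of this commit: get_file_bin is a hypothetical helper, and REDIS_CONN.get is assumed to return the cached value (or a falsy result on a miss), mirroring how REDIS_CONN.exist and STORAGE_IMPL.get are used above.

    def get_file_bin(kb_id: str, loc: str) -> bytes:
        # Same key scheme as the caching loop above.
        key = "{}/{}".format(kb_id, loc)
        cached = REDIS_CONN.get(key)
        if cached:
            # Depending on client config, Redis may return str or bytes.
            return cached if isinstance(cached, bytes) else cached.encode("utf-8")
        # Cache miss: fall back to object storage.
        return STORAGE_IMPL.get(kb_id, loc)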

rag/svr/discord_svr.py (new file, 81 lines)

@@ -0,0 +1,81 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import discord
import requests
import base64
import asyncio

URL = '{YOUR_IP_ADDRESS:PORT}/v1/api/completion_aibotk'  # Default: https://demo.ragflow.io/v1/api/completion_aibotk
JSON_DATA = {
    "conversation_id": "xxxxxxxxxxxxxxxxxxxxxxxxxxx",  # Get a conversation id from /api/new_conversation
    "Authorization": "ragflow-xxxxxxxxxxxxxxxxxxxxxxxxxxxxx",  # RAGFlow Assistant chat bot API key
    "word": ""  # The user's question; no need to initialize
}
DISCORD_BOT_KEY = "xxxxxxxxxxxxxxxxxxxxxxxxxx"  # Get DISCORD_BOT_KEY from your Discord application

intents = discord.Intents.default()
intents.message_content = True
client = discord.Client(intents=intents)


@client.event
async def on_ready():
    logging.info(f'We have logged in as {client.user}')


@client.event
async def on_message(message):
    if message.author == client.user:
        return
    if client.user.mentioned_in(message):
        # A bare mention ("<@bot_id>") splits into a single element.
        if len(message.content.split('> ')) == 1:
            await message.channel.send("Hi~ How can I help you? ")
        else:
            JSON_DATA['word'] = message.content.split('> ')[1]
            response = requests.post(URL, json=JSON_DATA)
            response_data = response.json().get('data', [])
            res = ""  # initialize so a missing text item cannot raise NameError
            image = None
            for i in response_data:
                if i['type'] == 1:  # plain-text answer
                    res = i['content']
                if i['type'] == 3:  # base64-encoded image
                    image_data = base64.b64decode(i['url'])
                    with open('tmp_image.png', 'wb') as file:
                        file.write(image_data)
                    image = discord.File('tmp_image.png')
            await message.channel.send(f"{message.author.mention}{res}")
            if image is not None:
                await message.channel.send(file=image)


loop = asyncio.get_event_loop()
try:
    loop.run_until_complete(client.start(DISCORD_BOT_KEY))
except KeyboardInterrupt:
    loop.run_until_complete(client.close())
finally:
    loop.close()
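
The endpoint can be exercised without Discord at all. A minimal sketch: the host and port are assumptions (adjust to your deployment), and the response shape ({"data": [{"type": 1, "content": ...}, {"type": 3, "url": <base64>}]}) is inferred from the on_message handler above.

    import requests

    resp = requests.post(
        "http://127.0.0.1:9380/v1/api/completion_aibotk",  # assumed host/port
        json={
            "conversation_id": "xxxxxxxx",       # from /api/new_conversation
            "Authorization": "ragflow-xxxxxxxx",  # your RAGFlow API key
            "word": "What is RAGFlow?",
        },
    )
    for item in resp.json().get("data", []):
        if item["type"] == 1:    # plain-text answer
            print(item["content"])
        elif item["type"] == 3:  # base64-encoded image
            print("received image ({} base64 chars)".format(len(item["url"])))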

rag/svr/jina_server.py (new file, 109 lines)

@@ -0,0 +1,109 @@
#
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from jina import Deployment
from docarray import BaseDoc
from jina import Executor, requests
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
import argparse
import torch


class Prompt(BaseDoc):
    message: list[dict]
    gen_conf: dict


class Generation(BaseDoc):
    text: str


# Populated in __main__ before the Deployment spawns the executor.
tokenizer = None
model_name = ""


class TokenStreamingExecutor(Executor):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.model = AutoModelForCausalLM.from_pretrained(
            model_name, device_map="auto", torch_dtype="auto"
        )

    @requests(on="/chat")
    async def generate(self, doc: Prompt, **kwargs) -> Generation:
        text = tokenizer.apply_chat_template(
            doc.message,
            tokenize=False,
        )
        inputs = tokenizer([text], return_tensors="pt")
        generation_config = GenerationConfig(
            **doc.gen_conf,
            eos_token_id=tokenizer.eos_token_id,
            pad_token_id=tokenizer.eos_token_id,
        )
        generated_ids = self.model.generate(
            inputs.input_ids, generation_config=generation_config
        )
        # Strip the prompt tokens so only the completion is decoded.
        generated_ids = [
            output_ids[len(input_ids):]
            for input_ids, output_ids in zip(inputs.input_ids, generated_ids)
        ]
        response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
        yield Generation(text=response)

    @requests(on="/stream")
    async def task(self, doc: Prompt, **kwargs) -> Generation:
        text = tokenizer.apply_chat_template(
            doc.message,
            tokenize=False,
        )
        # Renamed from `input`, which shadows the Python builtin.
        inputs = tokenizer([text], return_tensors="pt")
        input_len = inputs["input_ids"].shape[1]
        max_new_tokens = 512
        if "max_new_tokens" in doc.gen_conf:
            max_new_tokens = doc.gen_conf.pop("max_new_tokens")
        generation_config = GenerationConfig(
            **doc.gen_conf,
            eos_token_id=tokenizer.eos_token_id,
            pad_token_id=tokenizer.eos_token_id,
        )
        # Generate one token per step so each token can be streamed back
        # to the client as soon as it is produced.
        for _ in range(max_new_tokens):
            output = self.model.generate(
                **inputs, max_new_tokens=1, generation_config=generation_config
            )
            if output[0][-1] == tokenizer.eos_token_id:
                break
            yield Generation(
                text=tokenizer.decode(output[0][input_len:], skip_special_tokens=True)
            )
            # Feed the full sequence back in as the next step's prompt.
            inputs = {
                "input_ids": output,
                "attention_mask": torch.ones(1, len(output[0])),
            }


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, help="Model name or path")
    parser.add_argument("--port", default=12345, type=int, help="Jina serving port")
    args = parser.parse_args()
    model_name = args.model_name
    tokenizer = AutoTokenizer.from_pretrained(args.model_name)
    with Deployment(
        uses=TokenStreamingExecutor, port=args.port, protocol="grpc"
    ) as dep:
        dep.block()
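
A client can call this executor over gRPC with jina's Client, reusing the Prompt and Generation docs defined above. A minimal sketch under assumptions: the port matches the default above, stream_doc is available in your jina release, and the prompt contents are illustrative.

    import asyncio
    from jina import Client

    async def demo():
        client = Client(port=12345, protocol="grpc", asyncio=True)
        prompt = Prompt(
            message=[{"role": "user", "content": "Hello!"}],
            gen_conf={"max_new_tokens": 64},
        )
        # /stream yields one Generation per newly generated token.
        async for resp in client.stream_doc(
            on="/stream", inputs=prompt, return_type=Generation
        ):
            print(resp.text, end="", flush=True)

    asyncio.run(demo())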

rag/svr/task_executor.py (new file, 1009 lines)

File diff suppressed because it is too large.
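
The suppressed diff is where the Flask-to-FastAPI change named in the commit message largely lands. As a hedged sketch of that migration pattern only (the route and payload below are hypothetical, not taken from task_executor.py):

    # Flask style, for comparison:
    #   @app.route("/v1/tasks", methods=["POST"])
    #   def create_task():
    #       payload = request.get_json()
    #       return jsonify({"id": payload["id"]})

    from fastapi import FastAPI
    from pydantic import BaseModel

    app = FastAPI()

    class TaskIn(BaseModel):
        id: str

    @app.post("/v1/tasks")
    async def create_task(payload: TaskIn):
        # FastAPI validates the request body against TaskIn and
        # serializes the return value to JSON automatically.
        return {"id": payload.id}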