Change flask to fastapi

18 api/__init__.py Normal file
@@ -0,0 +1,18 @@
#
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from beartype.claw import beartype_this_package

beartype_this_package()
181 api/apps/__init__.py Normal file
@@ -0,0 +1,181 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import logging
from importlib.util import module_from_spec, spec_from_file_location
from pathlib import Path
from flask import Blueprint, Flask
from werkzeug.wrappers.request import Request
from flask_cors import CORS
from flasgger import Swagger
from itsdangerous.url_safe import URLSafeTimedSerializer as Serializer

from api.db import StatusEnum
from api.db.db_models import close_connection
from api.db.services import UserService
from api.utils.json import CustomJSONEncoder
from api.utils import commands

from flask_mail import Mail
from flask_session import Session
from flask_login import LoginManager
from api import settings
from api.utils.api_utils import server_error_response
from api.constants import API_VERSION

__all__ = ["app"]

Request.json = property(lambda self: self.get_json(force=True, silent=True))

app = Flask(__name__)
smtp_mail_server = Mail()

# Add this at the beginning of your file to configure Swagger UI
swagger_config = {
    "headers": [],
    "specs": [
        {
            "endpoint": "apispec",
            "route": "/apispec.json",
            "rule_filter": lambda rule: True,  # Include all endpoints
            "model_filter": lambda tag: True,  # Include all models
        }
    ],
    "static_url_path": "/flasgger_static",
    "swagger_ui": True,
    "specs_route": "/apidocs/",
}

swagger = Swagger(
    app,
    config=swagger_config,
    template={
        "swagger": "2.0",
        "info": {
            "title": "RAGFlow API",
            "description": "",
            "version": "1.0.0",
        },
        "securityDefinitions": {
            "ApiKeyAuth": {"type": "apiKey", "name": "Authorization", "in": "header"}
        },
    },
)

CORS(app, supports_credentials=True, max_age=2592000)
app.url_map.strict_slashes = False
app.json_encoder = CustomJSONEncoder
app.errorhandler(Exception)(server_error_response)

## convenient for dev and debug
# app.config["LOGIN_DISABLED"] = True
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
app.config["MAX_CONTENT_LENGTH"] = int(
    os.environ.get("MAX_CONTENT_LENGTH", 1024 * 1024 * 1024)
)

Session(app)
login_manager = LoginManager()
login_manager.init_app(app)

commands.register_commands(app)


def search_pages_path(pages_dir):
    app_path_list = [
        path for path in pages_dir.glob("*_app.py") if not path.name.startswith(".")
    ]
    api_path_list = [
        path for path in pages_dir.glob("*sdk/*.py") if not path.name.startswith(".")
    ]
    app_path_list.extend(api_path_list)
    return app_path_list


def register_page(page_path):
    path = f"{page_path}"

    page_name = page_path.stem.removesuffix("_app")
    module_name = ".".join(
        page_path.parts[page_path.parts.index("api"): -1] + (page_name,)
    )

    spec = spec_from_file_location(module_name, page_path)
    page = module_from_spec(spec)
    page.app = app
    page.manager = Blueprint(page_name, module_name)
    sys.modules[module_name] = page
    spec.loader.exec_module(page)
    page_name = getattr(page, "page_name", page_name)
    sdk_path = "\\sdk\\" if sys.platform.startswith("win") else "/sdk/"
    url_prefix = (
        f"/api/{API_VERSION}" if sdk_path in path else f"/{API_VERSION}/{page_name}"
    )

    app.register_blueprint(page.manager, url_prefix=url_prefix)
    return url_prefix


pages_dir = [
    Path(__file__).parent,
    Path(__file__).parent.parent / "api" / "apps",
    Path(__file__).parent.parent / "api" / "apps" / "sdk",
]

client_urls_prefix = [
    register_page(path) for dir in pages_dir for path in search_pages_path(dir)
]


@login_manager.request_loader
def load_user(web_request):
    jwt = Serializer(secret_key=settings.SECRET_KEY)
    authorization = web_request.headers.get("Authorization")
    if authorization:
        try:
            access_token = str(jwt.loads(authorization))

            if not access_token or not access_token.strip():
                logging.warning("Authentication attempt with empty access token")
                return None

            # Access tokens should be UUIDs (32 hex characters)
            if len(access_token.strip()) < 32:
                logging.warning(f"Authentication attempt with invalid token format: {len(access_token)} chars")
                return None

            user = UserService.query(
                access_token=access_token, status=StatusEnum.VALID.value
            )
            if user:
                if not user[0].access_token or not user[0].access_token.strip():
                    logging.warning(f"User {user[0].email} has empty access_token in database")
                    return None
                return user[0]
            else:
                return None
        except Exception as e:
            logging.warning(f"load_user got exception {e}")
            return None
    else:
        return None


@app.teardown_request
def _db_close(exc):
    close_connection()
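Note: register_page() above injects `app` and a fresh Blueprint named `manager` into each `*_app.py` module before executing it, which is why the route files in this commit use `manager` without importing it (hence their `# noqa: F821` markers). As a reference point only, a minimal page module under that convention might look like the following sketch; the file name and endpoint are illustrative, not part of this commit:

```python
# Hypothetical page module: api/apps/example_app.py
# `app` and `manager` are injected by register_page() before this file runs,
# so they are used here without an import (hence the noqa: F821 marker).
from api.utils.api_utils import get_json_result


@manager.route('/ping', methods=['GET'])  # noqa: F821
def ping():
    # Trivial endpoint for illustration only.
    return get_json_result(data="pong")
```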
181 api/apps/__init___fastapi.py Normal file
@@ -0,0 +1,181 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import logging
from importlib.util import module_from_spec, spec_from_file_location
from pathlib import Path
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi.middleware.trustedhost import TrustedHostMiddleware
from starlette.middleware.sessions import SessionMiddleware
try:
    from itsdangerous.url_safe import URLSafeTimedSerializer as Serializer
except ImportError:
    # Fall back to jwt if itsdangerous is not available
    import jwt
    Serializer = jwt

from api.db import StatusEnum
from api.db.db_models import close_connection
from api.db.services import UserService
from api.utils.json import CustomJSONEncoder
from api.utils import commands

from api import settings
from api.utils.api_utils import server_error_response
from api.constants import API_VERSION

__all__ = ["app"]


def create_app() -> FastAPI:
    """Create the FastAPI application instance."""
    app = FastAPI(
        title="RAGFlow API",
        description="RAGFlow API Server",
        version="1.0.0",
        docs_url="/apidocs/",
        redoc_url="/redoc/",
        openapi_url="/apispec.json"
    )

    # Add CORS middleware
    app.add_middleware(
        CORSMiddleware,
        allow_origins=["*"],  # Restrict to specific domains in production
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
        max_age=2592000
    )

    # Add trusted-host middleware
    app.add_middleware(
        TrustedHostMiddleware,
        allowed_hosts=["*"]  # Restrict to specific domains in production
    )

    # Add session middleware
    app.add_middleware(
        SessionMiddleware,
        secret_key=settings.SECRET_KEY,
        max_age=2592000
    )

    # Register the global exception handler
    @app.exception_handler(Exception)
    async def global_exception_handler(request, exc):
        return server_error_response(exc)

    return app


def search_pages_path(pages_dir):
    """Collect the page module paths."""
    app_path_list = [
        path for path in pages_dir.glob("*_app_fastapi.py") if not path.name.startswith(".")
    ]
    api_path_list = [
        path for path in pages_dir.glob("*sdk/*.py") if not path.name.startswith(".")
    ]
    app_path_list.extend(api_path_list)
    return app_path_list


def register_page(app: FastAPI, page_path):
    """Register a page module's routes."""
    path = f"{page_path}"

    page_name = page_path.stem.removesuffix("_app_fastapi")
    module_name = ".".join(
        page_path.parts[page_path.parts.index("api"): -1] + (page_name,)
    )

    spec = spec_from_file_location(module_name, page_path)
    page = module_from_spec(spec)
    page.app = app
    page.router = None  # FastAPI uses an APIRouter instead of a Blueprint
    sys.modules[module_name] = page
    spec.loader.exec_module(page)
    page_name = getattr(page, "page_name", page_name)
    sdk_path = "\\sdk\\" if sys.platform.startswith("win") else "/sdk/"
    url_prefix = (
        f"/api/{API_VERSION}" if sdk_path in path else f"/{API_VERSION}/{page_name}"
    )

    # With FastAPI, only include the module if it actually defines a router
    if hasattr(page, 'router') and page.router:
        app.include_router(page.router, prefix=url_prefix)
    return url_prefix


def setup_routes(app: FastAPI):
    """Set up routes - register all routers."""
    from api.apps.user_app_fastapi import router as user_router
    from api.apps.kb_app import router as kb_router
    from api.apps.document_app import router as document_router
    from api.apps.file_app import router as file_router
    from api.apps.file2document_app import router as file2document_router

    app.include_router(user_router, prefix=f"/{API_VERSION}/user", tags=["User"])
    app.include_router(kb_router, prefix=f"/{API_VERSION}/kb", tags=["KB"])
    app.include_router(document_router, prefix=f"/{API_VERSION}/document", tags=["Document"])
    app.include_router(file_router, prefix=f"/{API_VERSION}/file", tags=["File"])
    app.include_router(file2document_router, prefix=f"/{API_VERSION}/file2document", tags=["File2Document"])


def get_current_user_from_token(authorization: str):
    """Resolve the current user from an access token."""
    jwt = Serializer(secret_key=settings.SECRET_KEY)

    if authorization:
        try:
            access_token = str(jwt.loads(authorization))

            if not access_token or not access_token.strip():
                logging.warning("Authentication attempt with empty access token")
                return None

            # Access tokens should be UUIDs (32 hex characters)
            if len(access_token.strip()) < 32:
                logging.warning(f"Authentication attempt with invalid token format: {len(access_token)} chars")
                return None

            user = UserService.query(
                access_token=access_token, status=StatusEnum.VALID.value
            )
            if user:
                if not user[0].access_token or not user[0].access_token.strip():
                    logging.warning(f"User {user[0].email} has empty access_token in database")
                    return None
                return user[0]
            else:
                return None
        except Exception as e:
            logging.warning(f"load_user got exception {e}")
            return None
    else:
        return None


# Create the application instance
app = create_app()


@app.middleware("http")
async def db_close_middleware(request, call_next):
    """Middleware that closes the DB connection after each request."""
    try:
        response = await call_next(request)
        return response
    finally:
        close_connection()


setup_routes(app)
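Note: register_page() in the FastAPI variant only mounts a page module when the module exposes a non-empty `router` attribute after execution. A minimal `*_app_fastapi.py` page module compatible with that convention might look like the sketch below; the module name, `page_name`, and endpoint are illustrative assumptions, not part of this commit:

```python
# Hypothetical page module: api/apps/example_app_fastapi.py
from fastapi import APIRouter

router = APIRouter()      # picked up by register_page() / setup_routes()
page_name = "example"     # optional override used to build the URL prefix


@router.get("/ping")
async def ping():
    # Minimal endpoint for illustration only.
    return {"code": 0, "message": "", "data": "pong"}
```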
898 api/apps/api_app.py Normal file
@@ -0,0 +1,898 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import os
import re
from datetime import datetime, timedelta
from flask import request, Response
from api.db.services.llm_service import LLMBundle
from flask_login import login_required, current_user

from api.db import VALID_FILE_TYPES, VALID_TASK_STATUS, FileType, LLMType, ParserType, FileSource
from api.db.db_models import APIToken, Task, File
from api.db.services import duplicate_name
from api.db.services.api_service import APITokenService, API4ConversationService
from api.db.services.dialog_service import DialogService, chat
from api.db.services.document_service import DocumentService, doc_upload_and_parse
from api.db.services.file2document_service import File2DocumentService
from api.db.services.file_service import FileService
from api.db.services.knowledgebase_service import KnowledgebaseService
from api.db.services.task_service import queue_tasks, TaskService
from api.db.services.user_service import UserTenantService
from api import settings
from api.utils import get_uuid, current_timestamp, datetime_format
from api.utils.api_utils import server_error_response, get_data_error_result, get_json_result, validate_request, \
    generate_confirmation_token

from api.utils.file_utils import filename_type, thumbnail
from rag.app.tag import label_question
from rag.prompts.generator import keyword_extraction
from rag.utils.storage_factory import STORAGE_IMPL

from api.db.services.canvas_service import UserCanvasService
from agent.canvas import Canvas
from functools import partial
from pathlib import Path
@manager.route('/new_token', methods=['POST'])  # noqa: F821
@login_required
def new_token():
    req = request.json
    try:
        tenants = UserTenantService.query(user_id=current_user.id)
        if not tenants:
            return get_data_error_result(message="Tenant not found!")

        tenant_id = tenants[0].tenant_id
        obj = {"tenant_id": tenant_id, "token": generate_confirmation_token(tenant_id),
               "create_time": current_timestamp(),
               "create_date": datetime_format(datetime.now()),
               "update_time": None,
               "update_date": None
               }
        if req.get("canvas_id"):
            obj["dialog_id"] = req["canvas_id"]
            obj["source"] = "agent"
        else:
            obj["dialog_id"] = req["dialog_id"]

        if not APITokenService.save(**obj):
            return get_data_error_result(message="Fail to new a dialog!")

        return get_json_result(data=obj)
    except Exception as e:
        return server_error_response(e)


@manager.route('/token_list', methods=['GET'])  # noqa: F821
@login_required
def token_list():
    try:
        tenants = UserTenantService.query(user_id=current_user.id)
        if not tenants:
            return get_data_error_result(message="Tenant not found!")

        id = request.args["dialog_id"] if "dialog_id" in request.args else request.args["canvas_id"]
        objs = APITokenService.query(tenant_id=tenants[0].tenant_id, dialog_id=id)
        return get_json_result(data=[o.to_dict() for o in objs])
    except Exception as e:
        return server_error_response(e)


@manager.route('/rm', methods=['POST'])  # noqa: F821
@validate_request("tokens", "tenant_id")
@login_required
def rm():
    req = request.json
    try:
        for token in req["tokens"]:
            APITokenService.filter_delete(
                [APIToken.tenant_id == req["tenant_id"], APIToken.token == token])
        return get_json_result(data=True)
    except Exception as e:
        return server_error_response(e)


@manager.route('/stats', methods=['GET'])  # noqa: F821
@login_required
def stats():
    try:
        tenants = UserTenantService.query(user_id=current_user.id)
        if not tenants:
            return get_data_error_result(message="Tenant not found!")
        objs = API4ConversationService.stats(
            tenants[0].tenant_id,
            request.args.get(
                "from_date",
                (datetime.now() -
                 timedelta(
                     days=7)).strftime("%Y-%m-%d 00:00:00")),
            request.args.get(
                "to_date",
                datetime.now().strftime("%Y-%m-%d %H:%M:%S")),
            "agent" if "canvas_id" in request.args else None)
        res = {
            "pv": [(o["dt"], o["pv"]) for o in objs],
            "uv": [(o["dt"], o["uv"]) for o in objs],
            "speed": [(o["dt"], float(o["tokens"]) / (float(o["duration"] + 0.1))) for o in objs],
            "tokens": [(o["dt"], float(o["tokens"]) / 1000.) for o in objs],
            "round": [(o["dt"], o["round"]) for o in objs],
            "thumb_up": [(o["dt"], o["thumb_up"]) for o in objs]
        }
        return get_json_result(data=res)
    except Exception as e:
        return server_error_response(e)


@manager.route('/new_conversation', methods=['GET'])  # noqa: F821
def set_conversation():
    token = request.headers.get('Authorization').split()[1]
    objs = APIToken.query(token=token)
    if not objs:
        return get_json_result(
            data=False, message='Authentication error: API key is invalid!', code=settings.RetCode.AUTHENTICATION_ERROR)
    try:
        if objs[0].source == "agent":
            e, cvs = UserCanvasService.get_by_id(objs[0].dialog_id)
            if not e:
                return server_error_response("canvas not found.")
            if not isinstance(cvs.dsl, str):
                cvs.dsl = json.dumps(cvs.dsl, ensure_ascii=False)
            canvas = Canvas(cvs.dsl, objs[0].tenant_id)
            conv = {
                "id": get_uuid(),
                "dialog_id": cvs.id,
                "user_id": request.args.get("user_id", ""),
                "message": [{"role": "assistant", "content": canvas.get_prologue()}],
                "source": "agent"
            }
            API4ConversationService.save(**conv)
            return get_json_result(data=conv)
        else:
            e, dia = DialogService.get_by_id(objs[0].dialog_id)
            if not e:
                return get_data_error_result(message="Dialog not found")
            conv = {
                "id": get_uuid(),
                "dialog_id": dia.id,
                "user_id": request.args.get("user_id", ""),
                "message": [{"role": "assistant", "content": dia.prompt_config["prologue"]}]
            }
            API4ConversationService.save(**conv)
            return get_json_result(data=conv)
    except Exception as e:
        return server_error_response(e)
@manager.route('/completion', methods=['POST'])  # noqa: F821
@validate_request("conversation_id", "messages")
def completion():
    token = request.headers.get('Authorization').split()[1]
    objs = APIToken.query(token=token)
    if not objs:
        return get_json_result(
            data=False, message='Authentication error: API key is invalid!', code=settings.RetCode.AUTHENTICATION_ERROR)
    req = request.json
    e, conv = API4ConversationService.get_by_id(req["conversation_id"])
    if not e:
        return get_data_error_result(message="Conversation not found!")
    if "quote" not in req:
        req["quote"] = False

    msg = []
    for m in req["messages"]:
        if m["role"] == "system":
            continue
        if m["role"] == "assistant" and not msg:
            continue
        msg.append(m)
    if not msg[-1].get("id"):
        msg[-1]["id"] = get_uuid()
    message_id = msg[-1]["id"]

    def fillin_conv(ans):
        nonlocal conv, message_id
        if not conv.reference:
            conv.reference.append(ans["reference"])
        else:
            conv.reference[-1] = ans["reference"]
        conv.message[-1] = {"role": "assistant", "content": ans["answer"], "id": message_id}
        ans["id"] = message_id

    def rename_field(ans):
        reference = ans['reference']
        if not isinstance(reference, dict):
            return
        for chunk_i in reference.get('chunks', []):
            if 'docnm_kwd' in chunk_i:
                chunk_i['doc_name'] = chunk_i['docnm_kwd']
                chunk_i.pop('docnm_kwd')

    try:
        if conv.source == "agent":
            stream = req.get("stream", True)
            conv.message.append(msg[-1])
            e, cvs = UserCanvasService.get_by_id(conv.dialog_id)
            if not e:
                return server_error_response("canvas not found.")
            del req["conversation_id"]
            del req["messages"]

            if not isinstance(cvs.dsl, str):
                cvs.dsl = json.dumps(cvs.dsl, ensure_ascii=False)

            if not conv.reference:
                conv.reference = []
            conv.message.append({"role": "assistant", "content": "", "id": message_id})
            conv.reference.append({"chunks": [], "doc_aggs": []})

            final_ans = {"reference": [], "content": ""}
            canvas = Canvas(cvs.dsl, objs[0].tenant_id)

            canvas.messages.append(msg[-1])
            canvas.add_user_input(msg[-1]["content"])
            answer = canvas.run(stream=stream)

            assert answer is not None, "Nothing. Is it over?"

            if stream:
                assert isinstance(answer, partial), "Nothing. Is it over?"

                def sse():
                    nonlocal answer, cvs, conv
                    try:
                        for ans in answer():
                            for k in ans.keys():
                                final_ans[k] = ans[k]
                            ans = {"answer": ans["content"], "reference": ans.get("reference", [])}
                            fillin_conv(ans)
                            rename_field(ans)
                            yield "data:" + json.dumps({"code": 0, "message": "", "data": ans},
                                                       ensure_ascii=False) + "\n\n"

                        canvas.messages.append({"role": "assistant", "content": final_ans["content"], "id": message_id})
                        canvas.history.append(("assistant", final_ans["content"]))
                        if final_ans.get("reference"):
                            canvas.reference.append(final_ans["reference"])
                        cvs.dsl = json.loads(str(canvas))
                        API4ConversationService.append_message(conv.id, conv.to_dict())
                    except Exception as e:
                        yield "data:" + json.dumps({"code": 500, "message": str(e),
                                                    "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
                                                   ensure_ascii=False) + "\n\n"
                    yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"

                resp = Response(sse(), mimetype="text/event-stream")
                resp.headers.add_header("Cache-control", "no-cache")
                resp.headers.add_header("Connection", "keep-alive")
                resp.headers.add_header("X-Accel-Buffering", "no")
                resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
                return resp

            final_ans["content"] = "\n".join(answer["content"]) if "content" in answer else ""
            canvas.messages.append({"role": "assistant", "content": final_ans["content"], "id": message_id})
            if final_ans.get("reference"):
                canvas.reference.append(final_ans["reference"])
            cvs.dsl = json.loads(str(canvas))

            result = {"answer": final_ans["content"], "reference": final_ans.get("reference", [])}
            fillin_conv(result)
            API4ConversationService.append_message(conv.id, conv.to_dict())
            rename_field(result)
            return get_json_result(data=result)

        # ******************For dialog******************
        conv.message.append(msg[-1])
        e, dia = DialogService.get_by_id(conv.dialog_id)
        if not e:
            return get_data_error_result(message="Dialog not found!")
        del req["conversation_id"]
        del req["messages"]

        if not conv.reference:
            conv.reference = []
        conv.message.append({"role": "assistant", "content": "", "id": message_id})
        conv.reference.append({"chunks": [], "doc_aggs": []})

        def stream():
            nonlocal dia, msg, req, conv
            try:
                for ans in chat(dia, msg, True, **req):
                    fillin_conv(ans)
                    rename_field(ans)
                    yield "data:" + json.dumps({"code": 0, "message": "", "data": ans},
                                               ensure_ascii=False) + "\n\n"
                API4ConversationService.append_message(conv.id, conv.to_dict())
            except Exception as e:
                yield "data:" + json.dumps({"code": 500, "message": str(e),
                                            "data": {"answer": "**ERROR**: " + str(e), "reference": []}},
                                           ensure_ascii=False) + "\n\n"
            yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"

        if req.get("stream", True):
            resp = Response(stream(), mimetype="text/event-stream")
            resp.headers.add_header("Cache-control", "no-cache")
            resp.headers.add_header("Connection", "keep-alive")
            resp.headers.add_header("X-Accel-Buffering", "no")
            resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
            return resp

        answer = None
        for ans in chat(dia, msg, **req):
            answer = ans
            fillin_conv(ans)
            API4ConversationService.append_message(conv.id, conv.to_dict())
            break
        rename_field(answer)
        return get_json_result(data=answer)

    except Exception as e:
        return server_error_response(e)


@manager.route('/conversation/<conversation_id>', methods=['GET'])  # noqa: F821
# @login_required
def get_conversation(conversation_id):
    token = request.headers.get('Authorization').split()[1]
    objs = APIToken.query(token=token)
    if not objs:
        return get_json_result(
            data=False, message='Authentication error: API key is invalid!', code=settings.RetCode.AUTHENTICATION_ERROR)

    try:
        e, conv = API4ConversationService.get_by_id(conversation_id)
        if not e:
            return get_data_error_result(message="Conversation not found!")

        conv = conv.to_dict()
        if token != APIToken.query(dialog_id=conv['dialog_id'])[0].token:
            return get_json_result(data=False, message='Authentication error: API key is invalid for this conversation_id!',
                                   code=settings.RetCode.AUTHENTICATION_ERROR)

        for reference_i in conv['reference']:
            if reference_i is None or len(reference_i) == 0:
                continue
            for chunk_i in reference_i['chunks']:
                if 'docnm_kwd' in chunk_i.keys():
                    chunk_i['doc_name'] = chunk_i['docnm_kwd']
                    chunk_i.pop('docnm_kwd')
        return get_json_result(data=conv)
    except Exception as e:
        return server_error_response(e)
@manager.route('/document/upload', methods=['POST'])  # noqa: F821
@validate_request("kb_name")
def upload():
    token = request.headers.get('Authorization').split()[1]
    objs = APIToken.query(token=token)
    if not objs:
        return get_json_result(
            data=False, message='Authentication error: API key is invalid!', code=settings.RetCode.AUTHENTICATION_ERROR)

    kb_name = request.form.get("kb_name").strip()
    tenant_id = objs[0].tenant_id

    try:
        e, kb = KnowledgebaseService.get_by_name(kb_name, tenant_id)
        if not e:
            return get_data_error_result(
                message="Can't find this knowledgebase!")
        kb_id = kb.id
    except Exception as e:
        return server_error_response(e)

    if 'file' not in request.files:
        return get_json_result(
            data=False, message='No file part!', code=settings.RetCode.ARGUMENT_ERROR)

    file = request.files['file']
    if file.filename == '':
        return get_json_result(
            data=False, message='No file selected!', code=settings.RetCode.ARGUMENT_ERROR)

    root_folder = FileService.get_root_folder(tenant_id)
    pf_id = root_folder["id"]
    FileService.init_knowledgebase_docs(pf_id, tenant_id)
    kb_root_folder = FileService.get_kb_folder(tenant_id)
    kb_folder = FileService.new_a_file_from_kb(kb.tenant_id, kb.name, kb_root_folder["id"])

    try:
        if DocumentService.get_doc_count(kb.tenant_id) >= int(os.environ.get('MAX_FILE_NUM_PER_USER', 8192)):
            return get_data_error_result(
                message="Exceed the maximum file number of a free user!")

        filename = duplicate_name(
            DocumentService.query,
            name=file.filename,
            kb_id=kb_id)
        filetype = filename_type(filename)
        if not filetype:
            return get_data_error_result(
                message="This type of file has not been supported yet!")

        location = filename
        while STORAGE_IMPL.obj_exist(kb_id, location):
            location += "_"
        blob = request.files['file'].read()
        STORAGE_IMPL.put(kb_id, location, blob)
        doc = {
            "id": get_uuid(),
            "kb_id": kb.id,
            "parser_id": kb.parser_id,
            "parser_config": kb.parser_config,
            "created_by": kb.tenant_id,
            "type": filetype,
            "name": filename,
            "location": location,
            "size": len(blob),
            "thumbnail": thumbnail(filename, blob),
            "suffix": Path(filename).suffix.lstrip("."),
        }

        form_data = request.form
        if "parser_id" in form_data.keys():
            if request.form.get("parser_id").strip() in list(vars(ParserType).values())[1:-3]:
                doc["parser_id"] = request.form.get("parser_id").strip()
        if doc["type"] == FileType.VISUAL:
            doc["parser_id"] = ParserType.PICTURE.value
        if doc["type"] == FileType.AURAL:
            doc["parser_id"] = ParserType.AUDIO.value
        if re.search(r"\.(ppt|pptx|pages)$", filename):
            doc["parser_id"] = ParserType.PRESENTATION.value
        if re.search(r"\.(eml)$", filename):
            doc["parser_id"] = ParserType.EMAIL.value

        doc_result = DocumentService.insert(doc)
        FileService.add_file_from_kb(doc, kb_folder["id"], kb.tenant_id)
    except Exception as e:
        return server_error_response(e)

    if "run" in form_data.keys():
        if request.form.get("run").strip() == "1":
            try:
                info = {"run": 1, "progress": 0}
                info["progress_msg"] = ""
                info["chunk_num"] = 0
                info["token_num"] = 0
                DocumentService.update_by_id(doc["id"], info)
                # if str(req["run"]) == TaskStatus.CANCEL.value:
                tenant_id = DocumentService.get_tenant_id(doc["id"])
                if not tenant_id:
                    return get_data_error_result(message="Tenant not found!")

                # e, doc = DocumentService.get_by_id(doc["id"])
                TaskService.filter_delete([Task.doc_id == doc["id"]])
                e, doc = DocumentService.get_by_id(doc["id"])
                doc = doc.to_dict()
                doc["tenant_id"] = tenant_id
                bucket, name = File2DocumentService.get_storage_address(doc_id=doc["id"])
                queue_tasks(doc, bucket, name, 0)
            except Exception as e:
                return server_error_response(e)

    return get_json_result(data=doc_result.to_json())


@manager.route('/document/upload_and_parse', methods=['POST'])  # noqa: F821
@validate_request("conversation_id")
async def upload_parse():
    token = request.headers.get('Authorization').split()[1]
    objs = APIToken.query(token=token)
    if not objs:
        return get_json_result(
            data=False, message='Authentication error: API key is invalid!', code=settings.RetCode.AUTHENTICATION_ERROR)

    if 'file' not in request.files:
        return get_json_result(
            data=False, message='No file part!', code=settings.RetCode.ARGUMENT_ERROR)

    file_objs = request.files.getlist('file')
    for file_obj in file_objs:
        if file_obj.filename == '':
            return get_json_result(
                data=False, message='No file selected!', code=settings.RetCode.ARGUMENT_ERROR)

    doc_ids = await doc_upload_and_parse(request.form.get("conversation_id"), file_objs, objs[0].tenant_id)
    return get_json_result(data=doc_ids)
@manager.route('/list_chunks', methods=['POST'])  # noqa: F821
# @login_required
def list_chunks():
    token = request.headers.get('Authorization').split()[1]
    objs = APIToken.query(token=token)
    if not objs:
        return get_json_result(
            data=False, message='Authentication error: API key is invalid!', code=settings.RetCode.AUTHENTICATION_ERROR)

    req = request.json

    try:
        if "doc_name" in req.keys():
            tenant_id = DocumentService.get_tenant_id_by_name(req['doc_name'])
            doc_id = DocumentService.get_doc_id_by_doc_name(req['doc_name'])

        elif "doc_id" in req.keys():
            tenant_id = DocumentService.get_tenant_id(req['doc_id'])
            doc_id = req['doc_id']
        else:
            return get_json_result(
                data=False, message="Can't find doc_name or doc_id"
            )
        kb_ids = KnowledgebaseService.get_kb_ids(tenant_id)

        res = settings.retrievaler.chunk_list(doc_id, tenant_id, kb_ids)
        res = [
            {
                "content": res_item["content_with_weight"],
                "doc_name": res_item["docnm_kwd"],
                "image_id": res_item["img_id"]
            } for res_item in res
        ]

    except Exception as e:
        return server_error_response(e)

    return get_json_result(data=res)


@manager.route('/get_chunk/<chunk_id>', methods=['GET'])  # noqa: F821
# @login_required
def get_chunk(chunk_id):
    from rag.nlp import search
    token = request.headers.get('Authorization').split()[1]
    objs = APIToken.query(token=token)
    if not objs:
        return get_json_result(
            data=False, message='Authentication error: API key is invalid!', code=settings.RetCode.AUTHENTICATION_ERROR)
    try:
        tenant_id = objs[0].tenant_id
        kb_ids = KnowledgebaseService.get_kb_ids(tenant_id)
        chunk = settings.docStoreConn.get(chunk_id, search.index_name(tenant_id), kb_ids)
        if chunk is None:
            return server_error_response(Exception("Chunk not found"))
        k = []
        for n in chunk.keys():
            if re.search(r"(_vec$|_sm_|_tks|_ltks)", n):
                k.append(n)
        for n in k:
            del chunk[n]

        return get_json_result(data=chunk)
    except Exception as e:
        return server_error_response(e)


@manager.route('/list_kb_docs', methods=['POST'])  # noqa: F821
# @login_required
def list_kb_docs():
    token = request.headers.get('Authorization').split()[1]
    objs = APIToken.query(token=token)
    if not objs:
        return get_json_result(
            data=False, message='Authentication error: API key is invalid!', code=settings.RetCode.AUTHENTICATION_ERROR)

    req = request.json
    tenant_id = objs[0].tenant_id
    kb_name = req.get("kb_name", "").strip()

    try:
        e, kb = KnowledgebaseService.get_by_name(kb_name, tenant_id)
        if not e:
            return get_data_error_result(
                message="Can't find this knowledgebase!")
        kb_id = kb.id

    except Exception as e:
        return server_error_response(e)

    page_number = int(req.get("page", 1))
    items_per_page = int(req.get("page_size", 15))
    orderby = req.get("orderby", "create_time")
    desc = req.get("desc", True)
    keywords = req.get("keywords", "")
    status = req.get("status", [])
    if status:
        invalid_status = {s for s in status if s not in VALID_TASK_STATUS}
        if invalid_status:
            return get_data_error_result(
                message=f"Invalid filter status conditions: {', '.join(invalid_status)}"
            )
    types = req.get("types", [])
    if types:
        invalid_types = {t for t in types if t not in VALID_FILE_TYPES}
        if invalid_types:
            return get_data_error_result(
                message=f"Invalid filter conditions: {', '.join(invalid_types)} type{'s' if len(invalid_types) > 1 else ''}"
            )
    try:
        docs, tol = DocumentService.get_by_kb_id(
            kb_id, page_number, items_per_page, orderby, desc, keywords, status, types)
        docs = [{"doc_id": doc['id'], "doc_name": doc['name']} for doc in docs]

        return get_json_result(data={"total": tol, "docs": docs})

    except Exception as e:
        return server_error_response(e)


@manager.route('/document/infos', methods=['POST'])  # noqa: F821
@validate_request("doc_ids")
def docinfos():
    token = request.headers.get('Authorization').split()[1]
    objs = APIToken.query(token=token)
    if not objs:
        return get_json_result(
            data=False, message='Authentication error: API key is invalid!', code=settings.RetCode.AUTHENTICATION_ERROR)
    req = request.json
    doc_ids = req["doc_ids"]
    docs = DocumentService.get_by_ids(doc_ids)
    return get_json_result(data=list(docs.dicts()))


@manager.route('/document', methods=['DELETE'])  # noqa: F821
# @login_required
def document_rm():
    token = request.headers.get('Authorization').split()[1]
    objs = APIToken.query(token=token)
    if not objs:
        return get_json_result(
            data=False, message='Authentication error: API key is invalid!', code=settings.RetCode.AUTHENTICATION_ERROR)

    tenant_id = objs[0].tenant_id
    req = request.json
    try:
        doc_ids = DocumentService.get_doc_ids_by_doc_names(req.get("doc_names", []))
        for doc_id in req.get("doc_ids", []):
            if doc_id not in doc_ids:
                doc_ids.append(doc_id)

        if not doc_ids:
            return get_json_result(
                data=False, message="Can't find doc_names or doc_ids"
            )

    except Exception as e:
        return server_error_response(e)

    root_folder = FileService.get_root_folder(tenant_id)
    pf_id = root_folder["id"]
    FileService.init_knowledgebase_docs(pf_id, tenant_id)

    errors = ""
    docs = DocumentService.get_by_ids(doc_ids)
    doc_dic = {}
    for doc in docs:
        doc_dic[doc.id] = doc

    for doc_id in doc_ids:
        try:
            if doc_id not in doc_dic:
                return get_data_error_result(message="Document not found!")
            doc = doc_dic[doc_id]
            tenant_id = DocumentService.get_tenant_id(doc_id)
            if not tenant_id:
                return get_data_error_result(message="Tenant not found!")

            b, n = File2DocumentService.get_storage_address(doc_id=doc_id)

            if not DocumentService.remove_document(doc, tenant_id):
                return get_data_error_result(
                    message="Database error (Document removal)!")

            f2d = File2DocumentService.get_by_document_id(doc_id)
            FileService.filter_delete([File.source_type == FileSource.KNOWLEDGEBASE, File.id == f2d[0].file_id])
            File2DocumentService.delete_by_document_id(doc_id)

            STORAGE_IMPL.rm(b, n)
        except Exception as e:
            errors += str(e)

    if errors:
        return get_json_result(data=False, message=errors, code=settings.RetCode.SERVER_ERROR)

    return get_json_result(data=True)
@manager.route('/completion_aibotk', methods=['POST'])  # noqa: F821
@validate_request("Authorization", "conversation_id", "word")
def completion_faq():
    import base64
    req = request.json

    token = req["Authorization"]
    objs = APIToken.query(token=token)
    if not objs:
        return get_json_result(
            data=False, message='Authentication error: API key is invalid!', code=settings.RetCode.AUTHENTICATION_ERROR)

    e, conv = API4ConversationService.get_by_id(req["conversation_id"])
    if not e:
        return get_data_error_result(message="Conversation not found!")
    if "quote" not in req:
        req["quote"] = True

    msg = []
    msg.append({"role": "user", "content": req["word"]})
    if not msg[-1].get("id"):
        msg[-1]["id"] = get_uuid()
    message_id = msg[-1]["id"]

    def fillin_conv(ans):
        nonlocal conv, message_id
        if not conv.reference:
            conv.reference.append(ans["reference"])
        else:
            conv.reference[-1] = ans["reference"]
        conv.message[-1] = {"role": "assistant", "content": ans["answer"], "id": message_id}
        ans["id"] = message_id

    try:
        if conv.source == "agent":
            conv.message.append(msg[-1])
            e, cvs = UserCanvasService.get_by_id(conv.dialog_id)
            if not e:
                return server_error_response("canvas not found.")

            if not isinstance(cvs.dsl, str):
                cvs.dsl = json.dumps(cvs.dsl, ensure_ascii=False)

            if not conv.reference:
                conv.reference = []
            conv.message.append({"role": "assistant", "content": "", "id": message_id})
            conv.reference.append({"chunks": [], "doc_aggs": []})

            final_ans = {"reference": [], "doc_aggs": []}
            canvas = Canvas(cvs.dsl, objs[0].tenant_id)

            canvas.messages.append(msg[-1])
            canvas.add_user_input(msg[-1]["content"])
            answer = canvas.run(stream=False)

            assert answer is not None, "Nothing. Is it over?"

            data_type_picture = {
                "type": 3,
                "url": "base64 content"
            }
            data = [
                {
                    "type": 1,
                    "content": ""
                }
            ]
            final_ans["content"] = "\n".join(answer["content"]) if "content" in answer else ""
            canvas.messages.append({"role": "assistant", "content": final_ans["content"], "id": message_id})
            if final_ans.get("reference"):
                canvas.reference.append(final_ans["reference"])
            cvs.dsl = json.loads(str(canvas))

            ans = {"answer": final_ans["content"], "reference": final_ans.get("reference", [])}
            data[0]["content"] += re.sub(r'##\d\$\$', '', ans["answer"])
            fillin_conv(ans)
            API4ConversationService.append_message(conv.id, conv.to_dict())

            chunk_idxs = [int(match[2]) for match in re.findall(r'##\d\$\$', ans["answer"])]
            for chunk_idx in chunk_idxs[:1]:
                if ans["reference"]["chunks"][chunk_idx]["img_id"]:
                    try:
                        bkt, nm = ans["reference"]["chunks"][chunk_idx]["img_id"].split("-")
                        response = STORAGE_IMPL.get(bkt, nm)
                        data_type_picture["url"] = base64.b64encode(response).decode('utf-8')
                        data.append(data_type_picture)
                        break
                    except Exception as e:
                        return server_error_response(e)

            response = {"code": 200, "msg": "success", "data": data}
            return response

        # ******************For dialog******************
        conv.message.append(msg[-1])
        e, dia = DialogService.get_by_id(conv.dialog_id)
        if not e:
            return get_data_error_result(message="Dialog not found!")
        del req["conversation_id"]

        if not conv.reference:
            conv.reference = []
        conv.message.append({"role": "assistant", "content": "", "id": message_id})
        conv.reference.append({"chunks": [], "doc_aggs": []})

        data_type_picture = {
            "type": 3,
            "url": "base64 content"
        }
        data = [
            {
                "type": 1,
                "content": ""
            }
        ]
        ans = ""
        for a in chat(dia, msg, stream=False, **req):
            ans = a
            break
        data[0]["content"] += re.sub(r'##\d\$\$', '', ans["answer"])
        fillin_conv(ans)
        API4ConversationService.append_message(conv.id, conv.to_dict())

        chunk_idxs = [int(match[2]) for match in re.findall(r'##\d\$\$', ans["answer"])]
        for chunk_idx in chunk_idxs[:1]:
            if ans["reference"]["chunks"][chunk_idx]["img_id"]:
                try:
                    bkt, nm = ans["reference"]["chunks"][chunk_idx]["img_id"].split("-")
                    response = STORAGE_IMPL.get(bkt, nm)
                    data_type_picture["url"] = base64.b64encode(response).decode('utf-8')
                    data.append(data_type_picture)
                    break
                except Exception as e:
                    return server_error_response(e)

        response = {"code": 200, "msg": "success", "data": data}
        return response

    except Exception as e:
        return server_error_response(e)


@manager.route('/retrieval', methods=['POST'])  # noqa: F821
@validate_request("kb_id", "question")
def retrieval():
    token = request.headers.get('Authorization').split()[1]
    objs = APIToken.query(token=token)
    if not objs:
        return get_json_result(
            data=False, message='Authentication error: API key is invalid!', code=settings.RetCode.AUTHENTICATION_ERROR)

    req = request.json
    kb_ids = req.get("kb_id", [])
    doc_ids = req.get("doc_ids", [])
    question = req.get("question")
    page = int(req.get("page", 1))
    size = int(req.get("page_size", 30))
    similarity_threshold = float(req.get("similarity_threshold", 0.2))
    vector_similarity_weight = float(req.get("vector_similarity_weight", 0.3))
    top = int(req.get("top_k", 1024))
    highlight = bool(req.get("highlight", False))

    try:
        kbs = KnowledgebaseService.get_by_ids(kb_ids)
        embd_nms = list(set([kb.embd_id for kb in kbs]))
        if len(embd_nms) != 1:
            return get_json_result(
                data=False, message='Knowledge bases use different embedding models or do not exist.',
                code=settings.RetCode.AUTHENTICATION_ERROR)

        embd_mdl = LLMBundle(kbs[0].tenant_id, LLMType.EMBEDDING, llm_name=kbs[0].embd_id)
        rerank_mdl = None
        if req.get("rerank_id"):
            rerank_mdl = LLMBundle(kbs[0].tenant_id, LLMType.RERANK, llm_name=req["rerank_id"])
        if req.get("keyword", False):
            chat_mdl = LLMBundle(kbs[0].tenant_id, LLMType.CHAT)
            question += keyword_extraction(chat_mdl, question)
        ranks = settings.retrievaler.retrieval(question, embd_mdl, kbs[0].tenant_id, kb_ids, page, size,
                                               similarity_threshold, vector_similarity_weight, top,
                                               doc_ids, rerank_mdl=rerank_mdl, highlight=highlight,
                                               rank_feature=label_question(question, kbs))
        for c in ranks["chunks"]:
            c.pop("vector", None)
        return get_json_result(data=ranks)
    except Exception as e:
        if str(e).find("not_found") > 0:
            return get_json_result(data=False, message='No chunk found! Check the chunk status please!',
                                   code=settings.RetCode.DATA_ERROR)
        return server_error_response(e)
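Note: several of the endpoints above (for example /completion) stream their answers as Server-Sent Events, one `data:` JSON frame per chunk followed by a final `{"data": true}` sentinel frame. A minimal client sketch is shown below; the host, port, API key, and conversation id are placeholders, and the `/v1/api` prefix assumes API_VERSION is `v1`:

```python
# Hypothetical SSE consumer for the streaming /completion endpoint.
import json

import requests

BASE_URL = "http://localhost:9380"        # placeholder host/port
API_KEY = "<API_KEY>"                     # token created via /new_token
CONVERSATION_ID = "<conversation_id>"     # returned by /new_conversation

resp = requests.post(
    f"{BASE_URL}/v1/api/completion",
    headers={"Authorization": f"Bearer {API_KEY}"},
    json={
        "conversation_id": CONVERSATION_ID,
        "messages": [{"role": "user", "content": "hello"}],
        "stream": True,
    },
    stream=True,
)
for line in resp.iter_lines(decode_unicode=True):
    if not line or not line.startswith("data:"):
        continue
    frame = json.loads(line[len("data:"):])
    if frame["data"] is True:   # final sentinel frame ends the stream
        break
    print(frame["data"]["answer"])
```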
76 api/apps/auth/README.md Normal file
@@ -0,0 +1,76 @@
# Auth

The Auth module provides implementations of OAuth2 and OpenID Connect (OIDC) authentication for integration with third-party identity providers.

**Features**

- Supports both OAuth2 and OIDC authentication protocols
- Automatic OIDC configuration discovery (via `/.well-known/openid-configuration`)
- JWT token validation
- Unified user information handling

## Usage

```python
# OAuth2 configuration
oauth_config = {
    "type": "oauth2",
    "client_id": "your_client_id",
    "client_secret": "your_client_secret",
    "authorization_url": "https://your-oauth-provider.com/oauth/authorize",
    "token_url": "https://your-oauth-provider.com/oauth/token",
    "userinfo_url": "https://your-oauth-provider.com/oauth/userinfo",
    "redirect_uri": "https://your-app.com/v1/user/oauth/callback/<channel>"
}

# OIDC configuration
oidc_config = {
    "type": "oidc",
    "issuer": "https://your-oauth-provider.com/oidc",
    "client_id": "your_client_id",
    "client_secret": "your_client_secret",
    "redirect_uri": "https://your-app.com/v1/user/oauth/callback/<channel>"
}

# GitHub OAuth configuration
github_config = {
    "type": "github",
    "client_id": "your_client_id",
    "client_secret": "your_client_secret",
    "redirect_uri": "https://your-app.com/v1/user/oauth/callback/<channel>"
}

# Get a client instance
client = get_auth_client(oauth_config)
```

### Authentication Flow

1. Get the authorization URL:
```python
auth_url = client.get_authorization_url()
```

2. After user authorization, exchange the authorization code for a token:
```python
token_response = client.exchange_code_for_token(authorization_code)
access_token = token_response["access_token"]
```

3. Fetch user information:
```python
user_info = client.fetch_user_info(access_token)
```

## User Information Structure

All authentication methods return user information following this structure:

```python
{
    "email": "user@example.com",
    "username": "username",
    "nickname": "User Name",
    "avatar_url": "https://example.com/avatar.jpg"
}
```
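For reference, a complete login round-trip with the GitHub channel might look like the sketch below, assuming the `github_config` from the README's Usage section; `code_from_callback` is a placeholder for the authorization code delivered to the redirect URI, and the surrounding callback handling is application-specific:

```python
from api.apps.auth import get_auth_client

client = get_auth_client(github_config)

# 1. Redirect the user to the provider's consent page.
auth_url = client.get_authorization_url(state="random_csrf_state")

# 2. In the OAuth callback, trade the returned code for an access token.
code_from_callback = "<authorization_code>"  # provided by the provider redirect
token_response = client.exchange_code_for_token(code_from_callback)
access_token = token_response["access_token"]

# 3. Resolve the normalized user profile.
user_info = client.fetch_user_info(access_token)
print(user_info.to_dict())  # {"email": ..., "username": ..., "nickname": ..., "avatar_url": ...}
```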
40 api/apps/auth/__init__.py Normal file
@@ -0,0 +1,40 @@
#
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from .oauth import OAuthClient
from .oidc import OIDCClient
from .github import GithubOAuthClient


CLIENT_TYPES = {
    "oauth2": OAuthClient,
    "oidc": OIDCClient,
    "github": GithubOAuthClient
}


def get_auth_client(config) -> OAuthClient:
    channel_type = str(config.get("type", "")).lower()
    if channel_type == "":
        if config.get("issuer"):
            channel_type = "oidc"
        else:
            channel_type = "oauth2"
    client_class = CLIENT_TYPES.get(channel_type)
    if not client_class:
        raise ValueError(f"Unsupported type: {channel_type}")

    return client_class(config)
63 api/apps/auth/github.py Normal file
@@ -0,0 +1,63 @@
#
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import requests
from .oauth import OAuthClient, UserInfo


class GithubOAuthClient(OAuthClient):
    def __init__(self, config):
        """
        Initialize the GithubOAuthClient with the provider's configuration.
        """
        config.update({
            "authorization_url": "https://github.com/login/oauth/authorize",
            "token_url": "https://github.com/login/oauth/access_token",
            "userinfo_url": "https://api.github.com/user",
            "scope": "user:email"
        })
        super().__init__(config)

    def fetch_user_info(self, access_token, **kwargs):
        """
        Fetch GitHub user info.
        """
        user_info = {}
        try:
            headers = {"Authorization": f"Bearer {access_token}"}
            # user info
            response = requests.get(self.userinfo_url, headers=headers, timeout=self.http_request_timeout)
            response.raise_for_status()
            user_info.update(response.json())
            # email info
            response = requests.get(self.userinfo_url + "/emails", headers=headers, timeout=self.http_request_timeout)
            response.raise_for_status()
            email_info = response.json()
            user_info["email"] = next(
                (email for email in email_info if email["primary"]), None
            )["email"]
            return self.normalize_user_info(user_info)
        except requests.exceptions.RequestException as e:
            raise ValueError(f"Failed to fetch github user info: {e}")

    def normalize_user_info(self, user_info):
        email = user_info.get("email")
        username = user_info.get("login", str(email).split("@")[0])
        nickname = user_info.get("name", username)
        avatar_url = user_info.get("avatar_url", "")
        return UserInfo(email=email, username=username, nickname=nickname, avatar_url=avatar_url)
110
api/apps/auth/oauth.py
Normal file
110
api/apps/auth/oauth.py
Normal file
@@ -0,0 +1,110 @@
#
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import requests
import urllib.parse


class UserInfo:
    def __init__(self, email, username, nickname, avatar_url):
        self.email = email
        self.username = username
        self.nickname = nickname
        self.avatar_url = avatar_url

    def to_dict(self):
        return {key: value for key, value in self.__dict__.items()}


class OAuthClient:
    def __init__(self, config):
        """
        Initialize the OAuthClient with the provider's configuration.
        """
        self.client_id = config["client_id"]
        self.client_secret = config["client_secret"]
        self.authorization_url = config["authorization_url"]
        self.token_url = config["token_url"]
        self.userinfo_url = config["userinfo_url"]
        self.redirect_uri = config["redirect_uri"]
        self.scope = config.get("scope", None)

        self.http_request_timeout = 7

    def get_authorization_url(self, state=None):
        """
        Generate the authorization URL for user login.
        """
        params = {
            "client_id": self.client_id,
            "redirect_uri": self.redirect_uri,
            "response_type": "code",
        }
        if self.scope:
            params["scope"] = self.scope
        if state:
            params["state"] = state
        authorization_url = f"{self.authorization_url}?{urllib.parse.urlencode(params)}"
        return authorization_url

    def exchange_code_for_token(self, code):
        """
        Exchange authorization code for access token.
        """
        try:
            payload = {
                "client_id": self.client_id,
                "client_secret": self.client_secret,
                "code": code,
                "redirect_uri": self.redirect_uri,
                "grant_type": "authorization_code"
            }
            response = requests.post(
                self.token_url,
                data=payload,
                headers={"Accept": "application/json"},
                timeout=self.http_request_timeout
            )
            response.raise_for_status()
            return response.json()
        except requests.exceptions.RequestException as e:
            raise ValueError(f"Failed to exchange authorization code for token: {e}")

    def fetch_user_info(self, access_token, **kwargs):
        """
        Fetch user information using access token.
        """
        try:
            headers = {"Authorization": f"Bearer {access_token}"}
            response = requests.get(self.userinfo_url, headers=headers, timeout=self.http_request_timeout)
            response.raise_for_status()
            user_info = response.json()
            return self.normalize_user_info(user_info)
        except requests.exceptions.RequestException as e:
            raise ValueError(f"Failed to fetch user info: {e}")

    def normalize_user_info(self, user_info):
        email = user_info.get("email")
        username = user_info.get("username", str(email).split("@")[0])
        nickname = user_info.get("nickname", username)
        avatar_url = user_info.get("avatar_url", None)
        if avatar_url is None:
            avatar_url = user_info.get("picture", "")
        return UserInfo(email=email, username=username, nickname=nickname, avatar_url=avatar_url)
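OAuthClient is designed to be subclassed per provider: a subclass injects the provider's endpoints into the config and overrides normalize_user_info to map provider-specific field names onto UserInfo. A minimal sketch for a hypothetical provider (every URL and field name below is illustrative, not a real provider):

# Hedged sketch of a provider-specific subclass; endpoints and field names are made up.
class ExampleOAuthClient(OAuthClient):
    def __init__(self, config):
        config.update({
            "authorization_url": "https://auth.example.com/oauth/authorize",
            "token_url": "https://auth.example.com/oauth/token",
            "userinfo_url": "https://api.example.com/me",
            "scope": "profile email",
        })
        super().__init__(config)

    def normalize_user_info(self, user_info):
        # Map the provider's field names onto the common UserInfo shape.
        email = user_info.get("mail")
        username = user_info.get("handle", str(email).split("@")[0])
        nickname = user_info.get("display_name", username)
        avatar_url = user_info.get("avatar", "")
        return UserInfo(email=email, username=username, nickname=nickname, avatar_url=avatar_url)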
99
api/apps/auth/oidc.py
Normal file
@@ -0,0 +1,99 @@
#
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import jwt
import requests
from .oauth import OAuthClient


class OIDCClient(OAuthClient):
    def __init__(self, config):
        """
        Initialize the OIDCClient with the provider's configuration.
        Use `issuer` as the single source of truth for configuration discovery.
        """
        self.issuer = config.get("issuer")
        if not self.issuer:
            raise ValueError("Missing issuer in configuration.")

        oidc_metadata = self._load_oidc_metadata(self.issuer)
        config.update({
            'issuer': oidc_metadata['issuer'],
            'jwks_uri': oidc_metadata['jwks_uri'],
            'authorization_url': oidc_metadata['authorization_endpoint'],
            'token_url': oidc_metadata['token_endpoint'],
            'userinfo_url': oidc_metadata['userinfo_endpoint']
        })

        super().__init__(config)
        self.issuer = config['issuer']
        self.jwks_uri = config['jwks_uri']

    def _load_oidc_metadata(self, issuer):
        """
        Load OIDC metadata from `/.well-known/openid-configuration`.
        """
        try:
            metadata_url = f"{issuer}/.well-known/openid-configuration"
            response = requests.get(metadata_url, timeout=7)
            response.raise_for_status()
            return response.json()
        except requests.exceptions.RequestException as e:
            raise ValueError(f"Failed to fetch OIDC metadata: {e}")

    def parse_id_token(self, id_token):
        """
        Parse and validate OIDC ID Token (JWT format) with signature verification.
        """
        try:
            # Decode JWT header without verifying signature
            headers = jwt.get_unverified_header(id_token)

            # OIDC usually uses `RS256` for signing
            alg = headers.get("alg", "RS256")

            # Use PyJWT's PyJWKClient to fetch JWKS and find signing key
            jwks_cli = jwt.PyJWKClient(self.jwks_uri)
            signing_key = jwks_cli.get_signing_key_from_jwt(id_token).key

            # Decode and verify signature
            decoded_token = jwt.decode(
                id_token,
                key=signing_key,
                algorithms=[alg],
                audience=str(self.client_id),
                issuer=self.issuer,
            )
            return decoded_token
        except Exception as e:
            raise ValueError(f"Error parsing ID Token: {e}")

    def fetch_user_info(self, access_token, id_token=None, **kwargs):
        """
        Fetch user info.
        """
        user_info = {}
        if id_token:
            user_info = self.parse_id_token(id_token)
        user_info.update(super().fetch_user_info(access_token).to_dict())
        return self.normalize_user_info(user_info)

    def normalize_user_info(self, user_info):
        return super().normalize_user_info(user_info)
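Unlike the plain OAuth client, OIDCClient discovers its endpoints from the issuer and can verify the ID token's signature before trusting its claims. A minimal sketch of the callback-side flow (issuer and credentials are placeholders):

# Hedged sketch: exchange the code, verify the ID token, then merge /userinfo claims.
client = OIDCClient({
    "client_id": "<client-id>",                        # placeholder credentials
    "client_secret": "<client-secret>",
    "redirect_uri": "https://example.com/oidc/callback",
    "issuer": "https://idp.example.com/realms/demo",   # discovery runs against this issuer
})

token = client.exchange_code_for_token("code-from-callback")

# fetch_user_info() validates the ID token against the issuer's JWKS,
# then overlays the /userinfo response on top of the token claims.
user = client.fetch_user_info(token["access_token"], id_token=token.get("id_token"))
print(user.email, user.nickname)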
564
api/apps/canvas_app.py
Normal file
@@ -0,0 +1,564 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import json
|
||||
import logging
|
||||
import re
|
||||
import sys
|
||||
from functools import partial
|
||||
|
||||
import flask
|
||||
import trio
|
||||
from flask import request, Response
|
||||
from flask_login import login_required, current_user
|
||||
|
||||
from agent.component import LLM
|
||||
from api import settings
|
||||
from api.db import CanvasCategory, FileType
|
||||
from api.db.services.canvas_service import CanvasTemplateService, UserCanvasService, API4ConversationService
|
||||
from api.db.services.document_service import DocumentService
|
||||
from api.db.services.file_service import FileService
|
||||
from api.db.services.pipeline_operation_log_service import PipelineOperationLogService
|
||||
from api.db.services.task_service import queue_dataflow, CANVAS_DEBUG_DOC_ID, TaskService
|
||||
from api.db.services.user_service import TenantService
|
||||
from api.db.services.user_canvas_version import UserCanvasVersionService
|
||||
from api.settings import RetCode
|
||||
from api.utils import get_uuid
|
||||
from api.utils.api_utils import get_json_result, server_error_response, validate_request, get_data_error_result
|
||||
from agent.canvas import Canvas
|
||||
from peewee import MySQLDatabase, PostgresqlDatabase
|
||||
from api.db.db_models import APIToken, Task
|
||||
import time
|
||||
|
||||
from api.utils.file_utils import filename_type, read_potential_broken_pdf
|
||||
from rag.flow.pipeline import Pipeline
|
||||
from rag.nlp import search
|
||||
from rag.utils.redis_conn import REDIS_CONN
|
||||
|
||||
|
||||
@manager.route('/templates', methods=['GET']) # noqa: F821
|
||||
@login_required
|
||||
def templates():
|
||||
return get_json_result(data=[c.to_dict() for c in CanvasTemplateService.query(canvas_category=CanvasCategory.Agent)])
|
||||
|
||||
|
||||
@manager.route('/rm', methods=['POST']) # noqa: F821
|
||||
@validate_request("canvas_ids")
|
||||
@login_required
|
||||
def rm():
|
||||
for i in request.json["canvas_ids"]:
|
||||
if not UserCanvasService.accessible(i, current_user.id):
|
||||
return get_json_result(
|
||||
data=False, message='Only owner of canvas authorized for this operation.',
|
||||
code=RetCode.OPERATING_ERROR)
|
||||
UserCanvasService.delete_by_id(i)
|
||||
return get_json_result(data=True)
|
||||
|
||||
|
||||
@manager.route('/set', methods=['POST']) # noqa: F821
|
||||
@validate_request("dsl", "title")
|
||||
@login_required
|
||||
def save():
|
||||
req = request.json
|
||||
if not isinstance(req["dsl"], str):
|
||||
req["dsl"] = json.dumps(req["dsl"], ensure_ascii=False)
|
||||
req["dsl"] = json.loads(req["dsl"])
|
||||
cate = req.get("canvas_category", CanvasCategory.Agent)
|
||||
if "id" not in req:
|
||||
req["user_id"] = current_user.id
|
||||
if UserCanvasService.query(user_id=current_user.id, title=req["title"].strip(), canvas_category=cate):
|
||||
return get_data_error_result(message=f"{req['title'].strip()} already exists.")
|
||||
req["id"] = get_uuid()
|
||||
if not UserCanvasService.save(**req):
|
||||
return get_data_error_result(message="Fail to save canvas.")
|
||||
else:
|
||||
if not UserCanvasService.accessible(req["id"], current_user.id):
|
||||
return get_json_result(
|
||||
data=False, message='Only owner of canvas authorized for this operation.',
|
||||
code=RetCode.OPERATING_ERROR)
|
||||
UserCanvasService.update_by_id(req["id"], req)
|
||||
# save version
|
||||
UserCanvasVersionService.insert(user_canvas_id=req["id"], dsl=req["dsl"], title="{0}_{1}".format(req["title"], time.strftime("%Y_%m_%d_%H_%M_%S")))
|
||||
UserCanvasVersionService.delete_all_versions(req["id"])
|
||||
return get_json_result(data=req)
|
||||
|
||||
|
||||
@manager.route('/get/<canvas_id>', methods=['GET']) # noqa: F821
|
||||
@login_required
|
||||
def get(canvas_id):
|
||||
if not UserCanvasService.accessible(canvas_id, current_user.id):
|
||||
return get_data_error_result(message="canvas not found.")
|
||||
e, c = UserCanvasService.get_by_canvas_id(canvas_id)
|
||||
return get_json_result(data=c)
|
||||
|
||||
|
||||
@manager.route('/getsse/<canvas_id>', methods=['GET']) # type: ignore # noqa: F821
|
||||
def getsse(canvas_id):
|
||||
token = request.headers.get('Authorization').split()
|
||||
if len(token) != 2:
|
||||
return get_data_error_result(message='Authorization is not valid!')
|
||||
token = token[1]
|
||||
objs = APIToken.query(beta=token)
|
||||
if not objs:
|
||||
return get_data_error_result(message='Authentication error: API key is invalid!')
|
||||
tenant_id = objs[0].tenant_id
|
||||
if not UserCanvasService.query(user_id=tenant_id, id=canvas_id):
|
||||
return get_json_result(
|
||||
data=False,
|
||||
message='Only owner of canvas authorized for this operation.',
|
||||
code=RetCode.OPERATING_ERROR
|
||||
)
|
||||
e, c = UserCanvasService.get_by_id(canvas_id)
|
||||
if not e or c.user_id != tenant_id:
|
||||
return get_data_error_result(message="canvas not found.")
|
||||
return get_json_result(data=c.to_dict())
|
||||
|
||||
|
||||
@manager.route('/completion', methods=['POST']) # noqa: F821
|
||||
@validate_request("id")
|
||||
@login_required
|
||||
def run():
|
||||
req = request.json
|
||||
query = req.get("query", "")
|
||||
files = req.get("files", [])
|
||||
inputs = req.get("inputs", {})
|
||||
user_id = req.get("user_id", current_user.id)
|
||||
if not UserCanvasService.accessible(req["id"], current_user.id):
|
||||
return get_json_result(
|
||||
data=False, message='Only owner of canvas authorized for this operation.',
|
||||
code=RetCode.OPERATING_ERROR)
|
||||
|
||||
e, cvs = UserCanvasService.get_by_id(req["id"])
|
||||
if not e:
|
||||
return get_data_error_result(message="canvas not found.")
|
||||
|
||||
if not isinstance(cvs.dsl, str):
|
||||
cvs.dsl = json.dumps(cvs.dsl, ensure_ascii=False)
|
||||
|
||||
if cvs.canvas_category == CanvasCategory.DataFlow:
|
||||
task_id = get_uuid()
|
||||
Pipeline(cvs.dsl, tenant_id=current_user.id, doc_id=CANVAS_DEBUG_DOC_ID, task_id=task_id, flow_id=req["id"])
|
||||
ok, error_message = queue_dataflow(tenant_id=user_id, flow_id=req["id"], task_id=task_id, file=files[0], priority=0)
|
||||
if not ok:
|
||||
return get_data_error_result(message=error_message)
|
||||
return get_json_result(data={"message_id": task_id})
|
||||
|
||||
try:
|
||||
canvas = Canvas(cvs.dsl, current_user.id, req["id"])
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
def sse():
|
||||
nonlocal canvas, user_id
|
||||
try:
|
||||
for ans in canvas.run(query=query, files=files, user_id=user_id, inputs=inputs):
|
||||
yield "data:" + json.dumps(ans, ensure_ascii=False) + "\n\n"
|
||||
|
||||
cvs.dsl = json.loads(str(canvas))
|
||||
UserCanvasService.update_by_id(req["id"], cvs.to_dict())
|
||||
except Exception as e:
|
||||
logging.exception(e)
|
||||
yield "data:" + json.dumps({"code": 500, "message": str(e), "data": False}, ensure_ascii=False) + "\n\n"
|
||||
|
||||
resp = Response(sse(), mimetype="text/event-stream")
|
||||
resp.headers.add_header("Cache-control", "no-cache")
|
||||
resp.headers.add_header("Connection", "keep-alive")
|
||||
resp.headers.add_header("X-Accel-Buffering", "no")
|
||||
resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
|
||||
return resp
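The endpoint above streams Server-Sent Events; each data: line carries one JSON answer delta. A rough client-side sketch follows (the /v1/canvas prefix, port and cookie-based auth are assumptions about how the blueprint is mounted, not confirmed by this diff):

# Hedged sketch of consuming the SSE stream produced by the completion endpoint.
import json
import requests

payload = {"id": "<canvas-id>", "query": "hello", "inputs": {}}
with requests.post(
    "http://localhost:9380/v1/canvas/completion",       # assumption: default host/port and blueprint prefix
    json=payload,
    cookies={"session": "<login-session-cookie>"},      # assumption: session-based auth from flask_login
    stream=True,
) as resp:
    for line in resp.iter_lines(decode_unicode=True):
        if line and line.startswith("data:"):
            event = json.loads(line[len("data:"):])
            print(event)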
|
||||
|
||||
|
||||
@manager.route('/rerun', methods=['POST']) # noqa: F821
|
||||
@validate_request("id", "dsl", "component_id")
|
||||
@login_required
|
||||
def rerun():
|
||||
req = request.json
|
||||
doc = PipelineOperationLogService.get_documents_info(req["id"])
|
||||
if not doc:
|
||||
return get_data_error_result(message="Document not found.")
|
||||
doc = doc[0]
|
||||
if 0 < doc["progress"] < 1:
|
||||
return get_data_error_result(message=f"`{doc['name']}` is processing...")
|
||||
|
||||
if settings.docStoreConn.indexExist(search.index_name(current_user.id), doc["kb_id"]):
|
||||
settings.docStoreConn.delete({"doc_id": doc["id"]}, search.index_name(current_user.id), doc["kb_id"])
|
||||
doc["progress_msg"] = ""
|
||||
doc["chunk_num"] = 0
|
||||
doc["token_num"] = 0
|
||||
DocumentService.clear_chunk_num_when_rerun(doc["id"])
|
||||
DocumentService.update_by_id(doc["id"], doc)
TaskService.filter_delete([Task.doc_id == doc["id"]])
|
||||
|
||||
dsl = req["dsl"]
|
||||
dsl["path"] = [req["component_id"]]
|
||||
PipelineOperationLogService.update_by_id(req["id"], {"dsl": dsl})
|
||||
queue_dataflow(tenant_id=current_user.id, flow_id=req["id"], task_id=get_uuid(), doc_id=doc["id"], priority=0, rerun=True)
|
||||
return get_json_result(data=True)
|
||||
|
||||
|
||||
@manager.route('/cancel/<task_id>', methods=['PUT']) # noqa: F821
|
||||
@login_required
|
||||
def cancel(task_id):
|
||||
try:
|
||||
REDIS_CONN.set(f"{task_id}-cancel", "x")
|
||||
except Exception as e:
|
||||
logging.exception(e)
|
||||
return get_json_result(data=True)
|
||||
|
||||
|
||||
@manager.route('/reset', methods=['POST']) # noqa: F821
|
||||
@validate_request("id")
|
||||
@login_required
|
||||
def reset():
|
||||
req = request.json
|
||||
if not UserCanvasService.accessible(req["id"], current_user.id):
|
||||
return get_json_result(
|
||||
data=False, message='Only owner of canvas authorized for this operation.',
|
||||
code=RetCode.OPERATING_ERROR)
|
||||
try:
|
||||
e, user_canvas = UserCanvasService.get_by_id(req["id"])
|
||||
if not e:
|
||||
return get_data_error_result(message="canvas not found.")
|
||||
|
||||
canvas = Canvas(json.dumps(user_canvas.dsl), current_user.id)
|
||||
canvas.reset()
|
||||
req["dsl"] = json.loads(str(canvas))
|
||||
UserCanvasService.update_by_id(req["id"], {"dsl": req["dsl"]})
|
||||
return get_json_result(data=req["dsl"])
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route("/upload/<canvas_id>", methods=["POST"]) # noqa: F821
|
||||
def upload(canvas_id):
|
||||
e, cvs = UserCanvasService.get_by_canvas_id(canvas_id)
|
||||
if not e:
|
||||
return get_data_error_result(message="canvas not found.")
|
||||
|
||||
user_id = cvs["user_id"]
|
||||
def structured(filename, filetype, blob, content_type):
|
||||
nonlocal user_id
|
||||
if filetype == FileType.PDF.value:
|
||||
blob = read_potential_broken_pdf(blob)
|
||||
|
||||
location = get_uuid()
|
||||
FileService.put_blob(user_id, location, blob)
|
||||
|
||||
return {
|
||||
"id": location,
|
||||
"name": filename,
|
||||
"size": sys.getsizeof(blob),
|
||||
"extension": filename.split(".")[-1].lower(),
|
||||
"mime_type": content_type,
|
||||
"created_by": user_id,
|
||||
"created_at": time.time(),
|
||||
"preview_url": None
|
||||
}
|
||||
|
||||
if request.args.get("url"):
|
||||
from crawl4ai import (
|
||||
AsyncWebCrawler,
|
||||
BrowserConfig,
|
||||
CrawlerRunConfig,
|
||||
DefaultMarkdownGenerator,
|
||||
PruningContentFilter,
|
||||
CrawlResult
|
||||
)
|
||||
try:
|
||||
url = request.args.get("url")
|
||||
filename = re.sub(r"\?.*", "", url.split("/")[-1])
|
||||
async def adownload():
|
||||
browser_config = BrowserConfig(
|
||||
headless=True,
|
||||
verbose=False,
|
||||
)
|
||||
async with AsyncWebCrawler(config=browser_config) as crawler:
|
||||
crawler_config = CrawlerRunConfig(
|
||||
markdown_generator=DefaultMarkdownGenerator(
|
||||
content_filter=PruningContentFilter()
|
||||
),
|
||||
pdf=True,
|
||||
screenshot=False
|
||||
)
|
||||
result: CrawlResult = await crawler.arun(
|
||||
url=url,
|
||||
config=crawler_config
|
||||
)
|
||||
return result
|
||||
page = trio.run(adownload)
|
||||
if page.pdf:
|
||||
if filename.split(".")[-1].lower() != "pdf":
|
||||
filename += ".pdf"
|
||||
return get_json_result(data=structured(filename, "pdf", page.pdf, page.response_headers["content-type"]))
|
||||
|
||||
return get_json_result(data=structured(filename, "html", str(page.markdown).encode("utf-8"), page.response_headers["content-type"]))
|
||||
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
file = request.files['file']
|
||||
try:
|
||||
DocumentService.check_doc_health(user_id, file.filename)
|
||||
return get_json_result(data=structured(file.filename, filename_type(file.filename), file.read(), file.content_type))
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route('/input_form', methods=['GET']) # noqa: F821
|
||||
@login_required
|
||||
def input_form():
|
||||
cvs_id = request.args.get("id")
|
||||
cpn_id = request.args.get("component_id")
|
||||
try:
|
||||
e, user_canvas = UserCanvasService.get_by_id(cvs_id)
|
||||
if not e:
|
||||
return get_data_error_result(message="canvas not found.")
|
||||
if not UserCanvasService.query(user_id=current_user.id, id=cvs_id):
|
||||
return get_json_result(
|
||||
data=False, message='Only owner of canvas authorized for this operation.',
|
||||
code=RetCode.OPERATING_ERROR)
|
||||
|
||||
canvas = Canvas(json.dumps(user_canvas.dsl), current_user.id)
|
||||
return get_json_result(data=canvas.get_component_input_form(cpn_id))
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route('/debug', methods=['POST']) # noqa: F821
|
||||
@validate_request("id", "component_id", "params")
|
||||
@login_required
|
||||
def debug():
|
||||
req = request.json
|
||||
if not UserCanvasService.accessible(req["id"], current_user.id):
|
||||
return get_json_result(
|
||||
data=False, message='Only owner of canvas authorized for this operation.',
|
||||
code=RetCode.OPERATING_ERROR)
|
||||
try:
|
||||
e, user_canvas = UserCanvasService.get_by_id(req["id"])
|
||||
canvas = Canvas(json.dumps(user_canvas.dsl), current_user.id)
|
||||
canvas.reset()
|
||||
canvas.message_id = get_uuid()
|
||||
component = canvas.get_component(req["component_id"])["obj"]
|
||||
component.reset()
|
||||
|
||||
if isinstance(component, LLM):
|
||||
component.set_debug_inputs(req["params"])
|
||||
component.invoke(**{k: o["value"] for k,o in req["params"].items()})
|
||||
outputs = component.output()
|
||||
for k in outputs.keys():
|
||||
if isinstance(outputs[k], partial):
|
||||
txt = ""
|
||||
for c in outputs[k]():
|
||||
txt += c
|
||||
outputs[k] = txt
|
||||
return get_json_result(data=outputs)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route('/test_db_connect', methods=['POST']) # noqa: F821
|
||||
@validate_request("db_type", "database", "username", "host", "port", "password")
|
||||
@login_required
|
||||
def test_db_connect():
|
||||
req = request.json
|
||||
try:
|
||||
if req["db_type"] in ["mysql", "mariadb"]:
|
||||
db = MySQLDatabase(req["database"], user=req["username"], host=req["host"], port=req["port"],
|
||||
password=req["password"])
|
||||
elif req["db_type"] == 'postgres':
|
||||
db = PostgresqlDatabase(req["database"], user=req["username"], host=req["host"], port=req["port"],
|
||||
password=req["password"])
|
||||
elif req["db_type"] == 'mssql':
|
||||
import pyodbc
|
||||
connection_string = (
|
||||
f"DRIVER={{ODBC Driver 17 for SQL Server}};"
|
||||
f"SERVER={req['host']},{req['port']};"
|
||||
f"DATABASE={req['database']};"
|
||||
f"UID={req['username']};"
|
||||
f"PWD={req['password']};"
|
||||
)
|
||||
db = pyodbc.connect(connection_string)
|
||||
cursor = db.cursor()
|
||||
cursor.execute("SELECT 1")
|
||||
cursor.close()
|
||||
elif req["db_type"] == 'IBM DB2':
|
||||
import ibm_db
|
||||
conn_str = (
|
||||
f"DATABASE={req['database']};"
|
||||
f"HOSTNAME={req['host']};"
|
||||
f"PORT={req['port']};"
|
||||
f"PROTOCOL=TCPIP;"
|
||||
f"UID={req['username']};"
|
||||
f"PWD={req['password']};"
|
||||
)
|
||||
logging.info(conn_str)
|
||||
conn = ibm_db.connect(conn_str, "", "")
|
||||
stmt = ibm_db.exec_immediate(conn, "SELECT 1 FROM sysibm.sysdummy1")
|
||||
ibm_db.fetch_assoc(stmt)
|
||||
ibm_db.close(conn)
|
||||
return get_json_result(data="Database Connection Successful!")
|
||||
else:
|
||||
return server_error_response("Unsupported database type.")
|
||||
if req["db_type"] != 'mssql':
|
||||
db.connect()
|
||||
db.close()
|
||||
|
||||
return get_json_result(data="Database Connection Successful!")
|
||||
except Exception as e:
|
||||
return server_error_response(e)
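For reference, the request body this endpoint validates is db_type, database, username, host, port and password. A hedged example call, again assuming the default /v1/canvas mount point and session-cookie auth:

# Hedged sketch: probe a MySQL connection through the endpoint above.
import requests

payload = {
    "db_type": "mysql",        # also accepts mariadb, postgres, mssql, IBM DB2
    "database": "ragflow",
    "username": "root",
    "host": "127.0.0.1",
    "port": 3306,
    "password": "<password>",
}
resp = requests.post(
    "http://localhost:9380/v1/canvas/test_db_connect",   # assumption: default mount point
    json=payload,
    cookies={"session": "<login-session-cookie>"},        # assumption: session auth
)
print(resp.json())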
|
||||
|
||||
|
||||
# API: get the list of DSL versions of a canvas
@manager.route('/getlistversion/<canvas_id>', methods=['GET'])  # noqa: F821
@login_required
def getlistversion(canvas_id):
    try:
        versions = sorted([c.to_dict() for c in UserCanvasVersionService.list_by_canvas_id(canvas_id)], key=lambda x: x["update_time"] * -1)
        return get_json_result(data=versions)
    except Exception as e:
        return get_data_error_result(message=f"Error getting history files: {e}")
|
||||
|
||||
|
||||
# API: get one DSL version of a canvas
@manager.route('/getversion/<version_id>', methods=['GET'])  # noqa: F821
@login_required
def getversion(version_id):
    try:
        e, version = UserCanvasVersionService.get_by_id(version_id)
        if version:
            return get_json_result(data=version.to_dict())
        return get_data_error_result(message="Version not found.")
    except Exception as e:
        return get_json_result(data=f"Error getting history file: {e}")
|
||||
|
||||
|
||||
@manager.route('/list', methods=['GET']) # noqa: F821
|
||||
@login_required
|
||||
def list_canvas():
|
||||
keywords = request.args.get("keywords", "")
|
||||
page_number = int(request.args.get("page", 0))
|
||||
items_per_page = int(request.args.get("page_size", 0))
|
||||
orderby = request.args.get("orderby", "create_time")
|
||||
canvas_category = request.args.get("canvas_category")
|
||||
if request.args.get("desc", "true").lower() == "false":
|
||||
desc = False
|
||||
else:
|
||||
desc = True
|
||||
owner_ids = [id for id in request.args.get("owner_ids", "").strip().split(",") if id]
|
||||
if not owner_ids:
|
||||
tenants = TenantService.get_joined_tenants_by_user_id(current_user.id)
|
||||
tenants = [m["tenant_id"] for m in tenants]
|
||||
tenants.append(current_user.id)
|
||||
canvas, total = UserCanvasService.get_by_tenant_ids(
|
||||
tenants, current_user.id, page_number,
|
||||
items_per_page, orderby, desc, keywords, canvas_category)
|
||||
else:
|
||||
tenants = owner_ids
|
||||
canvas, total = UserCanvasService.get_by_tenant_ids(
|
||||
tenants, current_user.id, 0,
|
||||
0, orderby, desc, keywords, canvas_category)
|
||||
return get_json_result(data={"canvas": canvas, "total": total})
|
||||
|
||||
|
||||
@manager.route('/setting', methods=['POST']) # noqa: F821
|
||||
@validate_request("id", "title", "permission")
|
||||
@login_required
|
||||
def setting():
|
||||
req = request.json
|
||||
req["user_id"] = current_user.id
|
||||
|
||||
if not UserCanvasService.accessible(req["id"], current_user.id):
|
||||
return get_json_result(
|
||||
data=False, message='Only owner of canvas authorized for this operation.',
|
||||
code=RetCode.OPERATING_ERROR)
|
||||
|
||||
e,flow = UserCanvasService.get_by_id(req["id"])
|
||||
if not e:
|
||||
return get_data_error_result(message="canvas not found.")
|
||||
flow = flow.to_dict()
|
||||
flow["title"] = req["title"]
|
||||
|
||||
for key in ["description", "permission", "avatar"]:
|
||||
if value := req.get(key):
|
||||
flow[key] = value
|
||||
|
||||
num= UserCanvasService.update_by_id(req["id"], flow)
|
||||
return get_json_result(data=num)
|
||||
|
||||
|
||||
@manager.route('/trace', methods=['GET']) # noqa: F821
|
||||
def trace():
|
||||
cvs_id = request.args.get("canvas_id")
|
||||
msg_id = request.args.get("message_id")
|
||||
try:
|
||||
bin = REDIS_CONN.get(f"{cvs_id}-{msg_id}-logs")
|
||||
if not bin:
|
||||
return get_json_result(data={})
|
||||
|
||||
return get_json_result(data=json.loads(bin.encode("utf-8")))
|
||||
except Exception as e:
|
||||
logging.exception(e)
|
||||
|
||||
|
||||
@manager.route('/<canvas_id>/sessions', methods=['GET']) # noqa: F821
|
||||
@login_required
|
||||
def sessions(canvas_id):
|
||||
tenant_id = current_user.id
|
||||
if not UserCanvasService.accessible(canvas_id, tenant_id):
|
||||
return get_json_result(
|
||||
data=False, message='Only owner of canvas authorized for this operation.',
|
||||
code=RetCode.OPERATING_ERROR)
|
||||
|
||||
user_id = request.args.get("user_id")
|
||||
page_number = int(request.args.get("page", 1))
|
||||
items_per_page = int(request.args.get("page_size", 30))
|
||||
keywords = request.args.get("keywords")
|
||||
from_date = request.args.get("from_date")
|
||||
to_date = request.args.get("to_date")
|
||||
orderby = request.args.get("orderby", "update_time")
|
||||
if request.args.get("desc") == "False" or request.args.get("desc") == "false":
|
||||
desc = False
|
||||
else:
|
||||
desc = True
|
||||
# dsl defaults to True in all cases except for False and false
|
||||
include_dsl = request.args.get("dsl") != "False" and request.args.get("dsl") != "false"
|
||||
total, sess = API4ConversationService.get_list(canvas_id, tenant_id, page_number, items_per_page, orderby, desc,
|
||||
None, user_id, include_dsl, keywords, from_date, to_date)
|
||||
try:
|
||||
return get_json_result(data={"total": total, "sessions": sess})
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route('/prompts', methods=['GET']) # noqa: F821
|
||||
@login_required
|
||||
def prompts():
|
||||
from rag.prompts.generator import ANALYZE_TASK_SYSTEM, ANALYZE_TASK_USER, NEXT_STEP, REFLECT, CITATION_PROMPT_TEMPLATE
|
||||
return get_json_result(data={
|
||||
"task_analysis": ANALYZE_TASK_SYSTEM +"\n\n"+ ANALYZE_TASK_USER,
|
||||
"plan_generation": NEXT_STEP,
|
||||
"reflection": REFLECT,
|
||||
#"context_summary": SUMMARY4MEMORY,
|
||||
#"context_ranking": RANK_MEMORY,
|
||||
"citation_guidelines": CITATION_PROMPT_TEMPLATE
|
||||
})
|
||||
|
||||
|
||||
@manager.route('/download', methods=['GET']) # noqa: F821
|
||||
def download():
|
||||
id = request.args.get("id")
|
||||
created_by = request.args.get("created_by")
|
||||
blob = FileService.get_blob(created_by, id)
|
||||
return flask.make_response(blob)
|
||||
415
api/apps/chunk_app.py
Normal file
@@ -0,0 +1,415 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import datetime
|
||||
import json
|
||||
import re
|
||||
|
||||
import xxhash
|
||||
from flask import request
|
||||
from flask_login import current_user, login_required
|
||||
|
||||
from api import settings
|
||||
from api.db import LLMType, ParserType
|
||||
from api.db.services.dialog_service import meta_filter
|
||||
from api.db.services.document_service import DocumentService
|
||||
from api.db.services.knowledgebase_service import KnowledgebaseService
|
||||
from api.db.services.llm_service import LLMBundle
|
||||
from api.db.services.search_service import SearchService
|
||||
from api.db.services.user_service import UserTenantService
|
||||
from api.utils.api_utils import get_data_error_result, get_json_result, server_error_response, validate_request
|
||||
from rag.app.qa import beAdoc, rmPrefix
|
||||
from rag.app.tag import label_question
|
||||
from rag.nlp import rag_tokenizer, search
|
||||
from rag.prompts.generator import gen_meta_filter, cross_languages, keyword_extraction
|
||||
from rag.settings import PAGERANK_FLD
|
||||
from rag.utils import rmSpace
|
||||
|
||||
|
||||
@manager.route('/list', methods=['POST']) # noqa: F821
|
||||
@login_required
|
||||
@validate_request("doc_id")
|
||||
def list_chunk():
|
||||
req = request.json
|
||||
doc_id = req["doc_id"]
|
||||
page = int(req.get("page", 1))
|
||||
size = int(req.get("size", 30))
|
||||
question = req.get("keywords", "")
|
||||
try:
|
||||
tenant_id = DocumentService.get_tenant_id(req["doc_id"])
|
||||
if not tenant_id:
|
||||
return get_data_error_result(message="Tenant not found!")
|
||||
e, doc = DocumentService.get_by_id(doc_id)
|
||||
if not e:
|
||||
return get_data_error_result(message="Document not found!")
|
||||
kb_ids = KnowledgebaseService.get_kb_ids(tenant_id)
|
||||
query = {
|
||||
"doc_ids": [doc_id], "page": page, "size": size, "question": question, "sort": True
|
||||
}
|
||||
if "available_int" in req:
|
||||
query["available_int"] = int(req["available_int"])
|
||||
sres = settings.retrievaler.search(query, search.index_name(tenant_id), kb_ids, highlight=True)
|
||||
res = {"total": sres.total, "chunks": [], "doc": doc.to_dict()}
|
||||
for id in sres.ids:
|
||||
d = {
|
||||
"chunk_id": id,
|
||||
"content_with_weight": rmSpace(sres.highlight[id]) if question and id in sres.highlight else sres.field[
|
||||
id].get(
|
||||
"content_with_weight", ""),
|
||||
"doc_id": sres.field[id]["doc_id"],
|
||||
"docnm_kwd": sres.field[id]["docnm_kwd"],
|
||||
"important_kwd": sres.field[id].get("important_kwd", []),
|
||||
"question_kwd": sres.field[id].get("question_kwd", []),
|
||||
"image_id": sres.field[id].get("img_id", ""),
|
||||
"available_int": int(sres.field[id].get("available_int", 1)),
|
||||
"positions": sres.field[id].get("position_int", []),
|
||||
}
|
||||
assert isinstance(d["positions"], list)
|
||||
assert len(d["positions"]) == 0 or (isinstance(d["positions"][0], list) and len(d["positions"][0]) == 5)
|
||||
res["chunks"].append(d)
|
||||
return get_json_result(data=res)
|
||||
except Exception as e:
|
||||
if str(e).find("not_found") > 0:
|
||||
return get_json_result(data=False, message='No chunk found!',
|
||||
code=settings.RetCode.DATA_ERROR)
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route('/get', methods=['GET']) # noqa: F821
|
||||
@login_required
|
||||
def get():
|
||||
chunk_id = request.args["chunk_id"]
|
||||
try:
|
||||
chunk = None
|
||||
tenants = UserTenantService.query(user_id=current_user.id)
|
||||
if not tenants:
|
||||
return get_data_error_result(message="Tenant not found!")
|
||||
for tenant in tenants:
|
||||
kb_ids = KnowledgebaseService.get_kb_ids(tenant.tenant_id)
|
||||
chunk = settings.docStoreConn.get(chunk_id, search.index_name(tenant.tenant_id), kb_ids)
|
||||
if chunk:
|
||||
break
|
||||
if chunk is None:
|
||||
return server_error_response(Exception("Chunk not found"))
|
||||
|
||||
k = []
|
||||
for n in chunk.keys():
|
||||
if re.search(r"(_vec$|_sm_|_tks|_ltks)", n):
|
||||
k.append(n)
|
||||
for n in k:
|
||||
del chunk[n]
|
||||
|
||||
return get_json_result(data=chunk)
|
||||
except Exception as e:
|
||||
if str(e).find("NotFoundError") >= 0:
|
||||
return get_json_result(data=False, message='Chunk not found!',
|
||||
code=settings.RetCode.DATA_ERROR)
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route('/set', methods=['POST']) # noqa: F821
|
||||
@login_required
|
||||
@validate_request("doc_id", "chunk_id", "content_with_weight")
|
||||
def set():
|
||||
req = request.json
|
||||
d = {
|
||||
"id": req["chunk_id"],
|
||||
"content_with_weight": req["content_with_weight"]}
|
||||
d["content_ltks"] = rag_tokenizer.tokenize(req["content_with_weight"])
|
||||
d["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(d["content_ltks"])
|
||||
if "important_kwd" in req:
|
||||
if not isinstance(req["important_kwd"], list):
|
||||
return get_data_error_result(message="`important_kwd` should be a list")
|
||||
d["important_kwd"] = req["important_kwd"]
|
||||
d["important_tks"] = rag_tokenizer.tokenize(" ".join(req["important_kwd"]))
|
||||
if "question_kwd" in req:
|
||||
if not isinstance(req["question_kwd"], list):
|
||||
return get_data_error_result(message="`question_kwd` should be a list")
|
||||
d["question_kwd"] = req["question_kwd"]
|
||||
d["question_tks"] = rag_tokenizer.tokenize("\n".join(req["question_kwd"]))
|
||||
if "tag_kwd" in req:
|
||||
d["tag_kwd"] = req["tag_kwd"]
|
||||
if "tag_feas" in req:
|
||||
d["tag_feas"] = req["tag_feas"]
|
||||
if "available_int" in req:
|
||||
d["available_int"] = req["available_int"]
|
||||
|
||||
try:
|
||||
tenant_id = DocumentService.get_tenant_id(req["doc_id"])
|
||||
if not tenant_id:
|
||||
return get_data_error_result(message="Tenant not found!")
|
||||
|
||||
embd_id = DocumentService.get_embd_id(req["doc_id"])
|
||||
embd_mdl = LLMBundle(tenant_id, LLMType.EMBEDDING, embd_id)
|
||||
|
||||
e, doc = DocumentService.get_by_id(req["doc_id"])
|
||||
if not e:
|
||||
return get_data_error_result(message="Document not found!")
|
||||
|
||||
if doc.parser_id == ParserType.QA:
|
||||
arr = [
|
||||
t for t in re.split(
|
||||
r"[\n\t]",
|
||||
req["content_with_weight"]) if len(t) > 1]
|
||||
q, a = rmPrefix(arr[0]), rmPrefix("\n".join(arr[1:]))
|
||||
d = beAdoc(d, q, a, not any(
|
||||
[rag_tokenizer.is_chinese(t) for t in q + a]))
|
||||
|
||||
v, c = embd_mdl.encode([doc.name, req["content_with_weight"] if not d.get("question_kwd") else "\n".join(d["question_kwd"])])
|
||||
v = 0.1 * v[0] + 0.9 * v[1] if doc.parser_id != ParserType.QA else v[1]
|
||||
d["q_%d_vec" % len(v)] = v.tolist()
|
||||
settings.docStoreConn.update({"id": req["chunk_id"]}, d, search.index_name(tenant_id), doc.kb_id)
|
||||
return get_json_result(data=True)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route('/switch', methods=['POST']) # noqa: F821
|
||||
@login_required
|
||||
@validate_request("chunk_ids", "available_int", "doc_id")
|
||||
def switch():
|
||||
req = request.json
|
||||
try:
|
||||
e, doc = DocumentService.get_by_id(req["doc_id"])
|
||||
if not e:
|
||||
return get_data_error_result(message="Document not found!")
|
||||
for cid in req["chunk_ids"]:
|
||||
if not settings.docStoreConn.update({"id": cid},
|
||||
{"available_int": int(req["available_int"])},
|
||||
search.index_name(DocumentService.get_tenant_id(req["doc_id"])),
|
||||
doc.kb_id):
|
||||
return get_data_error_result(message="Index updating failure")
|
||||
return get_json_result(data=True)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route('/rm', methods=['POST']) # noqa: F821
|
||||
@login_required
|
||||
@validate_request("chunk_ids", "doc_id")
|
||||
def rm():
|
||||
from rag.utils.storage_factory import STORAGE_IMPL
|
||||
req = request.json
|
||||
try:
|
||||
e, doc = DocumentService.get_by_id(req["doc_id"])
|
||||
if not e:
|
||||
return get_data_error_result(message="Document not found!")
|
||||
if not settings.docStoreConn.delete({"id": req["chunk_ids"]},
|
||||
search.index_name(DocumentService.get_tenant_id(req["doc_id"])),
|
||||
doc.kb_id):
|
||||
return get_data_error_result(message="Chunk deleting failure")
|
||||
deleted_chunk_ids = req["chunk_ids"]
|
||||
chunk_number = len(deleted_chunk_ids)
|
||||
DocumentService.decrement_chunk_num(doc.id, doc.kb_id, 1, chunk_number, 0)
|
||||
for cid in deleted_chunk_ids:
|
||||
if STORAGE_IMPL.obj_exist(doc.kb_id, cid):
|
||||
STORAGE_IMPL.rm(doc.kb_id, cid)
|
||||
return get_json_result(data=True)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route('/create', methods=['POST']) # noqa: F821
|
||||
@login_required
|
||||
@validate_request("doc_id", "content_with_weight")
|
||||
def create():
|
||||
req = request.json
|
||||
chunck_id = xxhash.xxh64((req["content_with_weight"] + req["doc_id"]).encode("utf-8")).hexdigest()
|
||||
d = {"id": chunck_id, "content_ltks": rag_tokenizer.tokenize(req["content_with_weight"]),
|
||||
"content_with_weight": req["content_with_weight"]}
|
||||
d["content_sm_ltks"] = rag_tokenizer.fine_grained_tokenize(d["content_ltks"])
|
||||
d["important_kwd"] = req.get("important_kwd", [])
|
||||
if not isinstance(d["important_kwd"], list):
|
||||
return get_data_error_result(message="`important_kwd` is required to be a list")
|
||||
d["important_tks"] = rag_tokenizer.tokenize(" ".join(d["important_kwd"]))
|
||||
d["question_kwd"] = req.get("question_kwd", [])
|
||||
if not isinstance(d["question_kwd"], list):
|
||||
return get_data_error_result(message="`question_kwd` is required to be a list")
|
||||
d["question_tks"] = rag_tokenizer.tokenize("\n".join(d["question_kwd"]))
|
||||
d["create_time"] = str(datetime.datetime.now()).replace("T", " ")[:19]
|
||||
d["create_timestamp_flt"] = datetime.datetime.now().timestamp()
|
||||
if "tag_feas" in req:
|
||||
d["tag_feas"] = req["tag_feas"]
|
||||
if "tag_feas" in req:
|
||||
d["tag_feas"] = req["tag_feas"]
|
||||
|
||||
try:
|
||||
e, doc = DocumentService.get_by_id(req["doc_id"])
|
||||
if not e:
|
||||
return get_data_error_result(message="Document not found!")
|
||||
d["kb_id"] = [doc.kb_id]
|
||||
d["docnm_kwd"] = doc.name
|
||||
d["title_tks"] = rag_tokenizer.tokenize(doc.name)
|
||||
d["doc_id"] = doc.id
|
||||
|
||||
tenant_id = DocumentService.get_tenant_id(req["doc_id"])
|
||||
if not tenant_id:
|
||||
return get_data_error_result(message="Tenant not found!")
|
||||
|
||||
e, kb = KnowledgebaseService.get_by_id(doc.kb_id)
|
||||
if not e:
|
||||
return get_data_error_result(message="Knowledgebase not found!")
|
||||
if kb.pagerank:
|
||||
d[PAGERANK_FLD] = kb.pagerank
|
||||
|
||||
embd_id = DocumentService.get_embd_id(req["doc_id"])
|
||||
embd_mdl = LLMBundle(tenant_id, LLMType.EMBEDDING.value, embd_id)
|
||||
|
||||
v, c = embd_mdl.encode([doc.name, req["content_with_weight"] if not d["question_kwd"] else "\n".join(d["question_kwd"])])
|
||||
v = 0.1 * v[0] + 0.9 * v[1]
|
||||
d["q_%d_vec" % len(v)] = v.tolist()
|
||||
settings.docStoreConn.insert([d], search.index_name(tenant_id), doc.kb_id)
|
||||
|
||||
DocumentService.increment_chunk_num(
|
||||
doc.id, doc.kb_id, c, 1, 0)
|
||||
return get_json_result(data={"chunk_id": chunck_id})
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route('/retrieval_test', methods=['POST']) # noqa: F821
|
||||
@login_required
|
||||
@validate_request("kb_id", "question")
|
||||
def retrieval_test():
|
||||
req = request.json
|
||||
page = int(req.get("page", 1))
|
||||
size = int(req.get("size", 30))
|
||||
question = req["question"]
|
||||
kb_ids = req["kb_id"]
|
||||
if isinstance(kb_ids, str):
|
||||
kb_ids = [kb_ids]
|
||||
if not kb_ids:
|
||||
return get_json_result(data=False, message='Please specify dataset firstly.',
|
||||
code=settings.RetCode.DATA_ERROR)
|
||||
|
||||
doc_ids = req.get("doc_ids", [])
|
||||
use_kg = req.get("use_kg", False)
|
||||
top = int(req.get("top_k", 1024))
|
||||
langs = req.get("cross_languages", [])
|
||||
tenant_ids = []
|
||||
|
||||
if req.get("search_id", ""):
|
||||
search_config = SearchService.get_detail(req.get("search_id", "")).get("search_config", {})
|
||||
meta_data_filter = search_config.get("meta_data_filter", {})
|
||||
metas = DocumentService.get_meta_by_kbs(kb_ids)
|
||||
if meta_data_filter.get("method") == "auto":
|
||||
chat_mdl = LLMBundle(current_user.id, LLMType.CHAT, llm_name=search_config.get("chat_id", ""))
|
||||
filters = gen_meta_filter(chat_mdl, metas, question)
|
||||
doc_ids.extend(meta_filter(metas, filters))
|
||||
if not doc_ids:
|
||||
doc_ids = None
|
||||
elif meta_data_filter.get("method") == "manual":
|
||||
doc_ids.extend(meta_filter(metas, meta_data_filter["manual"]))
|
||||
if not doc_ids:
|
||||
doc_ids = None
|
||||
|
||||
try:
|
||||
tenants = UserTenantService.query(user_id=current_user.id)
|
||||
for kb_id in kb_ids:
|
||||
for tenant in tenants:
|
||||
if KnowledgebaseService.query(
|
||||
tenant_id=tenant.tenant_id, id=kb_id):
|
||||
tenant_ids.append(tenant.tenant_id)
|
||||
break
|
||||
else:
|
||||
return get_json_result(
|
||||
data=False, message='Only owner of knowledgebase authorized for this operation.',
|
||||
code=settings.RetCode.OPERATING_ERROR)
|
||||
|
||||
e, kb = KnowledgebaseService.get_by_id(kb_ids[0])
|
||||
if not e:
|
||||
return get_data_error_result(message="Knowledgebase not found!")
|
||||
|
||||
if langs:
|
||||
question = cross_languages(kb.tenant_id, None, question, langs)
|
||||
|
||||
embd_mdl = LLMBundle(kb.tenant_id, LLMType.EMBEDDING.value, llm_name=kb.embd_id)
|
||||
|
||||
rerank_mdl = None
|
||||
if req.get("rerank_id"):
|
||||
rerank_mdl = LLMBundle(kb.tenant_id, LLMType.RERANK.value, llm_name=req["rerank_id"])
|
||||
|
||||
if req.get("keyword", False):
|
||||
chat_mdl = LLMBundle(kb.tenant_id, LLMType.CHAT)
|
||||
question += keyword_extraction(chat_mdl, question)
|
||||
|
||||
labels = label_question(question, [kb])
|
||||
ranks = settings.retrievaler.retrieval(question, embd_mdl, tenant_ids, kb_ids, page, size,
|
||||
float(req.get("similarity_threshold", 0.0)),
|
||||
float(req.get("vector_similarity_weight", 0.3)),
|
||||
top,
|
||||
doc_ids, rerank_mdl=rerank_mdl, highlight=req.get("highlight"),
|
||||
rank_feature=labels
|
||||
)
|
||||
if use_kg:
|
||||
ck = settings.kg_retrievaler.retrieval(question,
|
||||
tenant_ids,
|
||||
kb_ids,
|
||||
embd_mdl,
|
||||
LLMBundle(kb.tenant_id, LLMType.CHAT))
|
||||
if ck["content_with_weight"]:
|
||||
ranks["chunks"].insert(0, ck)
|
||||
|
||||
for c in ranks["chunks"]:
|
||||
c.pop("vector", None)
|
||||
ranks["labels"] = labels
|
||||
|
||||
return get_json_result(data=ranks)
|
||||
except Exception as e:
|
||||
if str(e).find("not_found") > 0:
|
||||
return get_json_result(data=False, message='No chunk found! Check the chunk status please!',
|
||||
code=settings.RetCode.DATA_ERROR)
|
||||
return server_error_response(e)
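A hedged example of the retrieval-test request: only kb_id and question are required, the remaining keys mirror the optional knobs read above (the /v1/chunk prefix is an assumption about how this blueprint is mounted):

# Hedged sketch: run a retrieval test against one knowledge base.
import requests

payload = {
    "kb_id": ["<kb-id>"],
    "question": "What is RAGFlow?",
    "page": 1,
    "size": 10,
    "similarity_threshold": 0.2,
    "vector_similarity_weight": 0.3,
    "top_k": 1024,
    "keyword": False,
}
resp = requests.post(
    "http://localhost:9380/v1/chunk/retrieval_test",     # assumption: default mount point
    json=payload,
    cookies={"session": "<login-session-cookie>"},        # assumption: session auth
)
print(resp.json())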
|
||||
|
||||
|
||||
@manager.route('/knowledge_graph', methods=['GET']) # noqa: F821
|
||||
@login_required
|
||||
def knowledge_graph():
|
||||
doc_id = request.args["doc_id"]
|
||||
tenant_id = DocumentService.get_tenant_id(doc_id)
|
||||
kb_ids = KnowledgebaseService.get_kb_ids(tenant_id)
|
||||
req = {
|
||||
"doc_ids": [doc_id],
|
||||
"knowledge_graph_kwd": ["graph", "mind_map"]
|
||||
}
|
||||
sres = settings.retrievaler.search(req, search.index_name(tenant_id), kb_ids)
|
||||
obj = {"graph": {}, "mind_map": {}}
|
||||
for id in sres.ids[:2]:
|
||||
ty = sres.field[id]["knowledge_graph_kwd"]
|
||||
try:
|
||||
content_json = json.loads(sres.field[id]["content_with_weight"])
|
||||
except Exception:
|
||||
continue
|
||||
|
||||
if ty == 'mind_map':
|
||||
node_dict = {}
|
||||
|
||||
def repeat_deal(content_json, node_dict):
|
||||
if 'id' in content_json:
|
||||
if content_json['id'] in node_dict:
|
||||
node_name = content_json['id']
|
||||
content_json['id'] += f"({node_dict[content_json['id']]})"
|
||||
node_dict[node_name] += 1
|
||||
else:
|
||||
node_dict[content_json['id']] = 1
|
||||
if 'children' in content_json and content_json['children']:
|
||||
for item in content_json['children']:
|
||||
repeat_deal(item, node_dict)
|
||||
|
||||
repeat_deal(content_json, node_dict)
|
||||
|
||||
obj[ty] = content_json
|
||||
|
||||
return get_json_result(data=obj)
|
||||
419
api/apps/conversation_app.py
Normal file
@@ -0,0 +1,419 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import json
|
||||
import re
|
||||
import logging
|
||||
from copy import deepcopy
|
||||
from flask import Response, request
|
||||
from flask_login import current_user, login_required
|
||||
from api import settings
|
||||
from api.db import LLMType
|
||||
from api.db.db_models import APIToken
|
||||
from api.db.services.conversation_service import ConversationService, structure_answer
|
||||
from api.db.services.dialog_service import DialogService, ask, chat, gen_mindmap
|
||||
from api.db.services.llm_service import LLMBundle
|
||||
from api.db.services.search_service import SearchService
|
||||
from api.db.services.tenant_llm_service import TenantLLMService
|
||||
from api.db.services.user_service import TenantService, UserTenantService
|
||||
from api.utils.api_utils import get_data_error_result, get_json_result, server_error_response, validate_request
|
||||
from rag.prompts.template import load_prompt
|
||||
from rag.prompts.generator import chunks_format
|
||||
|
||||
|
||||
@manager.route("/set", methods=["POST"]) # noqa: F821
|
||||
@login_required
|
||||
def set_conversation():
|
||||
req = request.json
|
||||
conv_id = req.get("conversation_id")
|
||||
is_new = req.get("is_new")
|
||||
name = req.get("name", "New conversation")
|
||||
req["user_id"] = current_user.id
|
||||
|
||||
if len(name) > 255:
|
||||
name = name[0:255]
|
||||
|
||||
del req["is_new"]
|
||||
if not is_new:
|
||||
del req["conversation_id"]
|
||||
try:
|
||||
if not ConversationService.update_by_id(conv_id, req):
|
||||
return get_data_error_result(message="Conversation not found!")
|
||||
e, conv = ConversationService.get_by_id(conv_id)
|
||||
if not e:
|
||||
return get_data_error_result(message="Fail to update a conversation!")
|
||||
conv = conv.to_dict()
|
||||
return get_json_result(data=conv)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
try:
|
||||
e, dia = DialogService.get_by_id(req["dialog_id"])
|
||||
if not e:
|
||||
return get_data_error_result(message="Dialog not found")
|
||||
conv = {
|
||||
"id": conv_id,
|
||||
"dialog_id": req["dialog_id"],
|
||||
"name": name,
|
||||
"message": [{"role": "assistant", "content": dia.prompt_config["prologue"]}],
|
||||
"user_id": current_user.id,
|
||||
"reference": [],
|
||||
}
|
||||
ConversationService.save(**conv)
|
||||
return get_json_result(data=conv)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route("/get", methods=["GET"]) # noqa: F821
|
||||
@login_required
|
||||
def get():
|
||||
conv_id = request.args["conversation_id"]
|
||||
try:
|
||||
e, conv = ConversationService.get_by_id(conv_id)
|
||||
if not e:
|
||||
return get_data_error_result(message="Conversation not found!")
|
||||
tenants = UserTenantService.query(user_id=current_user.id)
|
||||
avatar = None
|
||||
for tenant in tenants:
|
||||
dialog = DialogService.query(tenant_id=tenant.tenant_id, id=conv.dialog_id)
|
||||
if dialog and len(dialog) > 0:
|
||||
avatar = dialog[0].icon
|
||||
break
|
||||
else:
|
||||
return get_json_result(data=False, message="Only owner of conversation authorized for this operation.", code=settings.RetCode.OPERATING_ERROR)
|
||||
|
||||
for ref in conv.reference:
|
||||
if isinstance(ref, list):
|
||||
continue
|
||||
ref["chunks"] = chunks_format(ref)
|
||||
|
||||
conv = conv.to_dict()
|
||||
conv["avatar"] = avatar
|
||||
return get_json_result(data=conv)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route("/getsse/<dialog_id>", methods=["GET"]) # type: ignore # noqa: F821
|
||||
def getsse(dialog_id):
|
||||
token = request.headers.get("Authorization").split()
|
||||
if len(token) != 2:
|
||||
return get_data_error_result(message='Authorization is not valid!')
|
||||
token = token[1]
|
||||
objs = APIToken.query(beta=token)
|
||||
if not objs:
|
||||
return get_data_error_result(message='Authentication error: API key is invalid!')
|
||||
try:
|
||||
e, conv = DialogService.get_by_id(dialog_id)
|
||||
if not e:
|
||||
return get_data_error_result(message="Dialog not found!")
|
||||
conv = conv.to_dict()
|
||||
conv["avatar"] = conv["icon"]
|
||||
del conv["icon"]
|
||||
return get_json_result(data=conv)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route("/rm", methods=["POST"]) # noqa: F821
|
||||
@login_required
|
||||
def rm():
|
||||
conv_ids = request.json["conversation_ids"]
|
||||
try:
|
||||
for cid in conv_ids:
|
||||
exist, conv = ConversationService.get_by_id(cid)
|
||||
if not exist:
|
||||
return get_data_error_result(message="Conversation not found!")
|
||||
tenants = UserTenantService.query(user_id=current_user.id)
|
||||
for tenant in tenants:
|
||||
if DialogService.query(tenant_id=tenant.tenant_id, id=conv.dialog_id):
|
||||
break
|
||||
else:
|
||||
return get_json_result(data=False, message="Only owner of conversation authorized for this operation.", code=settings.RetCode.OPERATING_ERROR)
|
||||
ConversationService.delete_by_id(cid)
|
||||
return get_json_result(data=True)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route("/list", methods=["GET"]) # noqa: F821
|
||||
@login_required
|
||||
def list_conversation():
|
||||
dialog_id = request.args["dialog_id"]
|
||||
try:
|
||||
if not DialogService.query(tenant_id=current_user.id, id=dialog_id):
|
||||
return get_json_result(data=False, message="Only owner of dialog authorized for this operation.", code=settings.RetCode.OPERATING_ERROR)
|
||||
convs = ConversationService.query(dialog_id=dialog_id, order_by=ConversationService.model.create_time, reverse=True)
|
||||
|
||||
convs = [d.to_dict() for d in convs]
|
||||
return get_json_result(data=convs)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route("/completion", methods=["POST"]) # noqa: F821
|
||||
@login_required
|
||||
@validate_request("conversation_id", "messages")
|
||||
def completion():
|
||||
req = request.json
|
||||
msg = []
|
||||
for m in req["messages"]:
|
||||
if m["role"] == "system":
|
||||
continue
|
||||
if m["role"] == "assistant" and not msg:
|
||||
continue
|
||||
msg.append(m)
|
||||
message_id = msg[-1].get("id")
|
||||
chat_model_id = req.get("llm_id", "")
|
||||
req.pop("llm_id", None)
|
||||
|
||||
chat_model_config = {}
|
||||
for model_config in [
|
||||
"temperature",
|
||||
"top_p",
|
||||
"frequency_penalty",
|
||||
"presence_penalty",
|
||||
"max_tokens",
|
||||
]:
|
||||
config = req.get(model_config)
|
||||
if config:
|
||||
chat_model_config[model_config] = config
|
||||
|
||||
try:
|
||||
e, conv = ConversationService.get_by_id(req["conversation_id"])
|
||||
if not e:
|
||||
return get_data_error_result(message="Conversation not found!")
|
||||
conv.message = deepcopy(req["messages"])
|
||||
e, dia = DialogService.get_by_id(conv.dialog_id)
|
||||
if not e:
|
||||
return get_data_error_result(message="Dialog not found!")
|
||||
del req["conversation_id"]
|
||||
del req["messages"]
|
||||
|
||||
if not conv.reference:
|
||||
conv.reference = []
|
||||
conv.reference = [r for r in conv.reference if r]
|
||||
conv.reference.append({"chunks": [], "doc_aggs": []})
|
||||
|
||||
if chat_model_id:
|
||||
if not TenantLLMService.get_api_key(tenant_id=dia.tenant_id, model_name=chat_model_id):
|
||||
req.pop("chat_model_id", None)
|
||||
req.pop("chat_model_config", None)
|
||||
return get_data_error_result(message=f"Cannot use specified model {chat_model_id}.")
|
||||
dia.llm_id = chat_model_id
|
||||
dia.llm_setting = chat_model_config
|
||||
|
||||
is_embedded = bool(chat_model_id)
|
||||
def stream():
|
||||
nonlocal dia, msg, req, conv
|
||||
try:
|
||||
for ans in chat(dia, msg, True, **req):
|
||||
ans = structure_answer(conv, ans, message_id, conv.id)
|
||||
yield "data:" + json.dumps({"code": 0, "message": "", "data": ans}, ensure_ascii=False) + "\n\n"
|
||||
if not is_embedded:
|
||||
ConversationService.update_by_id(conv.id, conv.to_dict())
|
||||
except Exception as e:
|
||||
logging.exception(e)
|
||||
yield "data:" + json.dumps({"code": 500, "message": str(e), "data": {"answer": "**ERROR**: " + str(e), "reference": []}}, ensure_ascii=False) + "\n\n"
|
||||
yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"
|
||||
|
||||
if req.get("stream", True):
|
||||
resp = Response(stream(), mimetype="text/event-stream")
|
||||
resp.headers.add_header("Cache-control", "no-cache")
|
||||
resp.headers.add_header("Connection", "keep-alive")
|
||||
resp.headers.add_header("X-Accel-Buffering", "no")
|
||||
resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
|
||||
return resp
|
||||
|
||||
else:
|
||||
answer = None
|
||||
for ans in chat(dia, msg, **req):
|
||||
answer = structure_answer(conv, ans, message_id, conv.id)
|
||||
if not is_embedded:
|
||||
ConversationService.update_by_id(conv.id, conv.to_dict())
|
||||
break
|
||||
return get_json_result(data=answer)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route("/tts", methods=["POST"]) # noqa: F821
|
||||
@login_required
|
||||
def tts():
|
||||
req = request.json
|
||||
text = req["text"]
|
||||
|
||||
tenants = TenantService.get_info_by(current_user.id)
|
||||
if not tenants:
|
||||
return get_data_error_result(message="Tenant not found!")
|
||||
|
||||
tts_id = tenants[0]["tts_id"]
|
||||
if not tts_id:
|
||||
return get_data_error_result(message="No default TTS model is set")
|
||||
|
||||
tts_mdl = LLMBundle(tenants[0]["tenant_id"], LLMType.TTS, tts_id)
|
||||
|
||||
def stream_audio():
|
||||
try:
|
||||
for txt in re.split(r"[,。/《》?;:!\n\r:;]+", text):
|
||||
for chunk in tts_mdl.tts(txt):
|
||||
yield chunk
|
||||
except Exception as e:
|
||||
yield ("data:" + json.dumps({"code": 500, "message": str(e), "data": {"answer": "**ERROR**: " + str(e)}}, ensure_ascii=False)).encode("utf-8")
|
||||
|
||||
resp = Response(stream_audio(), mimetype="audio/mpeg")
|
||||
resp.headers.add_header("Cache-Control", "no-cache")
|
||||
resp.headers.add_header("Connection", "keep-alive")
|
||||
resp.headers.add_header("X-Accel-Buffering", "no")
|
||||
|
||||
return resp
|
||||
|
||||
|
||||
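A rough client sketch for saving the MPEG stream returned by /tts; the mount prefix and the auth headers are assumptions, only the JSON body and the audio/mpeg response come from the handler above:

import requests

def synthesize_to_file(base_url, auth_headers, text, out_path="tts.mp3"):
    with requests.post(f"{base_url}/v1/conversation/tts", json={"text": text},
                       headers=auth_headers, stream=True) as resp:
        with open(out_path, "wb") as f:
            for chunk in resp.iter_content(chunk_size=8192):
                f.write(chunk)
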
@manager.route("/delete_msg", methods=["POST"]) # noqa: F821
|
||||
@login_required
|
||||
@validate_request("conversation_id", "message_id")
|
||||
def delete_msg():
|
||||
req = request.json
|
||||
e, conv = ConversationService.get_by_id(req["conversation_id"])
|
||||
if not e:
|
||||
return get_data_error_result(message="Conversation not found!")
|
||||
|
||||
conv = conv.to_dict()
|
||||
for i, msg in enumerate(conv["message"]):
|
||||
if req["message_id"] != msg.get("id", ""):
|
||||
continue
|
||||
assert conv["message"][i + 1]["id"] == req["message_id"]
|
||||
conv["message"].pop(i)
|
||||
conv["message"].pop(i)
|
||||
conv["reference"].pop(max(0, i // 2 - 1))
|
||||
break
|
||||
|
||||
ConversationService.update_by_id(conv["id"], conv)
|
||||
return get_json_result(data=conv)
|
||||
|
||||
|
||||
@manager.route("/thumbup", methods=["POST"]) # noqa: F821
|
||||
@login_required
|
||||
@validate_request("conversation_id", "message_id")
|
||||
def thumbup():
|
||||
req = request.json
|
||||
e, conv = ConversationService.get_by_id(req["conversation_id"])
|
||||
if not e:
|
||||
return get_data_error_result(message="Conversation not found!")
|
||||
up_down = req.get("thumbup")
|
||||
feedback = req.get("feedback", "")
|
||||
conv = conv.to_dict()
|
||||
for i, msg in enumerate(conv["message"]):
|
||||
if req["message_id"] == msg.get("id", "") and msg.get("role", "") == "assistant":
|
||||
if up_down:
|
||||
msg["thumbup"] = True
|
||||
if "feedback" in msg:
|
||||
del msg["feedback"]
|
||||
else:
|
||||
msg["thumbup"] = False
|
||||
if feedback:
|
||||
msg["feedback"] = feedback
|
||||
break
|
||||
|
||||
ConversationService.update_by_id(conv["id"], conv)
|
||||
return get_json_result(data=conv)
|
||||
|
||||
|
||||
@manager.route("/ask", methods=["POST"]) # noqa: F821
|
||||
@login_required
|
||||
@validate_request("question", "kb_ids")
|
||||
def ask_about():
|
||||
req = request.json
|
||||
uid = current_user.id
|
||||
|
||||
search_id = req.get("search_id", "")
|
||||
search_app = None
|
||||
search_config = {}
|
||||
if search_id:
|
||||
search_app = SearchService.get_detail(search_id)
|
||||
if search_app:
|
||||
search_config = search_app.get("search_config", {})
|
||||
|
||||
def stream():
|
||||
nonlocal req, uid
|
||||
try:
|
||||
for ans in ask(req["question"], req["kb_ids"], uid, search_config=search_config):
|
||||
yield "data:" + json.dumps({"code": 0, "message": "", "data": ans}, ensure_ascii=False) + "\n\n"
|
||||
except Exception as e:
|
||||
yield "data:" + json.dumps({"code": 500, "message": str(e), "data": {"answer": "**ERROR**: " + str(e), "reference": []}}, ensure_ascii=False) + "\n\n"
|
||||
yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"
|
||||
|
||||
resp = Response(stream(), mimetype="text/event-stream")
|
||||
resp.headers.add_header("Cache-control", "no-cache")
|
||||
resp.headers.add_header("Connection", "keep-alive")
|
||||
resp.headers.add_header("X-Accel-Buffering", "no")
|
||||
resp.headers.add_header("Content-Type", "text/event-stream; charset=utf-8")
|
||||
return resp
|
||||
|
||||
|
||||
@manager.route("/mindmap", methods=["POST"]) # noqa: F821
|
||||
@login_required
|
||||
@validate_request("question", "kb_ids")
|
||||
def mindmap():
|
||||
req = request.json
|
||||
search_id = req.get("search_id", "")
|
||||
search_app = SearchService.get_detail(search_id) if search_id else {}
|
||||
search_config = search_app.get("search_config", {}) if search_app else {}
|
||||
kb_ids = search_config.get("kb_ids", [])
|
||||
kb_ids.extend(req["kb_ids"])
|
||||
kb_ids = list(set(kb_ids))
|
||||
|
||||
mind_map = gen_mindmap(req["question"], kb_ids, search_app.get("tenant_id", current_user.id), search_config)
|
||||
if "error" in mind_map:
|
||||
return server_error_response(Exception(mind_map["error"]))
|
||||
return get_json_result(data=mind_map)
|
||||
|
||||
|
||||
@manager.route("/related_questions", methods=["POST"]) # noqa: F821
|
||||
@login_required
|
||||
@validate_request("question")
|
||||
def related_questions():
|
||||
req = request.json
|
||||
|
||||
search_id = req.get("search_id", "")
|
||||
search_config = {}
|
||||
if search_id:
|
||||
if search_app := SearchService.get_detail(search_id):
|
||||
search_config = search_app.get("search_config", {})
|
||||
|
||||
question = req["question"]
|
||||
|
||||
chat_id = search_config.get("chat_id", "")
|
||||
chat_mdl = LLMBundle(current_user.id, LLMType.CHAT, chat_id)
|
||||
|
||||
gen_conf = search_config.get("llm_setting", {"temperature": 0.9})
|
||||
if "parameter" in gen_conf:
|
||||
del gen_conf["parameter"]
|
||||
prompt = load_prompt("related_question")
|
||||
ans = chat_mdl.chat(
|
||||
prompt,
|
||||
[
|
||||
{
|
||||
"role": "user",
|
||||
"content": f"""
|
||||
Keywords: {question}
|
||||
Related search terms:
|
||||
""",
|
||||
}
|
||||
],
|
||||
gen_conf,
|
||||
)
|
||||
return get_json_result(data=[re.sub(r"^[0-9]\. ", "", a) for a in ans.split("\n") if re.match(r"^[0-9]\. ", a)])
|
||||
227
api/apps/dialog_app.py
Normal file
@@ -0,0 +1,227 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
from flask import request
|
||||
from flask_login import login_required, current_user
|
||||
from api.db.services import duplicate_name
|
||||
from api.db.services.dialog_service import DialogService
|
||||
from api.db import StatusEnum
|
||||
from api.db.services.tenant_llm_service import TenantLLMService
|
||||
from api.db.services.knowledgebase_service import KnowledgebaseService
|
||||
from api.db.services.user_service import TenantService, UserTenantService
|
||||
from api import settings
|
||||
from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
|
||||
from api.utils import get_uuid
|
||||
from api.utils.api_utils import get_json_result
|
||||
|
||||
|
||||
@manager.route('/set', methods=['POST']) # noqa: F821
|
||||
@validate_request("prompt_config")
|
||||
@login_required
|
||||
def set_dialog():
|
||||
req = request.json
|
||||
dialog_id = req.get("dialog_id", "")
|
||||
is_create = not dialog_id
|
||||
name = req.get("name", "New Dialog")
|
||||
if not isinstance(name, str):
|
||||
return get_data_error_result(message="Dialog name must be string.")
|
||||
if name.strip() == "":
|
||||
return get_data_error_result(message="Dialog name can't be empty.")
|
||||
if len(name.encode("utf-8")) > 255:
|
||||
return get_data_error_result(message=f"Dialog name length is {len(name)} which is larger than 255")
|
||||
|
||||
if is_create and DialogService.query(tenant_id=current_user.id, name=name.strip()):
|
||||
name = name.strip()
|
||||
name = duplicate_name(
|
||||
DialogService.query,
|
||||
name=name,
|
||||
tenant_id=current_user.id,
|
||||
status=StatusEnum.VALID.value)
|
||||
|
||||
description = req.get("description", "A helpful dialog")
|
||||
icon = req.get("icon", "")
|
||||
top_n = req.get("top_n", 6)
|
||||
top_k = req.get("top_k", 1024)
|
||||
rerank_id = req.get("rerank_id", "")
|
||||
if not rerank_id:
|
||||
req["rerank_id"] = ""
|
||||
similarity_threshold = req.get("similarity_threshold", 0.1)
|
||||
vector_similarity_weight = req.get("vector_similarity_weight", 0.3)
|
||||
llm_setting = req.get("llm_setting", {})
|
||||
meta_data_filter = req.get("meta_data_filter", {})
|
||||
prompt_config = req["prompt_config"]
|
||||
|
||||
if not is_create:
|
||||
if not req.get("kb_ids", []) and not prompt_config.get("tavily_api_key") and "{knowledge}" in prompt_config['system']:
|
||||
return get_data_error_result(message="Please remove `{knowledge}` in system prompt since no knowledge base / Tavily used here.")
|
||||
|
||||
for p in prompt_config["parameters"]:
|
||||
if p["optional"]:
|
||||
continue
|
||||
if prompt_config["system"].find("{%s}" % p["key"]) < 0:
|
||||
return get_data_error_result(
|
||||
message="Parameter '{}' is not used".format(p["key"]))
|
||||
|
||||
try:
|
||||
e, tenant = TenantService.get_by_id(current_user.id)
|
||||
if not e:
|
||||
return get_data_error_result(message="Tenant not found!")
|
||||
kbs = KnowledgebaseService.get_by_ids(req.get("kb_ids", []))
|
||||
embd_ids = [TenantLLMService.split_model_name_and_factory(kb.embd_id)[0] for kb in kbs] # remove vendor suffix for comparison
|
||||
embd_count = len(set(embd_ids))
|
||||
if embd_count > 1:
|
||||
return get_data_error_result(message=f'Datasets use different embedding models: {[kb.embd_id for kb in kbs]}')
|
||||
|
||||
llm_id = req.get("llm_id", tenant.llm_id)
|
||||
if not dialog_id:
|
||||
dia = {
|
||||
"id": get_uuid(),
|
||||
"tenant_id": current_user.id,
|
||||
"name": name,
|
||||
"kb_ids": req.get("kb_ids", []),
|
||||
"description": description,
|
||||
"llm_id": llm_id,
|
||||
"llm_setting": llm_setting,
|
||||
"prompt_config": prompt_config,
|
||||
"meta_data_filter": meta_data_filter,
|
||||
"top_n": top_n,
|
||||
"top_k": top_k,
|
||||
"rerank_id": rerank_id,
|
||||
"similarity_threshold": similarity_threshold,
|
||||
"vector_similarity_weight": vector_similarity_weight,
|
||||
"icon": icon
|
||||
}
|
||||
if not DialogService.save(**dia):
|
||||
return get_data_error_result(message="Fail to new a dialog!")
|
||||
return get_json_result(data=dia)
|
||||
else:
|
||||
del req["dialog_id"]
|
||||
if "kb_names" in req:
|
||||
del req["kb_names"]
|
||||
if not DialogService.update_by_id(dialog_id, req):
|
||||
return get_data_error_result(message="Dialog not found!")
|
||||
e, dia = DialogService.get_by_id(dialog_id)
|
||||
if not e:
|
||||
return get_data_error_result(message="Fail to update a dialog!")
|
||||
dia = dia.to_dict()
|
||||
dia.update(req)
|
||||
dia["kb_ids"], dia["kb_names"] = get_kb_names(dia["kb_ids"])
|
||||
return get_json_result(data=dia)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route('/get', methods=['GET']) # noqa: F821
|
||||
@login_required
|
||||
def get():
|
||||
dialog_id = request.args["dialog_id"]
|
||||
try:
|
||||
e, dia = DialogService.get_by_id(dialog_id)
|
||||
if not e:
|
||||
return get_data_error_result(message="Dialog not found!")
|
||||
dia = dia.to_dict()
|
||||
dia["kb_ids"], dia["kb_names"] = get_kb_names(dia["kb_ids"])
|
||||
return get_json_result(data=dia)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
def get_kb_names(kb_ids):
|
||||
ids, nms = [], []
|
||||
for kid in kb_ids:
|
||||
e, kb = KnowledgebaseService.get_by_id(kid)
|
||||
if not e or kb.status != StatusEnum.VALID.value:
|
||||
continue
|
||||
ids.append(kid)
|
||||
nms.append(kb.name)
|
||||
return ids, nms
|
||||
|
||||
|
||||
@manager.route('/list', methods=['GET']) # noqa: F821
|
||||
@login_required
|
||||
def list_dialogs():
|
||||
try:
|
||||
diags = DialogService.query(
|
||||
tenant_id=current_user.id,
|
||||
status=StatusEnum.VALID.value,
|
||||
reverse=True,
|
||||
order_by=DialogService.model.create_time)
|
||||
diags = [d.to_dict() for d in diags]
|
||||
for d in diags:
|
||||
d["kb_ids"], d["kb_names"] = get_kb_names(d["kb_ids"])
|
||||
return get_json_result(data=diags)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route('/next', methods=['POST']) # noqa: F821
|
||||
@login_required
|
||||
def list_dialogs_next():
|
||||
keywords = request.args.get("keywords", "")
|
||||
page_number = int(request.args.get("page", 0))
|
||||
items_per_page = int(request.args.get("page_size", 0))
|
||||
parser_id = request.args.get("parser_id")
|
||||
orderby = request.args.get("orderby", "create_time")
|
||||
if request.args.get("desc", "true").lower() == "false":
|
||||
desc = False
|
||||
else:
|
||||
desc = True
|
||||
|
||||
req = request.get_json()
|
||||
owner_ids = req.get("owner_ids", [])
|
||||
try:
|
||||
if not owner_ids:
|
||||
# tenants = TenantService.get_joined_tenants_by_user_id(current_user.id)
|
||||
# tenants = [tenant["tenant_id"] for tenant in tenants]
|
||||
tenants = [] # keep it here
|
||||
dialogs, total = DialogService.get_by_tenant_ids(
|
||||
tenants, current_user.id, page_number,
|
||||
items_per_page, orderby, desc, keywords, parser_id)
|
||||
else:
|
||||
tenants = owner_ids
|
||||
dialogs, total = DialogService.get_by_tenant_ids(
|
||||
tenants, current_user.id, 0,
|
||||
0, orderby, desc, keywords, parser_id)
|
||||
dialogs = [dialog for dialog in dialogs if dialog["tenant_id"] in tenants]
|
||||
total = len(dialogs)
|
||||
if page_number and items_per_page:
|
||||
dialogs = dialogs[(page_number-1)*items_per_page:page_number*items_per_page]
|
||||
return get_json_result(data={"dialogs": dialogs, "total": total})
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route('/rm', methods=['POST']) # noqa: F821
|
||||
@login_required
|
||||
@validate_request("dialog_ids")
|
||||
def rm():
|
||||
req = request.json
|
||||
dialog_list=[]
|
||||
tenants = UserTenantService.query(user_id=current_user.id)
|
||||
try:
|
||||
for id in req["dialog_ids"]:
|
||||
for tenant in tenants:
|
||||
if DialogService.query(tenant_id=tenant.tenant_id, id=id):
|
||||
break
|
||||
else:
|
||||
return get_json_result(
|
||||
data=False, message='Only owner of dialog authorized for this operation.',
|
||||
code=settings.RetCode.OPERATING_ERROR)
|
||||
dialog_list.append({"id": id,"status":StatusEnum.INVALID.value})
|
||||
DialogService.update_many_by_id(dialog_list)
|
||||
return get_json_result(data=True)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
873
api/apps/document_app.py
Normal file
@@ -0,0 +1,873 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License
|
||||
#
|
||||
import json
|
||||
import os.path
|
||||
import pathlib
|
||||
import re
|
||||
import traceback
|
||||
from pathlib import Path
|
||||
from typing import List, Optional
|
||||
|
||||
from fastapi import APIRouter, Depends, File, Form, HTTPException, UploadFile, Query
|
||||
from fastapi.responses import StreamingResponse
|
||||
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
|
||||
|
||||
from api import settings
|
||||
from api.common.check_team_permission import check_kb_team_permission
|
||||
from api.constants import FILE_NAME_LEN_LIMIT, IMG_BASE64_PREFIX
|
||||
from api.db import VALID_FILE_TYPES, VALID_TASK_STATUS, FileSource, FileType, ParserType, TaskStatus
|
||||
from api.db.db_models import File, Task
|
||||
from api.db.services import duplicate_name
|
||||
from api.db.services.document_service import DocumentService, doc_upload_and_parse
|
||||
from api.db.services.file2document_service import File2DocumentService
|
||||
from api.db.services.file_service import FileService
|
||||
from api.db.services.knowledgebase_service import KnowledgebaseService
|
||||
from api.db.services.task_service import TaskService, cancel_all_task_of, queue_tasks, queue_dataflow
|
||||
from api.db.services.user_service import UserTenantService
|
||||
from api.utils import get_uuid
|
||||
from api.utils.api_utils import (
|
||||
get_data_error_result,
|
||||
get_json_result,
|
||||
server_error_response,
|
||||
validate_request,
|
||||
)
|
||||
from api.utils.file_utils import filename_type, get_project_base_directory, thumbnail
|
||||
from api.utils.web_utils import CONTENT_TYPE_MAP, html2pdf, is_valid_url
|
||||
from deepdoc.parser.html_parser import RAGFlowHtmlParser
|
||||
from rag.nlp import search
|
||||
from rag.utils.storage_factory import STORAGE_IMPL
|
||||
from pydantic import BaseModel
|
||||
from api.db.db_models import User
|
||||
|
||||
# Security
security = HTTPBearer()

# Pydantic models for request/response
class WebCrawlRequest(BaseModel):
    kb_id: str
    name: str
    url: str

class CreateDocumentRequest(BaseModel):
    name: str
    kb_id: str

class DocumentListRequest(BaseModel):
    run_status: List[str] = []
    types: List[str] = []
    suffix: List[str] = []

class DocumentFilterRequest(BaseModel):
    kb_id: str
    keywords: str = ""
    run_status: List[str] = []
    types: List[str] = []
    suffix: List[str] = []

class DocumentInfosRequest(BaseModel):
    doc_ids: List[str]

class ChangeStatusRequest(BaseModel):
    doc_ids: List[str]
    status: str

class RemoveDocumentRequest(BaseModel):
    doc_id: List[str]

class RunDocumentRequest(BaseModel):
    doc_ids: List[str]
    run: str
    delete: bool = False

class RenameDocumentRequest(BaseModel):
    doc_id: str
    name: str

class ChangeParserRequest(BaseModel):
    doc_id: str
    parser_id: str
    pipeline_id: Optional[str] = None
    parser_config: Optional[dict] = None

class UploadAndParseRequest(BaseModel):
    conversation_id: str

class ParseRequest(BaseModel):
    url: Optional[str] = None

class SetMetaRequest(BaseModel):
    doc_id: str
    meta: str

# Dependency injection
|
||||
async def get_current_user(credentials: HTTPAuthorizationCredentials = Depends(security)):
|
||||
"""获取当前用户"""
|
||||
from api.db import StatusEnum
|
||||
from api.db.services.user_service import UserService
|
||||
from fastapi import HTTPException, status
|
||||
import logging
|
||||
|
||||
try:
|
||||
from itsdangerous.url_safe import URLSafeTimedSerializer as Serializer
|
||||
except ImportError:
|
||||
# Fall back to PyJWT if itsdangerous is unavailable
|
||||
import jwt
|
||||
Serializer = jwt
|
||||
|
||||
jwt = Serializer(secret_key=settings.SECRET_KEY)
|
||||
authorization = credentials.credentials
|
||||
|
||||
if authorization:
|
||||
try:
|
||||
access_token = str(jwt.loads(authorization))
|
||||
|
||||
if not access_token or not access_token.strip():
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_401_UNAUTHORIZED,
|
||||
detail="Authentication attempt with empty access token"
|
||||
)
|
||||
|
||||
# Access tokens should be UUIDs (32 hex characters)
|
||||
if len(access_token.strip()) < 32:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_401_UNAUTHORIZED,
|
||||
detail=f"Authentication attempt with invalid token format: {len(access_token)} chars"
|
||||
)
|
||||
|
||||
user = UserService.query(
|
||||
access_token=access_token, status=StatusEnum.VALID.value
|
||||
)
|
||||
if user:
|
||||
if not user[0].access_token or not user[0].access_token.strip():
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_401_UNAUTHORIZED,
|
||||
detail=f"User {user[0].email} has empty access_token in database"
|
||||
)
|
||||
return user[0]
|
||||
else:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_401_UNAUTHORIZED,
|
||||
detail="Invalid access token"
|
||||
)
|
||||
except Exception as e:
|
||||
logging.warning(f"load_user got exception {e}")
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_401_UNAUTHORIZED,
|
||||
detail="Invalid access token"
|
||||
)
|
||||
else:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_401_UNAUTHORIZED,
|
||||
detail="Authorization header required"
|
||||
)
|
||||
|
||||
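The bearer credential this dependency unwraps is the user's access_token serialized with the same secret; a rough sketch of building a matching header on the client side (the helper name and call site are illustrative, not part of this API):

from itsdangerous.url_safe import URLSafeTimedSerializer as Serializer

from api import settings

def bearer_header(access_token: str) -> dict:
    # Mirrors jwt.loads(...) in get_current_user() above: dumps() on the client,
    # loads() on the server, both keyed with settings.SECRET_KEY.
    serializer = Serializer(secret_key=settings.SECRET_KEY)
    return {"Authorization": f"Bearer {serializer.dumps(access_token)}"}
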
# Create router
|
||||
router = APIRouter()
|
||||
|
||||
|
||||
@router.post("/upload")
|
||||
async def upload(
|
||||
kb_id: str = Form(...),
|
||||
files: List[UploadFile] = File(...),
|
||||
current_user = Depends(get_current_user)
|
||||
):
|
||||
if not kb_id:
|
||||
return get_json_result(data=False, message='Lack of "KB ID"', code=settings.RetCode.ARGUMENT_ERROR)
|
||||
|
||||
if not files:
|
||||
return get_json_result(data=False, message="No file part!", code=settings.RetCode.ARGUMENT_ERROR)
|
||||
|
||||
# Use UploadFile directly
|
||||
file_objs = files
|
||||
|
||||
for file_obj in file_objs:
|
||||
if file_obj.filename == "":
|
||||
return get_json_result(data=False, message="No file selected!", code=settings.RetCode.ARGUMENT_ERROR)
|
||||
if len(file_obj.filename.encode("utf-8")) > FILE_NAME_LEN_LIMIT:
|
||||
return get_json_result(data=False, message=f"File name must be {FILE_NAME_LEN_LIMIT} bytes or less.", code=settings.RetCode.ARGUMENT_ERROR)
|
||||
|
||||
e, kb = KnowledgebaseService.get_by_id(kb_id)
|
||||
if not e:
|
||||
raise LookupError("Can't find this knowledgebase!")
|
||||
if not check_kb_team_permission(kb, current_user.id):
|
||||
return get_json_result(data=False, message="No authorization.", code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
|
||||
err, files = await FileService.upload_document(kb, file_objs, current_user.id)
|
||||
if err:
|
||||
return get_json_result(data=files, message="\n".join(err), code=settings.RetCode.SERVER_ERROR)
|
||||
|
||||
if not files:
|
||||
return get_json_result(data=files, message="There seems to be an issue with your file format. Please verify it is correct and not corrupted.", code=settings.RetCode.DATA_ERROR)
|
||||
files = [f[0] for f in files] # remove the blob
|
||||
|
||||
return get_json_result(data=files)
|
||||
|
||||
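A rough multipart client sketch for the upload endpoint above; the /v1/document prefix is an assumption about where this router is mounted, while the kb_id form field and the files field name come from the signature:

import requests

def upload_documents(base_url, auth_headers, kb_id, paths):
    # One ("files", (filename, handle)) tuple per file, matching List[UploadFile].
    files = [("files", (path.split("/")[-1], open(path, "rb"))) for path in paths]
    try:
        resp = requests.post(f"{base_url}/v1/document/upload",
                             headers=auth_headers, data={"kb_id": kb_id}, files=files)
        return resp.json()
    finally:
        for _, (_, fh) in files:
            fh.close()
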
|
||||
@router.post("/web_crawl")
|
||||
async def web_crawl(
|
||||
req: WebCrawlRequest,
|
||||
current_user = Depends(get_current_user)
|
||||
):
|
||||
kb_id = req.kb_id
|
||||
if not kb_id:
|
||||
return get_json_result(data=False, message='Lack of "KB ID"', code=settings.RetCode.ARGUMENT_ERROR)
|
||||
name = req.name
|
||||
url = req.url
|
||||
if not is_valid_url(url):
|
||||
return get_json_result(data=False, message="The URL format is invalid", code=settings.RetCode.ARGUMENT_ERROR)
|
||||
e, kb = KnowledgebaseService.get_by_id(kb_id)
|
||||
if not e:
|
||||
raise LookupError("Can't find this knowledgebase!")
|
||||
if not check_kb_team_permission(kb, current_user.id):
|
||||
return get_json_result(data=False, message="No authorization.", code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
|
||||
blob = html2pdf(url)
|
||||
if not blob:
|
||||
return server_error_response(ValueError("Download failure."))
|
||||
|
||||
root_folder = FileService.get_root_folder(current_user.id)
|
||||
pf_id = root_folder["id"]
|
||||
FileService.init_knowledgebase_docs(pf_id, current_user.id)
|
||||
kb_root_folder = FileService.get_kb_folder(current_user.id)
|
||||
kb_folder = FileService.new_a_file_from_kb(kb.tenant_id, kb.name, kb_root_folder["id"])
|
||||
|
||||
try:
|
||||
filename = duplicate_name(DocumentService.query, name=name + ".pdf", kb_id=kb.id)
|
||||
filetype = filename_type(filename)
|
||||
if filetype == FileType.OTHER.value:
|
||||
raise RuntimeError("This type of file has not been supported yet!")
|
||||
|
||||
location = filename
|
||||
while STORAGE_IMPL.obj_exist(kb_id, location):
|
||||
location += "_"
|
||||
STORAGE_IMPL.put(kb_id, location, blob)
|
||||
doc = {
|
||||
"id": get_uuid(),
|
||||
"kb_id": kb.id,
|
||||
"parser_id": kb.parser_id,
|
||||
"parser_config": kb.parser_config,
|
||||
"created_by": current_user.id,
|
||||
"type": filetype,
|
||||
"name": filename,
|
||||
"location": location,
|
||||
"size": len(blob),
|
||||
"thumbnail": thumbnail(filename, blob),
|
||||
"suffix": Path(filename).suffix.lstrip("."),
|
||||
}
|
||||
if doc["type"] == FileType.VISUAL:
|
||||
doc["parser_id"] = ParserType.PICTURE.value
|
||||
if doc["type"] == FileType.AURAL:
|
||||
doc["parser_id"] = ParserType.AUDIO.value
|
||||
if re.search(r"\.(ppt|pptx|pages)$", filename):
|
||||
doc["parser_id"] = ParserType.PRESENTATION.value
|
||||
if re.search(r"\.(eml)$", filename):
|
||||
doc["parser_id"] = ParserType.EMAIL.value
|
||||
DocumentService.insert(doc)
|
||||
FileService.add_file_from_kb(doc, kb_folder["id"], kb.tenant_id)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
return get_json_result(data=True)
|
||||
|
||||
|
||||
@router.post("/create")
|
||||
async def create(
|
||||
req: CreateDocumentRequest,
|
||||
current_user = Depends(get_current_user)
|
||||
):
|
||||
kb_id = req.kb_id
|
||||
if not kb_id:
|
||||
return get_json_result(data=False, message='Lack of "KB ID"', code=settings.RetCode.ARGUMENT_ERROR)
|
||||
if len(req.name.encode("utf-8")) > FILE_NAME_LEN_LIMIT:
|
||||
return get_json_result(data=False, message=f"File name must be {FILE_NAME_LEN_LIMIT} bytes or less.", code=settings.RetCode.ARGUMENT_ERROR)
|
||||
|
||||
if req.name.strip() == "":
|
||||
return get_json_result(data=False, message="File name can't be empty.", code=settings.RetCode.ARGUMENT_ERROR)
|
||||
req.name = req.name.strip()
|
||||
|
||||
try:
|
||||
e, kb = KnowledgebaseService.get_by_id(kb_id)
|
||||
if not e:
|
||||
return get_data_error_result(message="Can't find this knowledgebase!")
|
||||
|
||||
if DocumentService.query(name=req.name, kb_id=kb_id):
|
||||
return get_data_error_result(message="Duplicated document name in the same knowledgebase.")
|
||||
|
||||
kb_root_folder = FileService.get_kb_folder(kb.tenant_id)
|
||||
if not kb_root_folder:
|
||||
return get_data_error_result(message="Cannot find the root folder.")
|
||||
kb_folder = FileService.new_a_file_from_kb(
|
||||
kb.tenant_id,
|
||||
kb.name,
|
||||
kb_root_folder["id"],
|
||||
)
|
||||
if not kb_folder:
|
||||
return get_data_error_result(message="Cannot find the kb folder for this file.")
|
||||
|
||||
doc = DocumentService.insert(
|
||||
{
|
||||
"id": get_uuid(),
|
||||
"kb_id": kb.id,
|
||||
"parser_id": kb.parser_id,
|
||||
"pipeline_id": kb.pipeline_id,
|
||||
"parser_config": kb.parser_config,
|
||||
"created_by": current_user.id,
|
||||
"type": FileType.VIRTUAL,
|
||||
"name": req.name,
|
||||
"suffix": Path(req.name).suffix.lstrip("."),
|
||||
"location": "",
|
||||
"size": 0,
|
||||
}
|
||||
)
|
||||
|
||||
FileService.add_file_from_kb(doc.to_dict(), kb_folder["id"], kb.tenant_id)
|
||||
|
||||
return get_json_result(data=doc.to_json())
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@router.post("/list")
|
||||
async def list_docs(
|
||||
kb_id: str = Query(...),
|
||||
keywords: str = Query(""),
|
||||
page: int = Query(0),
|
||||
page_size: int = Query(0),
|
||||
orderby: str = Query("create_time"),
|
||||
desc: str = Query("true"),
|
||||
create_time_from: int = Query(0),
|
||||
create_time_to: int = Query(0),
|
||||
req: DocumentListRequest = None,
|
||||
current_user = Depends(get_current_user)
|
||||
):
|
||||
if not kb_id:
|
||||
return get_json_result(data=False, message='Lack of "KB ID"', code=settings.RetCode.ARGUMENT_ERROR)
|
||||
tenants = UserTenantService.query(user_id=current_user.id)
|
||||
for tenant in tenants:
|
||||
if KnowledgebaseService.query(tenant_id=tenant.tenant_id, id=kb_id):
|
||||
break
|
||||
else:
|
||||
return get_json_result(data=False, message="Only owner of knowledgebase authorized for this operation.", code=settings.RetCode.OPERATING_ERROR)
|
||||
|
||||
if desc.lower() == "false":
|
||||
desc_bool = False
|
||||
else:
|
||||
desc_bool = True
|
||||
|
||||
run_status = req.run_status if req else []
|
||||
if run_status:
|
||||
invalid_status = {s for s in run_status if s not in VALID_TASK_STATUS}
|
||||
if invalid_status:
|
||||
return get_data_error_result(message=f"Invalid filter run status conditions: {', '.join(invalid_status)}")
|
||||
|
||||
types = req.types if req else []
|
||||
if types:
|
||||
invalid_types = {t for t in types if t not in VALID_FILE_TYPES}
|
||||
if invalid_types:
|
||||
return get_data_error_result(message=f"Invalid filter conditions: {', '.join(invalid_types)} type{'s' if len(invalid_types) > 1 else ''}")
|
||||
|
||||
suffix = req.suffix if req else []
|
||||
|
||||
try:
|
||||
docs, tol = DocumentService.get_by_kb_id(kb_id, page, page_size, orderby, desc_bool, keywords, run_status, types, suffix)
|
||||
|
||||
if create_time_from or create_time_to:
|
||||
filtered_docs = []
|
||||
for doc in docs:
|
||||
doc_create_time = doc.get("create_time", 0)
|
||||
if (create_time_from == 0 or doc_create_time >= create_time_from) and (create_time_to == 0 or doc_create_time <= create_time_to):
|
||||
filtered_docs.append(doc)
|
||||
docs = filtered_docs
|
||||
|
||||
for doc_item in docs:
|
||||
if doc_item["thumbnail"] and not doc_item["thumbnail"].startswith(IMG_BASE64_PREFIX):
|
||||
doc_item["thumbnail"] = f"/v1/document/image/{kb_id}-{doc_item['thumbnail']}"
|
||||
|
||||
return get_json_result(data={"total": tol, "docs": docs})
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@router.post("/filter")
|
||||
async def get_filter(
|
||||
req: DocumentFilterRequest,
|
||||
current_user = Depends(get_current_user)
|
||||
):
|
||||
kb_id = req.kb_id
|
||||
if not kb_id:
|
||||
return get_json_result(data=False, message='Lack of "KB ID"', code=settings.RetCode.ARGUMENT_ERROR)
|
||||
tenants = UserTenantService.query(user_id=current_user.id)
|
||||
for tenant in tenants:
|
||||
if KnowledgebaseService.query(tenant_id=tenant.tenant_id, id=kb_id):
|
||||
break
|
||||
else:
|
||||
return get_json_result(data=False, message="Only owner of knowledgebase authorized for this operation.", code=settings.RetCode.OPERATING_ERROR)
|
||||
|
||||
keywords = req.keywords
|
||||
suffix = req.suffix
|
||||
run_status = req.run_status
|
||||
if run_status:
|
||||
invalid_status = {s for s in run_status if s not in VALID_TASK_STATUS}
|
||||
if invalid_status:
|
||||
return get_data_error_result(message=f"Invalid filter run status conditions: {', '.join(invalid_status)}")
|
||||
|
||||
types = req.types
|
||||
if types:
|
||||
invalid_types = {t for t in types if t not in VALID_FILE_TYPES}
|
||||
if invalid_types:
|
||||
return get_data_error_result(message=f"Invalid filter conditions: {', '.join(invalid_types)} type{'s' if len(invalid_types) > 1 else ''}")
|
||||
|
||||
try:
|
||||
filter, total = DocumentService.get_filter_by_kb_id(kb_id, keywords, run_status, types, suffix)
|
||||
return get_json_result(data={"total": total, "filter": filter})
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@router.post("/infos")
|
||||
async def docinfos(
|
||||
req: DocumentInfosRequest,
|
||||
current_user = Depends(get_current_user)
|
||||
):
|
||||
doc_ids = req.doc_ids
|
||||
for doc_id in doc_ids:
|
||||
if not DocumentService.accessible(doc_id, current_user.id):
|
||||
return get_json_result(data=False, message="No authorization.", code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
docs = DocumentService.get_by_ids(doc_ids)
|
||||
return get_json_result(data=list(docs.dicts()))
|
||||
|
||||
|
||||
@router.get("/thumbnails")
|
||||
async def thumbnails(
|
||||
doc_ids: List[str] = Query(...)
|
||||
):
|
||||
if not doc_ids:
|
||||
return get_json_result(data=False, message='Lack of "Document ID"', code=settings.RetCode.ARGUMENT_ERROR)
|
||||
|
||||
try:
|
||||
docs = DocumentService.get_thumbnails(doc_ids)
|
||||
|
||||
for doc_item in docs:
|
||||
if doc_item["thumbnail"] and not doc_item["thumbnail"].startswith(IMG_BASE64_PREFIX):
|
||||
doc_item["thumbnail"] = f"/v1/document/image/{doc_item['kb_id']}-{doc_item['thumbnail']}"
|
||||
|
||||
return get_json_result(data={d["id"]: d["thumbnail"] for d in docs})
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@router.post("/change_status")
|
||||
async def change_status(
|
||||
req: ChangeStatusRequest,
|
||||
current_user = Depends(get_current_user)
|
||||
):
|
||||
doc_ids = req.doc_ids
|
||||
status = str(req.status)
|
||||
|
||||
if status not in ["0", "1"]:
|
||||
return get_json_result(data=False, message='"Status" must be either 0 or 1!', code=settings.RetCode.ARGUMENT_ERROR)
|
||||
|
||||
result = {}
|
||||
for doc_id in doc_ids:
|
||||
if not DocumentService.accessible(doc_id, current_user.id):
|
||||
result[doc_id] = {"error": "No authorization."}
|
||||
continue
|
||||
|
||||
try:
|
||||
e, doc = DocumentService.get_by_id(doc_id)
|
||||
if not e:
|
||||
result[doc_id] = {"error": "No authorization."}
|
||||
continue
|
||||
e, kb = KnowledgebaseService.get_by_id(doc.kb_id)
|
||||
if not e:
|
||||
result[doc_id] = {"error": "Can't find this knowledgebase!"}
|
||||
continue
|
||||
if not DocumentService.update_by_id(doc_id, {"status": str(status)}):
|
||||
result[doc_id] = {"error": "Database error (Document update)!"}
|
||||
continue
|
||||
|
||||
status_int = int(status)
|
||||
if not settings.docStoreConn.update({"doc_id": doc_id}, {"available_int": status_int}, search.index_name(kb.tenant_id), doc.kb_id):
|
||||
result[doc_id] = {"error": "Database error (docStore update)!"}
|
||||
result[doc_id] = {"status": status}
|
||||
except Exception as e:
|
||||
result[doc_id] = {"error": f"Internal server error: {str(e)}"}
|
||||
|
||||
return get_json_result(data=result)
|
||||
|
||||
|
||||
@router.post("/rm")
|
||||
async def rm(
|
||||
req: RemoveDocumentRequest,
|
||||
current_user = Depends(get_current_user)
|
||||
):
|
||||
doc_ids = req.doc_id
|
||||
if isinstance(doc_ids, str):
|
||||
doc_ids = [doc_ids]
|
||||
|
||||
for doc_id in doc_ids:
|
||||
if not DocumentService.accessible4deletion(doc_id, current_user.id):
|
||||
return get_json_result(data=False, message="No authorization.", code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
|
||||
root_folder = FileService.get_root_folder(current_user.id)
|
||||
pf_id = root_folder["id"]
|
||||
FileService.init_knowledgebase_docs(pf_id, current_user.id)
|
||||
errors = ""
|
||||
kb_table_num_map = {}
|
||||
for doc_id in doc_ids:
|
||||
try:
|
||||
e, doc = DocumentService.get_by_id(doc_id)
|
||||
if not e:
|
||||
return get_data_error_result(message="Document not found!")
|
||||
tenant_id = DocumentService.get_tenant_id(doc_id)
|
||||
if not tenant_id:
|
||||
return get_data_error_result(message="Tenant not found!")
|
||||
|
||||
b, n = File2DocumentService.get_storage_address(doc_id=doc_id)
|
||||
|
||||
TaskService.filter_delete([Task.doc_id == doc_id])
|
||||
if not DocumentService.remove_document(doc, tenant_id):
|
||||
return get_data_error_result(message="Database error (Document removal)!")
|
||||
|
||||
f2d = File2DocumentService.get_by_document_id(doc_id)
|
||||
deleted_file_count = 0
|
||||
if f2d:
|
||||
deleted_file_count = FileService.filter_delete([File.source_type == FileSource.KNOWLEDGEBASE, File.id == f2d[0].file_id])
|
||||
File2DocumentService.delete_by_document_id(doc_id)
|
||||
if deleted_file_count > 0:
|
||||
STORAGE_IMPL.rm(b, n)
|
||||
|
||||
doc_parser = doc.parser_id
|
||||
if doc_parser == ParserType.TABLE:
|
||||
kb_id = doc.kb_id
|
||||
if kb_id not in kb_table_num_map:
|
||||
counts = DocumentService.count_by_kb_id(kb_id=kb_id, keywords="", run_status=[TaskStatus.DONE], types=[])
|
||||
kb_table_num_map[kb_id] = counts
|
||||
kb_table_num_map[kb_id] -= 1
|
||||
if kb_table_num_map[kb_id] <= 0:
|
||||
KnowledgebaseService.delete_field_map(kb_id)
|
||||
except Exception as e:
|
||||
errors += str(e)
|
||||
|
||||
if errors:
|
||||
return get_json_result(data=False, message=errors, code=settings.RetCode.SERVER_ERROR)
|
||||
|
||||
return get_json_result(data=True)
|
||||
|
||||
|
||||
@router.post("/run")
|
||||
async def run(
|
||||
req: RunDocumentRequest,
|
||||
current_user = Depends(get_current_user)
|
||||
):
|
||||
for doc_id in req.doc_ids:
|
||||
if not DocumentService.accessible(doc_id, current_user.id):
|
||||
return get_json_result(data=False, message="No authorization.", code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
try:
|
||||
kb_table_num_map = {}
|
||||
for id in req.doc_ids:
|
||||
info = {"run": str(req.run), "progress": 0}
|
||||
if str(req.run) == TaskStatus.RUNNING.value and req.delete:
|
||||
info["progress_msg"] = ""
|
||||
info["chunk_num"] = 0
|
||||
info["token_num"] = 0
|
||||
|
||||
tenant_id = DocumentService.get_tenant_id(id)
|
||||
if not tenant_id:
|
||||
return get_data_error_result(message="Tenant not found!")
|
||||
e, doc = DocumentService.get_by_id(id)
|
||||
if not e:
|
||||
return get_data_error_result(message="Document not found!")
|
||||
|
||||
if str(req.run) == TaskStatus.CANCEL.value:
|
||||
if str(doc.run) == TaskStatus.RUNNING.value:
|
||||
cancel_all_task_of(id)
|
||||
else:
|
||||
return get_data_error_result(message="Cannot cancel a task that is not in RUNNING status")
|
||||
if all([req.delete, str(req.run) == TaskStatus.RUNNING.value, str(doc.run) == TaskStatus.DONE.value]):
|
||||
DocumentService.clear_chunk_num_when_rerun(doc.id)
|
||||
|
||||
DocumentService.update_by_id(id, info)
|
||||
if req.delete:
|
||||
TaskService.filter_delete([Task.doc_id == id])
|
||||
if settings.docStoreConn.indexExist(search.index_name(tenant_id), doc.kb_id):
|
||||
settings.docStoreConn.delete({"doc_id": id}, search.index_name(tenant_id), doc.kb_id)
|
||||
|
||||
if str(req.run) == TaskStatus.RUNNING.value:
|
||||
doc = doc.to_dict()
|
||||
doc["tenant_id"] = tenant_id
|
||||
|
||||
doc_parser = doc.get("parser_id", ParserType.NAIVE)
|
||||
if doc_parser == ParserType.TABLE:
|
||||
kb_id = doc.get("kb_id")
|
||||
if not kb_id:
|
||||
continue
|
||||
if kb_id not in kb_table_num_map:
|
||||
count = DocumentService.count_by_kb_id(kb_id=kb_id, keywords="", run_status=[TaskStatus.DONE], types=[])
|
||||
kb_table_num_map[kb_id] = count
|
||||
if kb_table_num_map[kb_id] <= 0:
|
||||
KnowledgebaseService.delete_field_map(kb_id)
|
||||
if doc.get("pipeline_id", ""):
|
||||
queue_dataflow(tenant_id, flow_id=doc["pipeline_id"], task_id=get_uuid(), doc_id=id)
|
||||
else:
|
||||
bucket, name = File2DocumentService.get_storage_address(doc_id=doc["id"])
|
||||
queue_tasks(doc, bucket, name, 0)
|
||||
|
||||
return get_json_result(data=True)
|
||||
except Exception as e:
|
||||
traceback.print_exc()
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@router.post("/rename")
|
||||
async def rename(
|
||||
req: RenameDocumentRequest,
|
||||
current_user = Depends(get_current_user)
|
||||
):
|
||||
if not DocumentService.accessible(req.doc_id, current_user.id):
|
||||
return get_json_result(data=False, message="No authorization.", code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
try:
|
||||
e, doc = DocumentService.get_by_id(req.doc_id)
|
||||
if not e:
|
||||
return get_data_error_result(message="Document not found!")
|
||||
if pathlib.Path(req.name.lower()).suffix != pathlib.Path(doc.name.lower()).suffix:
|
||||
return get_json_result(data=False, message="The extension of file can't be changed", code=settings.RetCode.ARGUMENT_ERROR)
|
||||
if len(req.name.encode("utf-8")) > FILE_NAME_LEN_LIMIT:
|
||||
return get_json_result(data=False, message=f"File name must be {FILE_NAME_LEN_LIMIT} bytes or less.", code=settings.RetCode.ARGUMENT_ERROR)
|
||||
|
||||
for d in DocumentService.query(name=req.name, kb_id=doc.kb_id):
|
||||
if d.name == req.name:
|
||||
return get_data_error_result(message="Duplicated document name in the same knowledgebase.")
|
||||
|
||||
if not DocumentService.update_by_id(req.doc_id, {"name": req.name}):
|
||||
return get_data_error_result(message="Database error (Document rename)!")
|
||||
|
||||
informs = File2DocumentService.get_by_document_id(req.doc_id)
|
||||
if informs:
|
||||
e, file = FileService.get_by_id(informs[0].file_id)
|
||||
FileService.update_by_id(file.id, {"name": req.name})
|
||||
|
||||
return get_json_result(data=True)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@router.get("/get/{doc_id}")
|
||||
async def get(doc_id: str):
|
||||
try:
|
||||
e, doc = DocumentService.get_by_id(doc_id)
|
||||
if not e:
|
||||
return get_data_error_result(message="Document not found!")
|
||||
|
||||
b, n = File2DocumentService.get_storage_address(doc_id=doc_id)
|
||||
content = STORAGE_IMPL.get(b, n)
|
||||
|
||||
ext = re.search(r"\.([^.]+)$", doc.name.lower())
|
||||
ext = ext.group(1) if ext else None
|
||||
|
||||
if ext:
|
||||
if doc.type == FileType.VISUAL.value:
|
||||
media_type = CONTENT_TYPE_MAP.get(ext, f"image/{ext}")
|
||||
else:
|
||||
media_type = CONTENT_TYPE_MAP.get(ext, f"application/{ext}")
|
||||
else:
|
||||
media_type = "application/octet-stream"
|
||||
|
||||
return StreamingResponse(
|
||||
iter([content]),
|
||||
media_type=media_type,
|
||||
headers={"Content-Disposition": f"attachment; filename={doc.name}"}
|
||||
)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@router.post("/change_parser")
|
||||
async def change_parser(
|
||||
req: ChangeParserRequest,
|
||||
current_user = Depends(get_current_user)
|
||||
):
|
||||
if not DocumentService.accessible(req.doc_id, current_user.id):
|
||||
return get_json_result(data=False, message="No authorization.", code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
|
||||
e, doc = DocumentService.get_by_id(req.doc_id)
|
||||
if not e:
|
||||
return get_data_error_result(message="Document not found!")
|
||||
|
||||
def reset_doc():
|
||||
nonlocal doc
|
||||
e = DocumentService.update_by_id(doc.id, {"parser_id": req.parser_id, "progress": 0, "progress_msg": "", "run": TaskStatus.UNSTART.value})
|
||||
if not e:
|
||||
return get_data_error_result(message="Document not found!")
|
||||
if doc.token_num > 0:
|
||||
e = DocumentService.increment_chunk_num(doc.id, doc.kb_id, doc.token_num * -1, doc.chunk_num * -1, doc.process_duration * -1)
|
||||
if not e:
|
||||
return get_data_error_result(message="Document not found!")
|
||||
tenant_id = DocumentService.get_tenant_id(req.doc_id)
|
||||
if not tenant_id:
|
||||
return get_data_error_result(message="Tenant not found!")
|
||||
if settings.docStoreConn.indexExist(search.index_name(tenant_id), doc.kb_id):
|
||||
settings.docStoreConn.delete({"doc_id": doc.id}, search.index_name(tenant_id), doc.kb_id)
|
||||
|
||||
try:
|
||||
if req.pipeline_id:
|
||||
if doc.pipeline_id == req.pipeline_id:
|
||||
return get_json_result(data=True)
|
||||
DocumentService.update_by_id(doc.id, {"pipeline_id": req.pipeline_id})
|
||||
reset_doc()
|
||||
return get_json_result(data=True)
|
||||
|
||||
if doc.parser_id.lower() == req.parser_id.lower():
|
||||
if req.parser_config:
|
||||
if req.parser_config == doc.parser_config:
|
||||
return get_json_result(data=True)
|
||||
else:
|
||||
return get_json_result(data=True)
|
||||
|
||||
if (doc.type == FileType.VISUAL and req.parser_id != "picture") or (re.search(r"\.(ppt|pptx|pages)$", doc.name) and req.parser_id != "presentation"):
|
||||
return get_data_error_result(message="Not supported yet!")
|
||||
if req.parser_config:
|
||||
DocumentService.update_parser_config(doc.id, req.parser_config)
|
||||
reset_doc()
|
||||
return get_json_result(data=True)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@router.get("/image/{image_id}")
|
||||
async def get_image(image_id: str):
|
||||
try:
|
||||
arr = image_id.split("-")
|
||||
if len(arr) != 2:
|
||||
return get_data_error_result(message="Image not found.")
|
||||
bkt, nm = image_id.split("-")
|
||||
content = STORAGE_IMPL.get(bkt, nm)
|
||||
return StreamingResponse(
|
||||
iter([content]),
|
||||
media_type="image/JPEG"
|
||||
)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@router.post("/upload_and_parse")
|
||||
async def upload_and_parse(
|
||||
conversation_id: str = Form(...),
|
||||
files: List[UploadFile] = File(...),
|
||||
current_user = Depends(get_current_user)
|
||||
):
|
||||
if not files:
|
||||
return get_json_result(data=False, message="No file part!", code=settings.RetCode.ARGUMENT_ERROR)
|
||||
|
||||
# Use UploadFile directly
|
||||
file_objs = files
|
||||
|
||||
for file_obj in file_objs:
|
||||
if file_obj.filename == "":
|
||||
return get_json_result(data=False, message="No file selected!", code=settings.RetCode.ARGUMENT_ERROR)
|
||||
|
||||
doc_ids = await doc_upload_and_parse(conversation_id, file_objs, current_user.id)
|
||||
|
||||
return get_json_result(data=doc_ids)
|
||||
|
||||
|
||||
@router.post("/parse")
|
||||
async def parse(
|
||||
req: ParseRequest = None,
|
||||
files: List[UploadFile] = File(None),
|
||||
current_user = Depends(get_current_user)
|
||||
):
|
||||
url = req.url if req else ""
|
||||
if url:
|
||||
if not is_valid_url(url):
|
||||
return get_json_result(data=False, message="The URL format is invalid", code=settings.RetCode.ARGUMENT_ERROR)
|
||||
download_path = os.path.join(get_project_base_directory(), "logs/downloads")
|
||||
os.makedirs(download_path, exist_ok=True)
|
||||
from seleniumwire.webdriver import Chrome, ChromeOptions
|
||||
|
||||
options = ChromeOptions()
|
||||
options.add_argument("--headless")
|
||||
options.add_argument("--disable-gpu")
|
||||
options.add_argument("--no-sandbox")
|
||||
options.add_argument("--disable-dev-shm-usage")
|
||||
options.add_experimental_option("prefs", {"download.default_directory": download_path, "download.prompt_for_download": False, "download.directory_upgrade": True, "safebrowsing.enabled": True})
|
||||
driver = Chrome(options=options)
|
||||
driver.get(url)
|
||||
res_headers = [r.response.headers for r in driver.requests if r and r.response]
|
||||
if len(res_headers) > 1:
|
||||
sections = RAGFlowHtmlParser().parser_txt(driver.page_source)
|
||||
driver.quit()
|
||||
return get_json_result(data="\n".join(sections))
|
||||
|
||||
class File:
|
||||
filename: str
|
||||
filepath: str
|
||||
|
||||
def __init__(self, filename, filepath):
|
||||
self.filename = filename
|
||||
self.filepath = filepath
|
||||
|
||||
def read(self):
|
||||
with open(self.filepath, "rb") as f:
|
||||
return f.read()
|
||||
|
||||
r = re.search(r"filename=\"([^\"]+)\"", str(res_headers))
|
||||
if not r or not r.group(1):
|
||||
return get_json_result(data=False, message="Can't not identify downloaded file", code=settings.RetCode.ARGUMENT_ERROR)
|
||||
f = File(r.group(1), os.path.join(download_path, r.group(1)))
|
||||
txt = await FileService.parse_docs([f], current_user.id)
|
||||
return get_json_result(data=txt)
|
||||
|
||||
if not files:
|
||||
return get_json_result(data=False, message="No file part!", code=settings.RetCode.ARGUMENT_ERROR)
|
||||
|
||||
# Use UploadFile directly
|
||||
file_objs = files
|
||||
txt = await FileService.parse_docs(file_objs, current_user.id)
|
||||
|
||||
return get_json_result(data=txt)
|
||||
|
||||
|
||||
@router.post("/set_meta")
|
||||
async def set_meta(
|
||||
req: SetMetaRequest,
|
||||
current_user = Depends(get_current_user)
|
||||
):
|
||||
if not DocumentService.accessible(req.doc_id, current_user.id):
|
||||
return get_json_result(data=False, message="No authorization.", code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
try:
|
||||
meta = json.loads(req.meta)
|
||||
if not isinstance(meta, dict):
|
||||
return get_json_result(data=False, message="Only dictionary type supported.", code=settings.RetCode.ARGUMENT_ERROR)
|
||||
for k, v in meta.items():
|
||||
if not isinstance(v, str) and not isinstance(v, int) and not isinstance(v, float):
|
||||
return get_json_result(data=False, message=f"The type is not supported: {v}", code=settings.RetCode.ARGUMENT_ERROR)
|
||||
except Exception as e:
|
||||
return get_json_result(data=False, message=f"Json syntax error: {e}", code=settings.RetCode.ARGUMENT_ERROR)
|
||||
if not isinstance(meta, dict):
|
||||
return get_json_result(data=False, message='Meta data should be in Json map format, like {"key": "value"}', code=settings.RetCode.ARGUMENT_ERROR)
|
||||
|
||||
try:
|
||||
e, doc = DocumentService.get_by_id(req.doc_id)
|
||||
if not e:
|
||||
return get_data_error_result(message="Document not found!")
|
||||
|
||||
if not DocumentService.update_by_id(req.doc_id, {"meta_fields": meta}):
|
||||
return get_data_error_result(message="Database error (meta updates)!")
|
||||
|
||||
return get_json_result(data=True)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
212
api/apps/file2document_app.py
Normal file
@@ -0,0 +1,212 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License
|
||||
#
|
||||
|
||||
from pathlib import Path
|
||||
from typing import List
|
||||
|
||||
from fastapi import APIRouter, Depends
|
||||
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
|
||||
|
||||
from api.db.services.file2document_service import File2DocumentService
|
||||
from api.db.services.file_service import FileService
|
||||
|
||||
from api.db.services.knowledgebase_service import KnowledgebaseService
|
||||
from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
|
||||
from api.utils import get_uuid
|
||||
from api.db import FileType
|
||||
from api.db.services.document_service import DocumentService
|
||||
from api import settings
|
||||
from api.utils.api_utils import get_json_result
|
||||
from pydantic import BaseModel
|
||||
|
||||
# Security
|
||||
security = HTTPBearer()
|
||||
|
||||
# Pydantic models for request/response
|
||||
class ConvertRequest(BaseModel):
|
||||
file_ids: List[str]
|
||||
kb_ids: List[str]
|
||||
|
||||
class RemoveFile2DocumentRequest(BaseModel):
|
||||
file_ids: List[str]
|
||||
|
||||
# Dependency injection
|
||||
async def get_current_user(credentials: HTTPAuthorizationCredentials = Depends(security)):
|
||||
"""获取当前用户"""
|
||||
from api.db import StatusEnum
|
||||
from api.db.services.user_service import UserService
|
||||
from fastapi import HTTPException, status
|
||||
import logging
|
||||
|
||||
try:
|
||||
from itsdangerous.url_safe import URLSafeTimedSerializer as Serializer
|
||||
except ImportError:
|
||||
# Fall back to PyJWT if itsdangerous is unavailable
|
||||
import jwt
|
||||
Serializer = jwt
|
||||
|
||||
jwt = Serializer(secret_key=settings.SECRET_KEY)
|
||||
authorization = credentials.credentials
|
||||
|
||||
if authorization:
|
||||
try:
|
||||
access_token = str(jwt.loads(authorization))
|
||||
|
||||
if not access_token or not access_token.strip():
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_401_UNAUTHORIZED,
|
||||
detail="Authentication attempt with empty access token"
|
||||
)
|
||||
|
||||
# Access tokens should be UUIDs (32 hex characters)
|
||||
if len(access_token.strip()) < 32:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_401_UNAUTHORIZED,
|
||||
detail=f"Authentication attempt with invalid token format: {len(access_token)} chars"
|
||||
)
|
||||
|
||||
user = UserService.query(
|
||||
access_token=access_token, status=StatusEnum.VALID.value
|
||||
)
|
||||
if user:
|
||||
if not user[0].access_token or not user[0].access_token.strip():
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_401_UNAUTHORIZED,
|
||||
detail=f"User {user[0].email} has empty access_token in database"
|
||||
)
|
||||
return user[0]
|
||||
else:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_401_UNAUTHORIZED,
|
||||
detail="Invalid access token"
|
||||
)
|
||||
except Exception as e:
|
||||
logging.warning(f"load_user got exception {e}")
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_401_UNAUTHORIZED,
|
||||
detail="Invalid access token"
|
||||
)
|
||||
else:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_401_UNAUTHORIZED,
|
||||
detail="Authorization header required"
|
||||
)
|
||||
|
||||
# Create router
|
||||
router = APIRouter()
|
||||
|
||||
|
||||
@router.post('/convert')
|
||||
async def convert(
|
||||
req: ConvertRequest,
|
||||
current_user = Depends(get_current_user)
|
||||
):
|
||||
kb_ids = req.kb_ids
|
||||
file_ids = req.file_ids
|
||||
file2documents = []
|
||||
|
||||
try:
|
||||
files = FileService.get_by_ids(file_ids)
|
||||
files_set = dict({file.id: file for file in files})
|
||||
for file_id in file_ids:
|
||||
file = files_set[file_id]
|
||||
if not file:
|
||||
return get_data_error_result(message="File not found!")
|
||||
file_ids_list = [file_id]
|
||||
if file.type == FileType.FOLDER.value:
|
||||
file_ids_list = FileService.get_all_innermost_file_ids(file_id, [])
|
||||
for id in file_ids_list:
|
||||
informs = File2DocumentService.get_by_file_id(id)
|
||||
# delete
|
||||
for inform in informs:
|
||||
doc_id = inform.document_id
|
||||
e, doc = DocumentService.get_by_id(doc_id)
|
||||
if not e:
|
||||
return get_data_error_result(message="Document not found!")
|
||||
tenant_id = DocumentService.get_tenant_id(doc_id)
|
||||
if not tenant_id:
|
||||
return get_data_error_result(message="Tenant not found!")
|
||||
if not DocumentService.remove_document(doc, tenant_id):
|
||||
return get_data_error_result(
|
||||
message="Database error (Document removal)!")
|
||||
File2DocumentService.delete_by_file_id(id)
|
||||
|
||||
# insert
|
||||
for kb_id in kb_ids:
|
||||
e, kb = KnowledgebaseService.get_by_id(kb_id)
|
||||
if not e:
|
||||
return get_data_error_result(
|
||||
message="Can't find this knowledgebase!")
|
||||
e, file = FileService.get_by_id(id)
|
||||
if not e:
|
||||
return get_data_error_result(
|
||||
message="Can't find this file!")
|
||||
|
||||
doc = DocumentService.insert({
|
||||
"id": get_uuid(),
|
||||
"kb_id": kb.id,
|
||||
"parser_id": FileService.get_parser(file.type, file.name, kb.parser_id),
|
||||
"parser_config": kb.parser_config,
|
||||
"created_by": current_user.id,
|
||||
"type": file.type,
|
||||
"name": file.name,
|
||||
"suffix": Path(file.name).suffix.lstrip("."),
|
||||
"location": file.location,
|
||||
"size": file.size
|
||||
})
|
||||
file2document = File2DocumentService.insert({
|
||||
"id": get_uuid(),
|
||||
"file_id": id,
|
||||
"document_id": doc.id,
|
||||
})
|
||||
|
||||
file2documents.append(file2document.to_json())
|
||||
return get_json_result(data=file2documents)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@router.post('/rm')
|
||||
async def rm(
|
||||
req: RemoveFile2DocumentRequest,
|
||||
current_user = Depends(get_current_user)
|
||||
):
|
||||
file_ids = req.file_ids
|
||||
if not file_ids:
|
||||
return get_json_result(
|
||||
data=False, message='Lack of "Files ID"', code=settings.RetCode.ARGUMENT_ERROR)
|
||||
try:
|
||||
for file_id in file_ids:
|
||||
informs = File2DocumentService.get_by_file_id(file_id)
|
||||
if not informs:
|
||||
return get_data_error_result(message="Inform not found!")
|
||||
for inform in informs:
|
||||
if not inform:
|
||||
return get_data_error_result(message="Inform not found!")
|
||||
File2DocumentService.delete_by_file_id(file_id)
|
||||
doc_id = inform.document_id
|
||||
e, doc = DocumentService.get_by_id(doc_id)
|
||||
if not e:
|
||||
return get_data_error_result(message="Document not found!")
|
||||
tenant_id = DocumentService.get_tenant_id(doc_id)
|
||||
if not tenant_id:
|
||||
return get_data_error_result(message="Tenant not found!")
|
||||
if not DocumentService.remove_document(doc, tenant_id):
|
||||
return get_data_error_result(
|
||||
message="Database error (Document removal)!")
|
||||
return get_json_result(data=True)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
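Once the Flask blueprints are replaced by APIRouter instances like the one above, they still have to be mounted on a FastAPI application; that wiring is not shown in this hunk. A minimal sketch, assuming the module shown above is file2document_app.py and that the module names and URL prefixes below are illustrative (the real prefixes likely derive from API_VERSION in api.constants):

from fastapi import FastAPI


def create_app() -> FastAPI:
    # Hypothetical application factory; router modules exist in this commit,
    # but the prefixes and tags here are assumptions.
    app = FastAPI(title="RAGFlow API")

    from api.apps import file2document_app, file_app, kb_app
    app.include_router(file2document_app.router, prefix="/v1/file2document", tags=["file2document"])
    app.include_router(file_app.router, prefix="/v1/file", tags=["file"])
    app.include_router(kb_app.router, prefix="/v1/kb", tags=["kb"])
    return app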
481
api/apps/file_app.py
Normal file
@@ -0,0 +1,481 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import os
|
||||
import pathlib
|
||||
import re
|
||||
from typing import List, Optional
|
||||
|
||||
from fastapi import APIRouter, Depends, File, Form, HTTPException, UploadFile, Query
|
||||
from fastapi.responses import StreamingResponse
|
||||
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
|
||||
|
||||
from api.common.check_team_permission import check_file_team_permission
|
||||
from api.db.services.document_service import DocumentService
|
||||
from api.db.services.file2document_service import File2DocumentService
|
||||
from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
|
||||
from api.utils import get_uuid
|
||||
from api.db import FileType, FileSource
|
||||
from api.db.services import duplicate_name
|
||||
from api.db.services.file_service import FileService
|
||||
from api import settings
|
||||
from api.utils.api_utils import get_json_result
|
||||
from api.utils.file_utils import filename_type
|
||||
from api.utils.web_utils import CONTENT_TYPE_MAP
|
||||
from rag.utils.storage_factory import STORAGE_IMPL
|
||||
from pydantic import BaseModel
|
||||
|
||||
# Security
|
||||
security = HTTPBearer()
|
||||
|
||||
# Pydantic models for request/response
|
||||
class CreateFileRequest(BaseModel):
|
||||
name: str
|
||||
parent_id: Optional[str] = None
|
||||
type: Optional[str] = None
|
||||
|
||||
class RemoveFileRequest(BaseModel):
|
||||
file_ids: List[str]
|
||||
|
||||
class RenameFileRequest(BaseModel):
|
||||
file_id: str
|
||||
name: str
|
||||
|
||||
class MoveFileRequest(BaseModel):
|
||||
src_file_ids: List[str]
|
||||
dest_file_id: str
|
||||
|
||||
# Dependency injection
|
||||
async def get_current_user(credentials: HTTPAuthorizationCredentials = Depends(security)):
|
||||
"""获取当前用户"""
|
||||
from api.db import StatusEnum
|
||||
from api.db.services.user_service import UserService
|
||||
from fastapi import HTTPException, status
|
||||
import logging
|
||||
|
||||
try:
|
||||
from itsdangerous.url_safe import URLSafeTimedSerializer as Serializer
|
||||
except ImportError:
|
||||
# Fall back to PyJWT if itsdangerous is unavailable (note: not a drop-in replacement for URLSafeTimedSerializer)
|
||||
import jwt
|
||||
Serializer = jwt
|
||||
|
||||
jwt = Serializer(secret_key=settings.SECRET_KEY)
|
||||
authorization = credentials.credentials
|
||||
|
||||
if authorization:
|
||||
try:
|
||||
access_token = str(jwt.loads(authorization))
|
||||
|
||||
if not access_token or not access_token.strip():
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_401_UNAUTHORIZED,
|
||||
detail="Authentication attempt with empty access token"
|
||||
)
|
||||
|
||||
# Access tokens should be UUIDs (32 hex characters)
|
||||
if len(access_token.strip()) < 32:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_401_UNAUTHORIZED,
|
||||
detail=f"Authentication attempt with invalid token format: {len(access_token)} chars"
|
||||
)
|
||||
|
||||
user = UserService.query(
|
||||
access_token=access_token, status=StatusEnum.VALID.value
|
||||
)
|
||||
if user:
|
||||
if not user[0].access_token or not user[0].access_token.strip():
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_401_UNAUTHORIZED,
|
||||
detail=f"User {user[0].email} has empty access_token in database"
|
||||
)
|
||||
return user[0]
|
||||
else:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_401_UNAUTHORIZED,
|
||||
detail="Invalid access token"
|
||||
)
|
||||
except Exception as e:
|
||||
logging.warning(f"load_user got exception {e}")
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_401_UNAUTHORIZED,
|
||||
detail="Invalid access token"
|
||||
)
|
||||
else:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_401_UNAUTHORIZED,
|
||||
detail="Authorization header required"
|
||||
)
|
||||
|
||||
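The get_current_user dependency above repeats, almost verbatim, the one defined for the file-to-document router earlier in this commit. A possible follow-up (not part of this commit) is to factor it into a shared helper; a minimal sketch, assuming a module such as api/utils/auth.py and that UserService.query keeps its current signature:

from fastapi import Depends, HTTPException, status
from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
from itsdangerous.url_safe import URLSafeTimedSerializer as Serializer

security = HTTPBearer()


def build_current_user_dependency(secret_key: str):
    """Return a FastAPI dependency that resolves a user from a bearer access token."""
    serializer = Serializer(secret_key=secret_key)

    async def get_current_user(credentials: HTTPAuthorizationCredentials = Depends(security)):
        from api.db import StatusEnum
        from api.db.services.user_service import UserService
        try:
            access_token = str(serializer.loads(credentials.credentials))
            users = UserService.query(access_token=access_token, status=StatusEnum.VALID.value)
            if users:
                return users[0]
        except Exception:
            pass
        raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="Invalid access token")

    return get_current_user

Each *_app.py module could then do get_current_user = build_current_user_dependency(settings.SECRET_KEY) instead of redefining the whole function.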
# Create router
|
||||
router = APIRouter()
|
||||
|
||||
|
||||
@router.post('/upload')
|
||||
async def upload(
|
||||
parent_id: Optional[str] = Form(None),
|
||||
files: List[UploadFile] = File(...),
|
||||
current_user = Depends(get_current_user)
|
||||
):
|
||||
pf_id = parent_id
|
||||
|
||||
if not pf_id:
|
||||
root_folder = FileService.get_root_folder(current_user.id)
|
||||
pf_id = root_folder["id"]
|
||||
|
||||
if not files:
|
||||
return get_json_result(
|
||||
data=False, message='No file part!', code=settings.RetCode.ARGUMENT_ERROR)
|
||||
|
||||
file_objs = files
|
||||
|
||||
for file_obj in file_objs:
|
||||
if file_obj.filename == '':
|
||||
return get_json_result(
|
||||
data=False, message='No file selected!', code=settings.RetCode.ARGUMENT_ERROR)
|
||||
file_res = []
|
||||
try:
|
||||
e, pf_folder = FileService.get_by_id(pf_id)
|
||||
if not e:
|
||||
return get_data_error_result( message="Can't find this folder!")
|
||||
for file_obj in file_objs:
|
||||
MAX_FILE_NUM_PER_USER = int(os.environ.get('MAX_FILE_NUM_PER_USER', 0))
|
||||
if MAX_FILE_NUM_PER_USER > 0 and DocumentService.get_doc_count(current_user.id) >= MAX_FILE_NUM_PER_USER:
|
||||
return get_data_error_result( message="Exceed the maximum file number of a free user!")
|
||||
|
||||
# split file name path
|
||||
if not file_obj.filename:
|
||||
file_obj_names = [pf_folder.name, file_obj.filename]
|
||||
else:
|
||||
full_path = '/' + file_obj.filename
|
||||
file_obj_names = full_path.split('/')
|
||||
file_len = len(file_obj_names)
|
||||
|
||||
# get folder
|
||||
file_id_list = FileService.get_id_list_by_id(pf_id, file_obj_names, 1, [pf_id])
|
||||
len_id_list = len(file_id_list)
|
||||
|
||||
# create folder
|
||||
if file_len != len_id_list:
|
||||
e, file = FileService.get_by_id(file_id_list[len_id_list - 1])
|
||||
if not e:
|
||||
return get_data_error_result(message="Folder not found!")
|
||||
last_folder = FileService.create_folder(file, file_id_list[len_id_list - 1], file_obj_names,
|
||||
len_id_list)
|
||||
else:
|
||||
e, file = FileService.get_by_id(file_id_list[len_id_list - 2])
|
||||
if not e:
|
||||
return get_data_error_result(message="Folder not found!")
|
||||
last_folder = FileService.create_folder(file, file_id_list[len_id_list - 2], file_obj_names,
|
||||
len_id_list)
|
||||
|
||||
# file type
|
||||
filetype = filename_type(file_obj_names[file_len - 1])
|
||||
location = file_obj_names[file_len - 1]
|
||||
while STORAGE_IMPL.obj_exist(last_folder.id, location):
|
||||
location += "_"
|
||||
blob = await file_obj.read()
|
||||
filename = duplicate_name(
|
||||
FileService.query,
|
||||
name=file_obj_names[file_len - 1],
|
||||
parent_id=last_folder.id)
|
||||
STORAGE_IMPL.put(last_folder.id, location, blob)
|
||||
file = {
|
||||
"id": get_uuid(),
|
||||
"parent_id": last_folder.id,
|
||||
"tenant_id": current_user.id,
|
||||
"created_by": current_user.id,
|
||||
"type": filetype,
|
||||
"name": filename,
|
||||
"location": location,
|
||||
"size": len(blob),
|
||||
}
|
||||
file = FileService.insert(file)
|
||||
file_res.append(file.to_json())
|
||||
return get_json_result(data=file_res)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
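A quick client-side sketch of calling the new upload endpoint (host, port, route prefix and token are placeholders; the multipart field name "files" matches the parameter declared above):

import httpx

headers = {"Authorization": "Bearer <access_token>"}
# parent_id is omitted, so the handler falls back to the user's root folder.
with open("report.pdf", "rb") as fh:
    resp = httpx.post(
        "http://localhost:9380/v1/file/upload",
        headers=headers,
        files=[("files", ("report.pdf", fh, "application/pdf"))],
    )
print(resp.json())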
@router.post('/create')
|
||||
async def create(
|
||||
req: CreateFileRequest,
|
||||
current_user = Depends(get_current_user)
|
||||
):
|
||||
pf_id = req.parent_id
|
||||
input_file_type = req.type
|
||||
if not pf_id:
|
||||
root_folder = FileService.get_root_folder(current_user.id)
|
||||
pf_id = root_folder["id"]
|
||||
|
||||
try:
|
||||
if not FileService.is_parent_folder_exist(pf_id):
|
||||
return get_json_result(
|
||||
data=False, message="Parent Folder Doesn't Exist!", code=settings.RetCode.OPERATING_ERROR)
|
||||
if FileService.query(name=req.name, parent_id=pf_id):
|
||||
return get_data_error_result(
|
||||
message="Duplicated folder name in the same folder.")
|
||||
|
||||
if input_file_type == FileType.FOLDER.value:
|
||||
file_type = FileType.FOLDER.value
|
||||
else:
|
||||
file_type = FileType.VIRTUAL.value
|
||||
|
||||
file = FileService.insert({
|
||||
"id": get_uuid(),
|
||||
"parent_id": pf_id,
|
||||
"tenant_id": current_user.id,
|
||||
"created_by": current_user.id,
|
||||
"name": req.name,
|
||||
"location": "",
|
||||
"size": 0,
|
||||
"type": file_type
|
||||
})
|
||||
|
||||
return get_json_result(data=file.to_json())
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@router.get('/list')
|
||||
async def list_files(
|
||||
parent_id: Optional[str] = Query(None),
|
||||
keywords: str = Query(""),
|
||||
page: int = Query(1),
|
||||
page_size: int = Query(15),
|
||||
orderby: str = Query("create_time"),
|
||||
desc: bool = Query(True),
|
||||
current_user = Depends(get_current_user)
|
||||
):
|
||||
pf_id = parent_id
|
||||
|
||||
if not pf_id:
|
||||
root_folder = FileService.get_root_folder(current_user.id)
|
||||
pf_id = root_folder["id"]
|
||||
FileService.init_knowledgebase_docs(pf_id, current_user.id)
|
||||
try:
|
||||
e, file = FileService.get_by_id(pf_id)
|
||||
if not e:
|
||||
return get_data_error_result(message="Folder not found!")
|
||||
|
||||
files, total = FileService.get_by_pf_id(
|
||||
current_user.id, pf_id, page, page_size, orderby, desc, keywords)
|
||||
|
||||
parent_folder = FileService.get_parent_folder(pf_id)
|
||||
if not parent_folder:
|
||||
return get_json_result(message="File not found!")
|
||||
|
||||
return get_json_result(data={"total": total, "files": files, "parent_folder": parent_folder.to_json()})
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
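And the matching read side, again with a placeholder host and token; the query parameters mirror the ones declared on /list above, and the handler returns total, files and parent_folder under data:

import httpx

resp = httpx.get(
    "http://localhost:9380/v1/file/list",
    params={"keywords": "", "page": 1, "page_size": 15, "orderby": "create_time", "desc": True},
    headers={"Authorization": "Bearer <access_token>"},
)
payload = resp.json().get("data", {})  # assumed envelope produced by get_json_result
print(payload.get("total"), len(payload.get("files", [])))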
@router.get('/root_folder')
|
||||
async def get_root_folder(current_user = Depends(get_current_user)):
|
||||
try:
|
||||
root_folder = FileService.get_root_folder(current_user.id)
|
||||
return get_json_result(data={"root_folder": root_folder})
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@router.get('/parent_folder')
|
||||
async def get_parent_folder(
|
||||
file_id: str = Query(...),
|
||||
current_user = Depends(get_current_user)
|
||||
):
|
||||
try:
|
||||
e, file = FileService.get_by_id(file_id)
|
||||
if not e:
|
||||
return get_data_error_result(message="Folder not found!")
|
||||
|
||||
parent_folder = FileService.get_parent_folder(file_id)
|
||||
return get_json_result(data={"parent_folder": parent_folder.to_json()})
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@router.get('/all_parent_folder')
|
||||
async def get_all_parent_folders(
|
||||
file_id: str = Query(...),
|
||||
current_user = Depends(get_current_user)
|
||||
):
|
||||
try:
|
||||
e, file = FileService.get_by_id(file_id)
|
||||
if not e:
|
||||
return get_data_error_result(message="Folder not found!")
|
||||
|
||||
parent_folders = FileService.get_all_parent_folders(file_id)
|
||||
parent_folders_res = []
|
||||
for parent_folder in parent_folders:
|
||||
parent_folders_res.append(parent_folder.to_json())
|
||||
return get_json_result(data={"parent_folders": parent_folders_res})
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@router.post('/rm')
|
||||
async def rm(
|
||||
req: RemoveFileRequest,
|
||||
current_user = Depends(get_current_user)
|
||||
):
|
||||
file_ids = req.file_ids
|
||||
try:
|
||||
for file_id in file_ids:
|
||||
e, file = FileService.get_by_id(file_id)
|
||||
if not e:
|
||||
return get_data_error_result(message="File or Folder not found!")
|
||||
if not file.tenant_id:
|
||||
return get_data_error_result(message="Tenant not found!")
|
||||
if not check_file_team_permission(file, current_user.id):
|
||||
return get_json_result(data=False, message='No authorization.', code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
if file.source_type == FileSource.KNOWLEDGEBASE:
|
||||
continue
|
||||
|
||||
if file.type == FileType.FOLDER.value:
|
||||
file_id_list = FileService.get_all_innermost_file_ids(file_id, [])
|
||||
for inner_file_id in file_id_list:
|
||||
e, file = FileService.get_by_id(inner_file_id)
|
||||
if not e:
|
||||
return get_data_error_result(message="File not found!")
|
||||
STORAGE_IMPL.rm(file.parent_id, file.location)
|
||||
FileService.delete_folder_by_pf_id(current_user.id, file_id)
|
||||
else:
|
||||
STORAGE_IMPL.rm(file.parent_id, file.location)
|
||||
if not FileService.delete(file):
|
||||
return get_data_error_result(
|
||||
message="Database error (File removal)!")
|
||||
|
||||
# delete file2document
|
||||
informs = File2DocumentService.get_by_file_id(file_id)
|
||||
for inform in informs:
|
||||
doc_id = inform.document_id
|
||||
e, doc = DocumentService.get_by_id(doc_id)
|
||||
if not e:
|
||||
return get_data_error_result(message="Document not found!")
|
||||
tenant_id = DocumentService.get_tenant_id(doc_id)
|
||||
if not tenant_id:
|
||||
return get_data_error_result(message="Tenant not found!")
|
||||
if not DocumentService.remove_document(doc, tenant_id):
|
||||
return get_data_error_result(
|
||||
message="Database error (Document removal)!")
|
||||
File2DocumentService.delete_by_file_id(file_id)
|
||||
|
||||
return get_json_result(data=True)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@router.post('/rename')
|
||||
async def rename(
|
||||
req: RenameFileRequest,
|
||||
current_user = Depends(get_current_user)
|
||||
):
|
||||
try:
|
||||
e, file = FileService.get_by_id(req.file_id)
|
||||
if not e:
|
||||
return get_data_error_result(message="File not found!")
|
||||
if not check_file_team_permission(file, current_user.id):
|
||||
return get_json_result(data=False, message='No authorization.', code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
if file.type != FileType.FOLDER.value \
|
||||
and pathlib.Path(req.name.lower()).suffix != pathlib.Path(
|
||||
file.name.lower()).suffix:
|
||||
return get_json_result(
|
||||
data=False,
|
||||
message="The extension of file can't be changed",
|
||||
code=settings.RetCode.ARGUMENT_ERROR)
|
||||
for file in FileService.query(name=req.name, pf_id=file.parent_id):
|
||||
if file.name == req.name:
|
||||
return get_data_error_result(
|
||||
message="Duplicated file name in the same folder.")
|
||||
|
||||
if not FileService.update_by_id(
|
||||
req.file_id, {"name": req.name}):
|
||||
return get_data_error_result(
|
||||
message="Database error (File rename)!")
|
||||
|
||||
informs = File2DocumentService.get_by_file_id(req.file_id)
|
||||
if informs:
|
||||
if not DocumentService.update_by_id(
|
||||
informs[0].document_id, {"name": req.name}):
|
||||
return get_data_error_result(
|
||||
message="Database error (Document rename)!")
|
||||
|
||||
return get_json_result(data=True)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@router.get('/get/{file_id}')
|
||||
async def get(file_id: str, current_user = Depends(get_current_user)):
|
||||
try:
|
||||
e, file = FileService.get_by_id(file_id)
|
||||
if not e:
|
||||
return get_data_error_result(message="Document not found!")
|
||||
if not check_file_team_permission(file, current_user.id):
|
||||
return get_json_result(data=False, message='No authorization.', code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
|
||||
blob = STORAGE_IMPL.get(file.parent_id, file.location)
|
||||
if not blob:
|
||||
b, n = File2DocumentService.get_storage_address(file_id=file_id)
|
||||
blob = STORAGE_IMPL.get(b, n)
|
||||
|
||||
ext = re.search(r"\.([^.]+)$", file.name.lower())
|
||||
ext = ext.group(1) if ext else None
|
||||
if ext:
|
||||
if file.type == FileType.VISUAL.value:
|
||||
content_type = CONTENT_TYPE_MAP.get(ext, f"image/{ext}")
|
||||
else:
|
||||
content_type = CONTENT_TYPE_MAP.get(ext, f"application/{ext}")
|
||||
else:
|
||||
content_type = "application/octet-stream"
|
||||
|
||||
return StreamingResponse(
|
||||
iter([blob]),
|
||||
media_type=content_type,
|
||||
headers={"Content-Disposition": f"attachment; filename={file.name}"}
|
||||
)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
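One design note on the download handler above: iter([blob]) sends the whole object as a single chunk, which is fine for small files but keeps the entire blob on the response path in one piece. If larger objects become a concern, a chunked generator is a drop-in alternative (a sketch, assuming the blob is already bytes as in the handler):

def iter_blob(blob: bytes, chunk_size: int = 1024 * 1024):
    # Yield the blob in 1 MiB slices so StreamingResponse emits multiple chunks.
    for offset in range(0, len(blob), chunk_size):
        yield blob[offset:offset + chunk_size]

# StreamingResponse(iter_blob(blob), media_type=content_type, headers=...) would then
# replace StreamingResponse(iter([blob]), ...) in the handler above.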
@router.post('/mv')
|
||||
async def move(
|
||||
req: MoveFileRequest,
|
||||
current_user = Depends(get_current_user)
|
||||
):
|
||||
try:
|
||||
file_ids = req.src_file_ids
|
||||
parent_id = req.dest_file_id
|
||||
files = FileService.get_by_ids(file_ids)
|
||||
files_dict = {}
|
||||
for file in files:
|
||||
files_dict[file.id] = file
|
||||
|
||||
for file_id in file_ids:
|
||||
file = files_dict.get(file_id)
|
||||
if not file:
|
||||
return get_data_error_result(message="File or Folder not found!")
|
||||
if not file.tenant_id:
|
||||
return get_data_error_result(message="Tenant not found!")
|
||||
if not check_file_team_permission(file, current_user.id):
|
||||
return get_json_result(data=False, message='No authorization.', code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
fe, _ = FileService.get_by_id(parent_id)
|
||||
if not fe:
|
||||
return get_data_error_result(message="Parent Folder not found!")
|
||||
FileService.move_file(file_ids, parent_id)
|
||||
return get_json_result(data=True)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
831
api/apps/kb_app.py
Normal file
@@ -0,0 +1,831 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import json
|
||||
import logging
|
||||
from typing import Optional, List
|
||||
|
||||
from fastapi import APIRouter, Depends, HTTPException, Query, status
|
||||
from fastapi.responses import JSONResponse
|
||||
|
||||
from api.models.kb_models import (
|
||||
CreateKnowledgeBaseRequest,
|
||||
UpdateKnowledgeBaseRequest,
|
||||
DeleteKnowledgeBaseRequest,
|
||||
ListKnowledgeBasesRequest,
|
||||
RemoveTagsRequest,
|
||||
RenameTagRequest,
|
||||
RunGraphRAGRequest,
|
||||
RunRaptorRequest,
|
||||
RunMindmapRequest,
|
||||
ListPipelineLogsRequest,
|
||||
ListPipelineDatasetLogsRequest,
|
||||
DeletePipelineLogsRequest,
|
||||
UnbindTaskRequest
|
||||
)
|
||||
from api.utils.api_utils import get_current_user
|
||||
|
||||
from api.db.services import duplicate_name
|
||||
from api.db.services.document_service import DocumentService, queue_raptor_o_graphrag_tasks
|
||||
from api.db.services.file2document_service import File2DocumentService
|
||||
from api.db.services.file_service import FileService
|
||||
from api.db.services.pipeline_operation_log_service import PipelineOperationLogService
|
||||
from api.db.services.task_service import TaskService, GRAPH_RAPTOR_FAKE_DOC_ID
|
||||
from api.db.services.user_service import TenantService, UserTenantService
|
||||
from api.utils.api_utils import get_error_data_result, server_error_response, get_data_error_result, get_json_result
|
||||
from api.utils import get_uuid
|
||||
from api.db import PipelineTaskType, StatusEnum, FileSource, VALID_FILE_TYPES, VALID_TASK_STATUS
|
||||
from api.db.services.knowledgebase_service import KnowledgebaseService
|
||||
from api.db.db_models import File
|
||||
from api import settings
|
||||
from rag.nlp import search
|
||||
from api.constants import DATASET_NAME_LIMIT
|
||||
from rag.settings import PAGERANK_FLD
|
||||
from rag.utils.storage_factory import STORAGE_IMPL
|
||||
|
||||
# Create the FastAPI router
|
||||
router = APIRouter()
|
||||
|
||||
|
||||
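The request models imported from api.models.kb_models are not part of this hunk. Judging only from the fields the handlers below read, the first two would look roughly like this (a sketch; names and defaults are assumptions):

from typing import Optional
from pydantic import BaseModel


class CreateKnowledgeBaseRequest(BaseModel):
    # /create reads name, description, parser_id, embd_id and parser_config.
    name: str
    description: Optional[str] = None
    parser_id: Optional[str] = None
    embd_id: Optional[str] = None
    parser_config: Optional[dict] = None


class UpdateKnowledgeBaseRequest(BaseModel):
    # /update reads kb_id, name and pagerank.
    kb_id: str
    name: str
    pagerank: int = 0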
@router.post('/create')
|
||||
async def create(
|
||||
request: CreateKnowledgeBaseRequest,
|
||||
current_user = Depends(get_current_user)
|
||||
):
|
||||
dataset_name = request.name
|
||||
if not isinstance(dataset_name, str):
|
||||
return get_data_error_result(message="Dataset name must be string.")
|
||||
if dataset_name.strip() == "":
|
||||
return get_data_error_result(message="Dataset name can't be empty.")
|
||||
if len(dataset_name.encode("utf-8")) > DATASET_NAME_LIMIT:
|
||||
return get_data_error_result(
|
||||
message=f"Dataset name length is {len(dataset_name)} which is larger than {DATASET_NAME_LIMIT}")
|
||||
|
||||
dataset_name = dataset_name.strip()
|
||||
dataset_name = duplicate_name(
|
||||
KnowledgebaseService.query,
|
||||
name=dataset_name,
|
||||
tenant_id=current_user.id,
|
||||
status=StatusEnum.VALID.value)
|
||||
try:
|
||||
req = {
|
||||
"id": get_uuid(),
|
||||
"name": dataset_name,
|
||||
"tenant_id": current_user.id,
|
||||
"created_by": current_user.id,
|
||||
"parser_id": request.parser_id or "naive",
|
||||
"description": request.description
|
||||
}
|
||||
e, t = TenantService.get_by_id(current_user.id)
|
||||
if not e:
|
||||
return get_data_error_result(message="Tenant not found.")
|
||||
|
||||
# Default embd_id to the tenant's embedding model when not provided
|
||||
if not request.embd_id:
|
||||
req["embd_id"] = t.embd_id
|
||||
else:
|
||||
req["embd_id"] = request.embd_id
|
||||
|
||||
if request.parser_config:
|
||||
req["parser_config"] = request.parser_config
|
||||
else:
|
||||
req["parser_config"] = {
|
||||
"layout_recognize": "DeepDOC",
|
||||
"chunk_token_num": 512,
|
||||
"delimiter": "\n",
|
||||
"auto_keywords": 0,
|
||||
"auto_questions": 0,
|
||||
"html4excel": False,
|
||||
"topn_tags": 3,
|
||||
"raptor": {
|
||||
"use_raptor": True,
|
||||
"prompt": "Please summarize the following paragraphs. Be careful with the numbers, do not make things up. Paragraphs as following:\n {cluster_content}\nThe above is the content you need to summarize.",
|
||||
"max_token": 256,
|
||||
"threshold": 0.1,
|
||||
"max_cluster": 64,
|
||||
"random_seed": 0
|
||||
},
|
||||
"graphrag": {
|
||||
"use_graphrag": True,
|
||||
"entity_types": [
|
||||
"organization",
|
||||
"person",
|
||||
"geo",
|
||||
"event",
|
||||
"category"
|
||||
],
|
||||
"method": "light"
|
||||
}
|
||||
}
|
||||
if not KnowledgebaseService.save(**req):
|
||||
return get_data_error_result()
|
||||
return get_json_result(data={"kb_id": req["id"]})
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@router.post('/update')
|
||||
async def update(
|
||||
request: UpdateKnowledgeBaseRequest,
|
||||
current_user = Depends(get_current_user)
|
||||
):
|
||||
if not isinstance(request.name, str):
|
||||
return get_data_error_result(message="Dataset name must be string.")
|
||||
if request.name.strip() == "":
|
||||
return get_data_error_result(message="Dataset name can't be empty.")
|
||||
if len(request.name.encode("utf-8")) > DATASET_NAME_LIMIT:
|
||||
return get_data_error_result(
|
||||
message=f"Dataset name length is {len(request.name)} which is large than {DATASET_NAME_LIMIT}")
|
||||
name = request.name.strip()
|
||||
|
||||
if not KnowledgebaseService.accessible4deletion(request.kb_id, current_user.id):
|
||||
return get_json_result(
|
||||
data=False,
|
||||
message='No authorization.',
|
||||
code=settings.RetCode.AUTHENTICATION_ERROR
|
||||
)
|
||||
try:
|
||||
if not KnowledgebaseService.query(
|
||||
created_by=current_user.id, id=request.kb_id):
|
||||
return get_json_result(
|
||||
data=False, message='Only owner of knowledgebase authorized for this operation.',
|
||||
code=settings.RetCode.OPERATING_ERROR)
|
||||
|
||||
e, kb = KnowledgebaseService.get_by_id(request.kb_id)
|
||||
if not e:
|
||||
return get_data_error_result(
|
||||
message="Can't find this knowledgebase!")
|
||||
|
||||
if name.lower() != kb.name.lower() \
|
||||
and len(
|
||||
KnowledgebaseService.query(name=name, tenant_id=current_user.id, status=StatusEnum.VALID.value)) >= 1:
|
||||
return get_data_error_result(
|
||||
message="Duplicated knowledgebase name.")
|
||||
|
||||
update_data = {
|
||||
"name": name,
|
||||
"pagerank": request.pagerank
|
||||
}
|
||||
if not KnowledgebaseService.update_by_id(kb.id, update_data):
|
||||
return get_data_error_result()
|
||||
|
||||
if kb.pagerank != request.pagerank:
|
||||
if request.pagerank > 0:
|
||||
settings.docStoreConn.update({"kb_id": kb.id}, {PAGERANK_FLD: request.pagerank},
|
||||
search.index_name(kb.tenant_id), kb.id)
|
||||
else:
|
||||
# Elasticsearch requires PAGERANK_FLD to be non-zero!
|
||||
settings.docStoreConn.update({"exists": PAGERANK_FLD}, {"remove": PAGERANK_FLD},
|
||||
search.index_name(kb.tenant_id), kb.id)
|
||||
|
||||
e, kb = KnowledgebaseService.get_by_id(kb.id)
|
||||
if not e:
|
||||
return get_data_error_result(
|
||||
message="Database error (Knowledgebase rename)!")
|
||||
kb = kb.to_dict()
|
||||
kb.update(update_data)
|
||||
|
||||
return get_json_result(data=kb)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@router.get('/detail')
|
||||
async def detail(
|
||||
kb_id: str = Query(..., description="Knowledge base ID"),
|
||||
current_user = Depends(get_current_user)
|
||||
):
|
||||
try:
|
||||
tenants = UserTenantService.query(user_id=current_user.id)
|
||||
for tenant in tenants:
|
||||
if KnowledgebaseService.query(
|
||||
tenant_id=tenant.tenant_id, id=kb_id):
|
||||
break
|
||||
else:
|
||||
return get_json_result(
|
||||
data=False, message='Only owner of knowledgebase authorized for this operation.',
|
||||
code=settings.RetCode.OPERATING_ERROR)
|
||||
kb = KnowledgebaseService.get_detail(kb_id)
|
||||
if not kb:
|
||||
return get_data_error_result(
|
||||
message="Can't find this knowledgebase!")
|
||||
kb["size"] = DocumentService.get_total_size_by_kb_id(kb_id=kb["id"],keywords="", run_status=[], types=[])
|
||||
return get_json_result(data=kb)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@router.post('/list')
|
||||
async def list_kbs(
|
||||
request: ListKnowledgeBasesRequest,
|
||||
keywords: str = Query("", description="Keywords"),
|
||||
page: int = Query(0, description="Page number"),
|
||||
page_size: int = Query(0, description="Page size"),
|
||||
parser_id: Optional[str] = Query(None, description="Parser ID"),
|
||||
orderby: str = Query("create_time", description="Field to order by"),
|
||||
desc: bool = Query(True, description="Sort in descending order"),
|
||||
current_user = Depends(get_current_user)
|
||||
):
|
||||
page_number = page
|
||||
items_per_page = page_size
|
||||
owner_ids = request.owner_ids
|
||||
try:
|
||||
if not owner_ids:
|
||||
tenants = TenantService.get_joined_tenants_by_user_id(current_user.id)
|
||||
tenants = [m["tenant_id"] for m in tenants]
|
||||
kbs, total = KnowledgebaseService.get_by_tenant_ids(
|
||||
tenants, current_user.id, page_number,
|
||||
items_per_page, orderby, desc, keywords, parser_id)
|
||||
else:
|
||||
tenants = owner_ids
|
||||
kbs, total = KnowledgebaseService.get_by_tenant_ids(
|
||||
tenants, current_user.id, 0,
|
||||
0, orderby, desc, keywords, parser_id)
|
||||
kbs = [kb for kb in kbs if kb["tenant_id"] in tenants]
|
||||
total = len(kbs)
|
||||
if page_number and items_per_page:
|
||||
kbs = kbs[(page_number-1)*items_per_page:page_number*items_per_page]
|
||||
return get_json_result(data={"kbs": kbs, "total": total})
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
@router.post('/rm')
|
||||
async def rm(
|
||||
request: DeleteKnowledgeBaseRequest,
|
||||
current_user = Depends(get_current_user)
|
||||
):
|
||||
if not KnowledgebaseService.accessible4deletion(request.kb_id, current_user.id):
|
||||
return get_json_result(
|
||||
data=False,
|
||||
message='No authorization.',
|
||||
code=settings.RetCode.AUTHENTICATION_ERROR
|
||||
)
|
||||
try:
|
||||
kbs = KnowledgebaseService.query(
|
||||
created_by=current_user.id, id=request.kb_id)
|
||||
if not kbs:
|
||||
return get_json_result(
|
||||
data=False, message='Only owner of knowledgebase authorized for this operation.',
|
||||
code=settings.RetCode.OPERATING_ERROR)
|
||||
|
||||
for doc in DocumentService.query(kb_id=request.kb_id):
|
||||
if not DocumentService.remove_document(doc, kbs[0].tenant_id):
|
||||
return get_data_error_result(
|
||||
message="Database error (Document removal)!")
|
||||
f2d = File2DocumentService.get_by_document_id(doc.id)
|
||||
if f2d:
|
||||
FileService.filter_delete([File.source_type == FileSource.KNOWLEDGEBASE, File.id == f2d[0].file_id])
|
||||
File2DocumentService.delete_by_document_id(doc.id)
|
||||
FileService.filter_delete(
|
||||
[File.source_type == FileSource.KNOWLEDGEBASE, File.type == "folder", File.name == kbs[0].name])
|
||||
if not KnowledgebaseService.delete_by_id(request.kb_id):
|
||||
return get_data_error_result(
|
||||
message="Database error (Knowledgebase removal)!")
|
||||
for kb in kbs:
|
||||
settings.docStoreConn.delete({"kb_id": kb.id}, search.index_name(kb.tenant_id), kb.id)
|
||||
settings.docStoreConn.deleteIdx(search.index_name(kb.tenant_id), kb.id)
|
||||
if hasattr(STORAGE_IMPL, 'remove_bucket'):
|
||||
STORAGE_IMPL.remove_bucket(kb.id)
|
||||
return get_json_result(data=True)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@router.get('/{kb_id}/tags')
|
||||
async def list_tags(
|
||||
kb_id: str,
|
||||
current_user = Depends(get_current_user)
|
||||
):
|
||||
if not KnowledgebaseService.accessible(kb_id, current_user.id):
|
||||
return get_json_result(
|
||||
data=False,
|
||||
message='No authorization.',
|
||||
code=settings.RetCode.AUTHENTICATION_ERROR
|
||||
)
|
||||
|
||||
tenants = UserTenantService.get_tenants_by_user_id(current_user.id)
|
||||
tags = []
|
||||
for tenant in tenants:
|
||||
tags += settings.retrievaler.all_tags(tenant["tenant_id"], [kb_id])
|
||||
return get_json_result(data=tags)
|
||||
|
||||
|
||||
@router.get('/tags')
|
||||
async def list_tags_from_kbs(
|
||||
kb_ids: str = Query(..., description="Comma-separated list of knowledge base IDs"),
|
||||
current_user = Depends(get_current_user)
|
||||
):
|
||||
kb_ids = kb_ids.split(",")
|
||||
for kb_id in kb_ids:
|
||||
if not KnowledgebaseService.accessible(kb_id, current_user.id):
|
||||
return get_json_result(
|
||||
data=False,
|
||||
message='No authorization.',
|
||||
code=settings.RetCode.AUTHENTICATION_ERROR
|
||||
)
|
||||
|
||||
tenants = UserTenantService.get_tenants_by_user_id(current_user.id)
|
||||
tags = []
|
||||
for tenant in tenants:
|
||||
tags += settings.retrievaler.all_tags(tenant["tenant_id"], kb_ids)
|
||||
return get_json_result(data=tags)
|
||||
|
||||
|
||||
@router.post('/{kb_id}/rm_tags')
|
||||
async def rm_tags(
|
||||
kb_id: str,
|
||||
request: RemoveTagsRequest,
|
||||
current_user = Depends(get_current_user)
|
||||
):
|
||||
if not KnowledgebaseService.accessible(kb_id, current_user.id):
|
||||
return get_json_result(
|
||||
data=False,
|
||||
message='No authorization.',
|
||||
code=settings.RetCode.AUTHENTICATION_ERROR
|
||||
)
|
||||
e, kb = KnowledgebaseService.get_by_id(kb_id)
|
||||
|
||||
for t in request.tags:
|
||||
settings.docStoreConn.update({"tag_kwd": t, "kb_id": [kb_id]},
|
||||
{"remove": {"tag_kwd": t}},
|
||||
search.index_name(kb.tenant_id),
|
||||
kb_id)
|
||||
return get_json_result(data=True)
|
||||
|
||||
|
||||
@router.post('/{kb_id}/rename_tag')
|
||||
async def rename_tags(
|
||||
kb_id: str,
|
||||
request: RenameTagRequest,
|
||||
current_user = Depends(get_current_user)
|
||||
):
|
||||
if not KnowledgebaseService.accessible(kb_id, current_user.id):
|
||||
return get_json_result(
|
||||
data=False,
|
||||
message='No authorization.',
|
||||
code=settings.RetCode.AUTHENTICATION_ERROR
|
||||
)
|
||||
e, kb = KnowledgebaseService.get_by_id(kb_id)
|
||||
|
||||
settings.docStoreConn.update({"tag_kwd": request.from_tag, "kb_id": [kb_id]},
|
||||
{"remove": {"tag_kwd": request.from_tag.strip()}, "add": {"tag_kwd": request.to_tag}},
|
||||
search.index_name(kb.tenant_id),
|
||||
kb_id)
|
||||
return get_json_result(data=True)
|
||||
|
||||
|
||||
@router.get('/{kb_id}/knowledge_graph')
|
||||
async def knowledge_graph(
|
||||
kb_id: str,
|
||||
current_user = Depends(get_current_user)
|
||||
):
|
||||
if not KnowledgebaseService.accessible(kb_id, current_user.id):
|
||||
return get_json_result(
|
||||
data=False,
|
||||
message='No authorization.',
|
||||
code=settings.RetCode.AUTHENTICATION_ERROR
|
||||
)
|
||||
_, kb = KnowledgebaseService.get_by_id(kb_id)
|
||||
req = {
|
||||
"kb_id": [kb_id],
|
||||
"knowledge_graph_kwd": ["graph"]
|
||||
}
|
||||
|
||||
obj = {"graph": {}, "mind_map": {}}
|
||||
if not settings.docStoreConn.indexExist(search.index_name(kb.tenant_id), kb_id):
|
||||
return get_json_result(data=obj)
|
||||
sres = settings.retrievaler.search(req, search.index_name(kb.tenant_id), [kb_id])
|
||||
if not len(sres.ids):
|
||||
return get_json_result(data=obj)
|
||||
|
||||
for id in sres.ids[:1]:
|
||||
ty = sres.field[id]["knowledge_graph_kwd"]
|
||||
try:
|
||||
content_json = json.loads(sres.field[id]["content_with_weight"])
|
||||
except Exception:
|
||||
continue
|
||||
|
||||
obj[ty] = content_json
|
||||
|
||||
if "nodes" in obj["graph"]:
|
||||
obj["graph"]["nodes"] = sorted(obj["graph"]["nodes"], key=lambda x: x.get("pagerank", 0), reverse=True)[:256]
|
||||
if "edges" in obj["graph"]:
|
||||
node_id_set = { o["id"] for o in obj["graph"]["nodes"] }
|
||||
filtered_edges = [o for o in obj["graph"]["edges"] if o["source"] != o["target"] and o["source"] in node_id_set and o["target"] in node_id_set]
|
||||
obj["graph"]["edges"] = sorted(filtered_edges, key=lambda x: x.get("weight", 0), reverse=True)[:128]
|
||||
return get_json_result(data=obj)
|
||||
|
||||
|
||||
@router.delete('/{kb_id}/knowledge_graph')
|
||||
async def delete_knowledge_graph(
|
||||
kb_id: str,
|
||||
current_user = Depends(get_current_user)
|
||||
):
|
||||
if not KnowledgebaseService.accessible(kb_id, current_user.id):
|
||||
return get_json_result(
|
||||
data=False,
|
||||
message='No authorization.',
|
||||
code=settings.RetCode.AUTHENTICATION_ERROR
|
||||
)
|
||||
_, kb = KnowledgebaseService.get_by_id(kb_id)
|
||||
settings.docStoreConn.delete({"knowledge_graph_kwd": ["graph", "subgraph", "entity", "relation"]}, search.index_name(kb.tenant_id), kb_id)
|
||||
|
||||
return get_json_result(data=True)
|
||||
|
||||
|
||||
@router.get("/get_meta")
|
||||
async def get_meta(
|
||||
kb_ids: str = Query(..., description="Comma-separated list of knowledge base IDs"),
|
||||
current_user = Depends(get_current_user)
|
||||
):
|
||||
kb_ids = kb_ids.split(",")
|
||||
for kb_id in kb_ids:
|
||||
if not KnowledgebaseService.accessible(kb_id, current_user.id):
|
||||
return get_json_result(
|
||||
data=False,
|
||||
message='No authorization.',
|
||||
code=settings.RetCode.AUTHENTICATION_ERROR
|
||||
)
|
||||
return get_json_result(data=DocumentService.get_meta_by_kbs(kb_ids))
|
||||
|
||||
|
||||
@router.get("/basic_info")
|
||||
async def get_basic_info(
|
||||
kb_id: str = Query(..., description="Knowledge base ID"),
|
||||
current_user = Depends(get_current_user)
|
||||
):
|
||||
if not KnowledgebaseService.accessible(kb_id, current_user.id):
|
||||
return get_json_result(
|
||||
data=False,
|
||||
message='No authorization.',
|
||||
code=settings.RetCode.AUTHENTICATION_ERROR
|
||||
)
|
||||
|
||||
basic_info = DocumentService.knowledgebase_basic_info(kb_id)
|
||||
|
||||
return get_json_result(data=basic_info)
|
||||
|
||||
|
||||
@router.post("/list_pipeline_logs")
|
||||
async def list_pipeline_logs(
|
||||
request: ListPipelineLogsRequest,
|
||||
kb_id: str = Query(..., description="Knowledge base ID"),
|
||||
keywords: str = Query("", description="Keywords"),
|
||||
page: int = Query(0, description="Page number"),
|
||||
page_size: int = Query(0, description="Page size"),
|
||||
orderby: str = Query("create_time", description="Field to order by"),
|
||||
desc: bool = Query(True, description="Sort in descending order"),
|
||||
create_date_from: str = Query("", description="Start of creation date range"),
|
||||
create_date_to: str = Query("", description="End of creation date range"),
|
||||
current_user = Depends(get_current_user)
|
||||
):
|
||||
if not kb_id:
|
||||
return get_json_result(data=False, message='Lack of "KB ID"', code=settings.RetCode.ARGUMENT_ERROR)
|
||||
|
||||
page_number = page
|
||||
items_per_page = page_size
|
||||
|
||||
if create_date_from and create_date_to and create_date_from > create_date_to:
|
||||
return get_data_error_result(message="Create data filter is abnormal.")
|
||||
|
||||
operation_status = request.operation_status
|
||||
if operation_status:
|
||||
invalid_status = {s for s in operation_status if s not in VALID_TASK_STATUS}
|
||||
if invalid_status:
|
||||
return get_data_error_result(message=f"Invalid filter operation_status status conditions: {', '.join(invalid_status)}")
|
||||
|
||||
types = request.types
|
||||
if types:
|
||||
invalid_types = {t for t in types if t not in VALID_FILE_TYPES}
|
||||
if invalid_types:
|
||||
return get_data_error_result(message=f"Invalid filter conditions: {', '.join(invalid_types)} type{'s' if len(invalid_types) > 1 else ''}")
|
||||
|
||||
suffix = request.suffix
|
||||
|
||||
try:
|
||||
logs, tol = PipelineOperationLogService.get_file_logs_by_kb_id(kb_id, page_number, items_per_page, orderby, desc, keywords, operation_status, types, suffix, create_date_from, create_date_to)
|
||||
return get_json_result(data={"total": tol, "logs": logs})
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@router.post("/list_pipeline_dataset_logs")
|
||||
async def list_pipeline_dataset_logs(
|
||||
request: ListPipelineDatasetLogsRequest,
|
||||
kb_id: str = Query(..., description="Knowledge base ID"),
|
||||
page: int = Query(0, description="Page number"),
|
||||
page_size: int = Query(0, description="Page size"),
|
||||
orderby: str = Query("create_time", description="Field to order by"),
|
||||
desc: bool = Query(True, description="Sort in descending order"),
|
||||
create_date_from: str = Query("", description="Start of creation date range"),
|
||||
create_date_to: str = Query("", description="End of creation date range"),
|
||||
current_user = Depends(get_current_user)
|
||||
):
|
||||
if not kb_id:
|
||||
return get_json_result(data=False, message='Lack of "KB ID"', code=settings.RetCode.ARGUMENT_ERROR)
|
||||
|
||||
page_number = page
|
||||
items_per_page = page_size
|
||||
|
||||
if create_date_from and create_date_to and create_date_from > create_date_to:
|
||||
return get_data_error_result(message="Create data filter is abnormal.")
|
||||
|
||||
operation_status = request.operation_status
|
||||
if operation_status:
|
||||
invalid_status = {s for s in operation_status if s not in VALID_TASK_STATUS}
|
||||
if invalid_status:
|
||||
return get_data_error_result(message=f"Invalid filter operation_status status conditions: {', '.join(invalid_status)}")
|
||||
|
||||
try:
|
||||
logs, tol = PipelineOperationLogService.get_dataset_logs_by_kb_id(kb_id, page_number, items_per_page, orderby, desc, operation_status, create_date_from, create_date_to)
|
||||
return get_json_result(data={"total": tol, "logs": logs})
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@router.post("/delete_pipeline_logs")
|
||||
async def delete_pipeline_logs(
|
||||
request: DeletePipelineLogsRequest,
|
||||
kb_id: str = Query(..., description="Knowledge base ID"),
|
||||
current_user = Depends(get_current_user)
|
||||
):
|
||||
if not kb_id:
|
||||
return get_json_result(data=False, message='Lack of "KB ID"', code=settings.RetCode.ARGUMENT_ERROR)
|
||||
|
||||
log_ids = request.log_ids
|
||||
|
||||
PipelineOperationLogService.delete_by_ids(log_ids)
|
||||
|
||||
return get_json_result(data=True)
|
||||
|
||||
|
||||
@router.get("/pipeline_log_detail")
|
||||
async def pipeline_log_detail(
|
||||
log_id: str = Query(..., description="Pipeline log ID"),
|
||||
current_user = Depends(get_current_user)
|
||||
):
|
||||
if not log_id:
|
||||
return get_json_result(data=False, message='Lack of "Pipeline log ID"', code=settings.RetCode.ARGUMENT_ERROR)
|
||||
|
||||
ok, log = PipelineOperationLogService.get_by_id(log_id)
|
||||
if not ok:
|
||||
return get_data_error_result(message="Invalid pipeline log ID")
|
||||
|
||||
return get_json_result(data=log.to_dict())
|
||||
|
||||
|
||||
@router.post("/run_graphrag")
|
||||
async def run_graphrag(
|
||||
request: RunGraphRAGRequest,
|
||||
current_user = Depends(get_current_user)
|
||||
):
|
||||
kb_id = request.kb_id
|
||||
if not kb_id:
|
||||
return get_error_data_result(message='Lack of "KB ID"')
|
||||
|
||||
ok, kb = KnowledgebaseService.get_by_id(kb_id)
|
||||
if not ok:
|
||||
return get_error_data_result(message="Invalid Knowledgebase ID")
|
||||
|
||||
task_id = kb.graphrag_task_id
|
||||
if task_id:
|
||||
ok, task = TaskService.get_by_id(task_id)
|
||||
if not ok:
|
||||
logging.warning(f"A valid GraphRAG task id is expected for kb {kb_id}")
|
||||
|
||||
if task and task.progress not in [-1, 1]:
|
||||
return get_error_data_result(message=f"Task {task_id} in progress with status {task.progress}. A Graph Task is already running.")
|
||||
|
||||
documents, _ = DocumentService.get_by_kb_id(
|
||||
kb_id=kb_id,
|
||||
page_number=0,
|
||||
items_per_page=0,
|
||||
orderby="create_time",
|
||||
desc=False,
|
||||
keywords="",
|
||||
run_status=[],
|
||||
types=[],
|
||||
suffix=[],
|
||||
)
|
||||
if not documents:
|
||||
return get_error_data_result(message=f"No documents in Knowledgebase {kb_id}")
|
||||
|
||||
sample_document = documents[0]
|
||||
document_ids = [document["id"] for document in documents]
|
||||
|
||||
task_id = queue_raptor_o_graphrag_tasks(doc=sample_document, ty="graphrag", priority=0, fake_doc_id=GRAPH_RAPTOR_FAKE_DOC_ID, doc_ids=list(document_ids))
|
||||
|
||||
if not KnowledgebaseService.update_by_id(kb.id, {"graphrag_task_id": task_id}):
|
||||
logging.warning(f"Cannot save graphrag_task_id for kb {kb_id}")
|
||||
|
||||
return get_json_result(data={"graphrag_task_id": task_id})
|
||||
|
||||
|
||||
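Putting the two endpoints together from the client side (host, token and kb_id are placeholders; the progress convention of 1 = done and -1 = failed is taken from the checks in run_graphrag above):

import time

import httpx

BASE = "http://localhost:9380/v1/kb"
HEADERS = {"Authorization": "Bearer <access_token>"}


def run_graphrag_and_wait(kb_id: str, poll_seconds: int = 5) -> dict:
    # Kick off the GraphRAG task, then poll the trace endpoint until it finishes.
    httpx.post(f"{BASE}/run_graphrag", json={"kb_id": kb_id}, headers=HEADERS)
    while True:
        resp = httpx.get(f"{BASE}/trace_graphrag", params={"kb_id": kb_id}, headers=HEADERS)
        task = resp.json().get("data") or {}  # assumed get_json_result envelope
        if task.get("progress") in (1, -1):
            return task
        time.sleep(poll_seconds)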
@router.get("/trace_graphrag")
|
||||
async def trace_graphrag(
|
||||
kb_id: str = Query(..., description="Knowledge base ID"),
|
||||
current_user = Depends(get_current_user)
|
||||
):
|
||||
if not kb_id:
|
||||
return get_error_data_result(message='Lack of "KB ID"')
|
||||
|
||||
ok, kb = KnowledgebaseService.get_by_id(kb_id)
|
||||
if not ok:
|
||||
return get_error_data_result(message="Invalid Knowledgebase ID")
|
||||
|
||||
task_id = kb.graphrag_task_id
|
||||
if not task_id:
|
||||
return get_json_result(data={})
|
||||
|
||||
ok, task = TaskService.get_by_id(task_id)
|
||||
if not ok:
|
||||
return get_error_data_result(message="GraphRAG Task Not Found or Error Occurred")
|
||||
|
||||
return get_json_result(data=task.to_dict())
|
||||
|
||||
|
||||
@router.post("/run_raptor")
|
||||
async def run_raptor(
|
||||
request: RunRaptorRequest,
|
||||
current_user = Depends(get_current_user)
|
||||
):
|
||||
kb_id = request.kb_id
|
||||
if not kb_id:
|
||||
return get_error_data_result(message='Lack of "KB ID"')
|
||||
|
||||
ok, kb = KnowledgebaseService.get_by_id(kb_id)
|
||||
if not ok:
|
||||
return get_error_data_result(message="Invalid Knowledgebase ID")
|
||||
|
||||
task_id = kb.raptor_task_id
|
||||
if task_id:
|
||||
ok, task = TaskService.get_by_id(task_id)
|
||||
if not ok:
|
||||
logging.warning(f"A valid RAPTOR task id is expected for kb {kb_id}")
|
||||
|
||||
if task and task.progress not in [-1, 1]:
|
||||
return get_error_data_result(message=f"Task {task_id} in progress with status {task.progress}. A RAPTOR Task is already running.")
|
||||
|
||||
documents, _ = DocumentService.get_by_kb_id(
|
||||
kb_id=kb_id,
|
||||
page_number=0,
|
||||
items_per_page=0,
|
||||
orderby="create_time",
|
||||
desc=False,
|
||||
keywords="",
|
||||
run_status=[],
|
||||
types=[],
|
||||
suffix=[],
|
||||
)
|
||||
if not documents:
|
||||
return get_error_data_result(message=f"No documents in Knowledgebase {kb_id}")
|
||||
|
||||
sample_document = documents[0]
|
||||
document_ids = [document["id"] for document in documents]
|
||||
|
||||
task_id = queue_raptor_o_graphrag_tasks(doc=sample_document, ty="raptor", priority=0, fake_doc_id=GRAPH_RAPTOR_FAKE_DOC_ID, doc_ids=list(document_ids))
|
||||
|
||||
if not KnowledgebaseService.update_by_id(kb.id, {"raptor_task_id": task_id}):
|
||||
logging.warning(f"Cannot save raptor_task_id for kb {kb_id}")
|
||||
|
||||
return get_json_result(data={"raptor_task_id": task_id})
|
||||
|
||||
|
||||
@router.get("/trace_raptor")
|
||||
async def trace_raptor(
|
||||
kb_id: str = Query(..., description="Knowledge base ID"),
|
||||
current_user = Depends(get_current_user)
|
||||
):
|
||||
if not kb_id:
|
||||
return get_error_data_result(message='Lack of "KB ID"')
|
||||
|
||||
ok, kb = KnowledgebaseService.get_by_id(kb_id)
|
||||
if not ok:
|
||||
return get_error_data_result(message="Invalid Knowledgebase ID")
|
||||
|
||||
task_id = kb.raptor_task_id
|
||||
if not task_id:
|
||||
return get_json_result(data={})
|
||||
|
||||
ok, task = TaskService.get_by_id(task_id)
|
||||
if not ok:
|
||||
return get_error_data_result(message="RAPTOR Task Not Found or Error Occurred")
|
||||
|
||||
return get_json_result(data=task.to_dict())
|
||||
|
||||
|
||||
@router.post("/run_mindmap")
|
||||
async def run_mindmap(
|
||||
request: RunMindmapRequest,
|
||||
current_user = Depends(get_current_user)
|
||||
):
|
||||
kb_id = request.kb_id
|
||||
if not kb_id:
|
||||
return get_error_data_result(message='Lack of "KB ID"')
|
||||
|
||||
ok, kb = KnowledgebaseService.get_by_id(kb_id)
|
||||
if not ok:
|
||||
return get_error_data_result(message="Invalid Knowledgebase ID")
|
||||
|
||||
task_id = kb.mindmap_task_id
|
||||
if task_id:
|
||||
ok, task = TaskService.get_by_id(task_id)
|
||||
if not ok:
|
||||
logging.warning(f"A valid Mindmap task id is expected for kb {kb_id}")
|
||||
|
||||
if task and task.progress not in [-1, 1]:
|
||||
return get_error_data_result(message=f"Task {task_id} in progress with status {task.progress}. A Mindmap Task is already running.")
|
||||
|
||||
documents, _ = DocumentService.get_by_kb_id(
|
||||
kb_id=kb_id,
|
||||
page_number=0,
|
||||
items_per_page=0,
|
||||
orderby="create_time",
|
||||
desc=False,
|
||||
keywords="",
|
||||
run_status=[],
|
||||
types=[],
|
||||
suffix=[],
|
||||
)
|
||||
if not documents:
|
||||
return get_error_data_result(message=f"No documents in Knowledgebase {kb_id}")
|
||||
|
||||
sample_document = documents[0]
|
||||
document_ids = [document["id"] for document in documents]
|
||||
|
||||
task_id = queue_raptor_o_graphrag_tasks(doc=sample_document, ty="mindmap", priority=0, fake_doc_id=GRAPH_RAPTOR_FAKE_DOC_ID, doc_ids=list(document_ids))
|
||||
|
||||
if not KnowledgebaseService.update_by_id(kb.id, {"mindmap_task_id": task_id}):
|
||||
logging.warning(f"Cannot save mindmap_task_id for kb {kb_id}")
|
||||
|
||||
return get_json_result(data={"mindmap_task_id": task_id})
|
||||
|
||||
|
||||
@router.get("/trace_mindmap")
|
||||
async def trace_mindmap(
|
||||
kb_id: str = Query(..., description="Knowledge base ID"),
|
||||
current_user = Depends(get_current_user)
|
||||
):
|
||||
if not kb_id:
|
||||
return get_error_data_result(message='Lack of "KB ID"')
|
||||
|
||||
ok, kb = KnowledgebaseService.get_by_id(kb_id)
|
||||
if not ok:
|
||||
return get_error_data_result(message="Invalid Knowledgebase ID")
|
||||
|
||||
task_id = kb.mindmap_task_id
|
||||
if not task_id:
|
||||
return get_json_result(data={})
|
||||
|
||||
ok, task = TaskService.get_by_id(task_id)
|
||||
if not ok:
|
||||
return get_error_data_result(message="Mindmap Task Not Found or Error Occurred")
|
||||
|
||||
return get_json_result(data=task.to_dict())
|
||||
|
||||
|
||||
@router.delete("/unbind_task")
|
||||
async def delete_kb_task(
|
||||
kb_id: str = Query(..., description="Knowledge base ID"),
|
||||
pipeline_task_type: str = Query(..., description="Pipeline task type"),
|
||||
current_user = Depends(get_current_user)
|
||||
):
|
||||
if not kb_id:
|
||||
return get_error_data_result(message='Lack of "KB ID"')
|
||||
ok, kb = KnowledgebaseService.get_by_id(kb_id)
|
||||
if not ok:
|
||||
return get_json_result(data=True)
|
||||
if not pipeline_task_type or pipeline_task_type not in [PipelineTaskType.GRAPH_RAG, PipelineTaskType.RAPTOR, PipelineTaskType.MINDMAP]:
|
||||
return get_error_data_result(message="Invalid task type")
|
||||
|
||||
match pipeline_task_type:
|
||||
case PipelineTaskType.GRAPH_RAG:
|
||||
settings.docStoreConn.delete({"knowledge_graph_kwd": ["graph", "subgraph", "entity", "relation"]}, search.index_name(kb.tenant_id), kb_id)
|
||||
kb_task_id = "graphrag_task_id"
|
||||
kb_task_finish_at = "graphrag_task_finish_at"
|
||||
case PipelineTaskType.RAPTOR:
|
||||
kb_task_id = "raptor_task_id"
|
||||
kb_task_finish_at = "raptor_task_finish_at"
|
||||
case PipelineTaskType.MINDMAP:
|
||||
kb_task_id = "mindmap_task_id"
|
||||
kb_task_finish_at = "mindmap_task_finish_at"
|
||||
case _:
|
||||
return get_error_data_result(message="Internal Error: Invalid task type")
|
||||
|
||||
ok = KnowledgebaseService.update_by_id(kb_id, {kb_task_id: "", kb_task_finish_at: None})
|
||||
if not ok:
|
||||
return server_error_response(f"Internal error: cannot delete task {pipeline_task_type}")
|
||||
|
||||
return get_json_result(data=True)
|
||||
97
api/apps/langfuse_app.py
Normal file
@@ -0,0 +1,97 @@
|
||||
#
|
||||
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
|
||||
from flask import request
|
||||
from flask_login import current_user, login_required
|
||||
from langfuse import Langfuse
|
||||
|
||||
from api.db.db_models import DB
|
||||
from api.db.services.langfuse_service import TenantLangfuseService
|
||||
from api.utils.api_utils import get_error_data_result, get_json_result, server_error_response, validate_request
|
||||
|
||||
|
||||
@manager.route("/api_key", methods=["POST", "PUT"]) # noqa: F821
|
||||
@login_required
|
||||
@validate_request("secret_key", "public_key", "host")
|
||||
def set_api_key():
|
||||
req = request.get_json()
|
||||
secret_key = req.get("secret_key", "")
|
||||
public_key = req.get("public_key", "")
|
||||
host = req.get("host", "")
|
||||
if not all([secret_key, public_key, host]):
|
||||
return get_error_data_result(message="Missing required fields")
|
||||
|
||||
langfuse_keys = dict(
|
||||
tenant_id=current_user.id,
|
||||
secret_key=secret_key,
|
||||
public_key=public_key,
|
||||
host=host,
|
||||
)
|
||||
|
||||
langfuse = Langfuse(public_key=langfuse_keys["public_key"], secret_key=langfuse_keys["secret_key"], host=langfuse_keys["host"])
|
||||
if not langfuse.auth_check():
|
||||
return get_error_data_result(message="Invalid Langfuse keys")
|
||||
|
||||
langfuse_entry = TenantLangfuseService.filter_by_tenant(tenant_id=current_user.id)
|
||||
with DB.atomic():
|
||||
try:
|
||||
if not langfuse_entry:
|
||||
TenantLangfuseService.save(**langfuse_keys)
|
||||
else:
|
||||
TenantLangfuseService.update_by_tenant(tenant_id=current_user.id, langfuse_keys=langfuse_keys)
|
||||
return get_json_result(data=langfuse_keys)
|
||||
except Exception as e:
|
||||
server_error_response(e)
|
||||
|
||||
|
||||
@manager.route("/api_key", methods=["GET"]) # noqa: F821
|
||||
@login_required
|
||||
@validate_request()
|
||||
def get_api_key():
|
||||
langfuse_entry = TenantLangfuseService.filter_by_tenant_with_info(tenant_id=current_user.id)
|
||||
if not langfuse_entry:
|
||||
return get_json_result(message="Have not record any Langfuse keys.")
|
||||
|
||||
langfuse = Langfuse(public_key=langfuse_entry["public_key"], secret_key=langfuse_entry["secret_key"], host=langfuse_entry["host"])
|
||||
try:
|
||||
if not langfuse.auth_check():
|
||||
return get_error_data_result(message="Invalid Langfuse keys loaded")
|
||||
except langfuse.api.core.api_error.ApiError as api_err:
|
||||
return get_json_result(message=f"Error from Langfuse: {api_err}")
|
||||
except Exception as e:
|
||||
server_error_response(e)
|
||||
|
||||
langfuse_entry["project_id"] = langfuse.api.projects.get().dict()["data"][0]["id"]
|
||||
langfuse_entry["project_name"] = langfuse.api.projects.get().dict()["data"][0]["name"]
|
||||
|
||||
return get_json_result(data=langfuse_entry)
|
||||
|
||||
|
||||
@manager.route("/api_key", methods=["DELETE"]) # noqa: F821
|
||||
@login_required
|
||||
@validate_request()
|
||||
def delete_api_key():
|
||||
langfuse_entry = TenantLangfuseService.filter_by_tenant(tenant_id=current_user.id)
|
||||
if not langfuse_entry:
|
||||
return get_json_result(message="Have not record any Langfuse keys.")
|
||||
|
||||
with DB.atomic():
|
||||
try:
|
||||
TenantLangfuseService.delete_model(langfuse_entry)
|
||||
return get_json_result(data=True)
|
||||
except Exception as e:
|
||||
server_error_response(e)
|
||||
396
api/apps/llm_app.py
Normal file
@@ -0,0 +1,396 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import logging
|
||||
import json
|
||||
from flask import request
|
||||
from flask_login import login_required, current_user
|
||||
from api.db.services.tenant_llm_service import LLMFactoriesService, TenantLLMService
|
||||
from api.db.services.llm_service import LLMService
|
||||
from api import settings
|
||||
from api.utils.api_utils import server_error_response, get_data_error_result, validate_request
|
||||
from api.db import StatusEnum, LLMType
|
||||
from api.db.db_models import TenantLLM
|
||||
from api.utils.api_utils import get_json_result
|
||||
from api.utils.base64_image import test_image
|
||||
from rag.llm import EmbeddingModel, ChatModel, RerankModel, CvModel, TTSModel
|
||||
|
||||
|
||||
@manager.route('/factories', methods=['GET']) # noqa: F821
|
||||
@login_required
|
||||
def factories():
|
||||
try:
|
||||
fac = LLMFactoriesService.get_all()
|
||||
fac = [f.to_dict() for f in fac if f.name not in ["Youdao", "FastEmbed", "BAAI"]]
|
||||
llms = LLMService.get_all()
|
||||
mdl_types = {}
|
||||
for m in llms:
|
||||
if m.status != StatusEnum.VALID.value:
|
||||
continue
|
||||
if m.fid not in mdl_types:
|
||||
mdl_types[m.fid] = set([])
|
||||
mdl_types[m.fid].add(m.model_type)
|
||||
for f in fac:
|
||||
f["model_types"] = list(mdl_types.get(f["name"], [LLMType.CHAT, LLMType.EMBEDDING, LLMType.RERANK,
|
||||
LLMType.IMAGE2TEXT, LLMType.SPEECH2TEXT, LLMType.TTS]))
|
||||
return get_json_result(data=fac)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route('/set_api_key', methods=['POST']) # noqa: F821
|
||||
@login_required
|
||||
@validate_request("llm_factory", "api_key")
|
||||
def set_api_key():
|
||||
req = request.json
|
||||
# test if api key works
|
||||
chat_passed, embd_passed, rerank_passed = False, False, False
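# one successful probe of any model type is enough to accept the key (see the `break` after the checks below)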
|
||||
factory = req["llm_factory"]
|
||||
extra = {"provider": factory}
|
||||
msg = ""
|
||||
for llm in LLMService.query(fid=factory):
|
||||
if not embd_passed and llm.model_type == LLMType.EMBEDDING.value:
|
||||
assert factory in EmbeddingModel, f"Embedding model from {factory} is not supported yet."
|
||||
mdl = EmbeddingModel[factory](
|
||||
req["api_key"], llm.llm_name, base_url=req.get("base_url"))
|
||||
try:
|
||||
arr, tc = mdl.encode(["Test if the api key is available"])
|
||||
if len(arr[0]) == 0:
|
||||
raise Exception("Fail")
|
||||
embd_passed = True
|
||||
except Exception as e:
|
||||
msg += f"\nFail to access embedding model({llm.llm_name}) using this api key." + str(e)
|
||||
elif not chat_passed and llm.model_type == LLMType.CHAT.value:
|
||||
assert factory in ChatModel, f"Chat model from {factory} is not supported yet."
|
||||
mdl = ChatModel[factory](
|
||||
req["api_key"], llm.llm_name, base_url=req.get("base_url"), **extra)
|
||||
try:
|
||||
m, tc = mdl.chat(None, [{"role": "user", "content": "Hello! How are you doing!"}],
|
||||
{"temperature": 0.9, 'max_tokens': 50})
|
||||
if m.find("**ERROR**") >= 0:
|
||||
raise Exception(m)
|
||||
chat_passed = True
|
||||
except Exception as e:
|
||||
msg += f"\nFail to access model({llm.fid}/{llm.llm_name}) using this api key." + str(
|
||||
e)
|
||||
elif not rerank_passed and llm.model_type == LLMType.RERANK:
|
||||
assert factory in RerankModel, f"Re-rank model from {factory} is not supported yet."
|
||||
mdl = RerankModel[factory](
|
||||
req["api_key"], llm.llm_name, base_url=req.get("base_url"))
|
||||
try:
|
||||
arr, tc = mdl.similarity("What's the weather?", ["Is it sunny today?"])
|
||||
if len(arr) == 0 or tc == 0:
|
||||
raise Exception("Fail")
|
||||
rerank_passed = True
|
||||
logging.debug(f'passed model rerank {llm.llm_name}')
|
||||
except Exception as e:
|
||||
msg += f"\nFail to access model({llm.fid}/{llm.llm_name}) using this api key." + str(
|
||||
e)
|
||||
if any([embd_passed, chat_passed, rerank_passed]):
|
||||
msg = ''
|
||||
break
|
||||
|
||||
if msg:
|
||||
return get_data_error_result(message=msg)
|
||||
|
||||
llm_config = {
|
||||
"api_key": req["api_key"],
|
||||
"api_base": req.get("base_url", "")
|
||||
}
|
||||
for n in ["model_type", "llm_name"]:
|
||||
if n in req:
|
||||
llm_config[n] = req[n]
|
||||
|
||||
for llm in LLMService.query(fid=factory):
|
||||
llm_config["max_tokens"]=llm.max_tokens
|
||||
if not TenantLLMService.filter_update(
|
||||
[TenantLLM.tenant_id == current_user.id,
|
||||
TenantLLM.llm_factory == factory,
|
||||
TenantLLM.llm_name == llm.llm_name],
|
||||
llm_config):
|
||||
TenantLLMService.save(
|
||||
tenant_id=current_user.id,
|
||||
llm_factory=factory,
|
||||
llm_name=llm.llm_name,
|
||||
model_type=llm.model_type,
|
||||
api_key=llm_config["api_key"],
|
||||
api_base=llm_config["api_base"],
|
||||
max_tokens=llm_config["max_tokens"]
|
||||
)
|
||||
|
||||
return get_json_result(data=True)
|
||||
|
||||
|
||||
@manager.route('/add_llm', methods=['POST']) # noqa: F821
|
||||
@login_required
|
||||
@validate_request("llm_factory")
|
||||
def add_llm():
|
||||
req = request.json
|
||||
factory = req["llm_factory"]
|
||||
api_key = req.get("api_key", "x")
|
||||
llm_name = req.get("llm_name")
|
||||
|
||||
def apikey_json(keys):
|
||||
nonlocal req
|
||||
return json.dumps({k: req.get(k, "") for k in keys})
|
||||
|
||||
if factory == "VolcEngine":
|
||||
# For VolcEngine, due to its special authentication method
|
||||
# Assemble ark_api_key endpoint_id into api_key
|
||||
api_key = apikey_json(["ark_api_key", "endpoint_id"])
|
||||
|
||||
elif factory == "Tencent Hunyuan":
|
||||
req["api_key"] = apikey_json(["hunyuan_sid", "hunyuan_sk"])
|
||||
return set_api_key()
|
||||
|
||||
elif factory == "Tencent Cloud":
|
||||
req["api_key"] = apikey_json(["tencent_cloud_sid", "tencent_cloud_sk"])
|
||||
return set_api_key()
|
||||
|
||||
elif factory == "Bedrock":
|
||||
# For Bedrock, due to its special authentication method
|
||||
# Assemble bedrock_ak, bedrock_sk, bedrock_region
|
||||
api_key = apikey_json(["bedrock_ak", "bedrock_sk", "bedrock_region"])
|
||||
|
||||
elif factory == "LocalAI":
|
||||
llm_name += "___LocalAI"
|
||||
|
||||
elif factory == "HuggingFace":
|
||||
llm_name += "___HuggingFace"
|
||||
|
||||
elif factory == "OpenAI-API-Compatible":
|
||||
llm_name += "___OpenAI-API"
|
||||
|
||||
elif factory == "VLLM":
|
||||
llm_name += "___VLLM"
|
||||
|
||||
elif factory == "XunFei Spark":
|
||||
if req["model_type"] == "chat":
|
||||
api_key = req.get("spark_api_password", "")
|
||||
elif req["model_type"] == "tts":
|
||||
api_key = apikey_json(["spark_app_id", "spark_api_secret", "spark_api_key"])
|
||||
|
||||
elif factory == "BaiduYiyan":
|
||||
api_key = apikey_json(["yiyan_ak", "yiyan_sk"])
|
||||
|
||||
elif factory == "Fish Audio":
|
||||
api_key = apikey_json(["fish_audio_ak", "fish_audio_refid"])
|
||||
|
||||
elif factory == "Google Cloud":
|
||||
api_key = apikey_json(["google_project_id", "google_region", "google_service_account_key"])
|
||||
|
||||
elif factory == "Azure-OpenAI":
|
||||
api_key = apikey_json(["api_key", "api_version"])
|
||||
|
||||
llm = {
|
||||
"tenant_id": current_user.id,
|
||||
"llm_factory": factory,
|
||||
"model_type": req["model_type"],
|
||||
"llm_name": llm_name,
|
||||
"api_base": req.get("api_base", ""),
|
||||
"api_key": api_key,
|
||||
"max_tokens": req.get("max_tokens")
|
||||
}
|
||||
|
||||
msg = ""
|
||||
mdl_nm = llm["llm_name"].split("___")[0]
|
||||
extra = {"provider": factory}
|
||||
if llm["model_type"] == LLMType.EMBEDDING.value:
|
||||
assert factory in EmbeddingModel, f"Embedding model from {factory} is not supported yet."
|
||||
mdl = EmbeddingModel[factory](
|
||||
key=llm['api_key'],
|
||||
model_name=mdl_nm,
|
||||
base_url=llm["api_base"])
|
||||
try:
|
||||
arr, tc = mdl.encode(["Test if the api key is available"])
|
||||
if len(arr[0]) == 0:
|
||||
raise Exception("Fail")
|
||||
except Exception as e:
|
||||
msg += f"\nFail to access embedding model({mdl_nm})." + str(e)
|
||||
elif llm["model_type"] == LLMType.CHAT.value:
|
||||
assert factory in ChatModel, f"Chat model from {factory} is not supported yet."
|
||||
mdl = ChatModel[factory](
|
||||
key=llm['api_key'],
|
||||
model_name=mdl_nm,
|
||||
base_url=llm["api_base"],
|
||||
**extra,
|
||||
)
|
||||
try:
|
||||
m, tc = mdl.chat(None, [{"role": "user", "content": "Hello! How are you doing!"}], {
|
||||
"temperature": 0.9})
|
||||
if not tc and m.find("**ERROR**:") >= 0:
|
||||
raise Exception(m)
|
||||
except Exception as e:
|
||||
msg += f"\nFail to access model({factory}/{mdl_nm})." + str(
|
||||
e)
|
||||
elif llm["model_type"] == LLMType.RERANK:
|
||||
assert factory in RerankModel, f"Re-rank model from {factory} is not supported yet."
|
||||
try:
|
||||
mdl = RerankModel[factory](
|
||||
key=llm["api_key"],
|
||||
model_name=mdl_nm,
|
||||
base_url=llm["api_base"]
|
||||
)
|
||||
arr, tc = mdl.similarity("Hello~ RAGFlower!", ["Hi, there!", "Ohh, my friend!"])
|
||||
if len(arr) == 0:
|
||||
raise Exception("Not known.")
|
||||
except KeyError:
|
||||
msg += f"{factory} dose not support this model({factory}/{mdl_nm})"
|
||||
except Exception as e:
|
||||
msg += f"\nFail to access model({factory}/{mdl_nm})." + str(
|
||||
e)
|
||||
elif llm["model_type"] == LLMType.IMAGE2TEXT.value:
|
||||
assert factory in CvModel, f"Image to text model from {factory} is not supported yet."
|
||||
mdl = CvModel[factory](
|
||||
key=llm["api_key"],
|
||||
model_name=mdl_nm,
|
||||
base_url=llm["api_base"]
|
||||
)
|
||||
try:
|
||||
image_data = test_image
|
||||
m, tc = mdl.describe(image_data)
|
||||
if not m and not tc:
|
||||
raise Exception(m)
|
||||
except Exception as e:
|
||||
msg += f"\nFail to access model({factory}/{mdl_nm})." + str(e)
|
||||
elif llm["model_type"] == LLMType.TTS:
|
||||
assert factory in TTSModel, f"TTS model from {factory} is not supported yet."
|
||||
mdl = TTSModel[factory](
|
||||
key=llm["api_key"], model_name=mdl_nm, base_url=llm["api_base"]
|
||||
)
|
||||
try:
|
||||
for resp in mdl.tts("Hello~ RAGFlower!"):
|
||||
pass
|
||||
except RuntimeError as e:
|
||||
msg += f"\nFail to access model({factory}/{mdl_nm})." + str(e)
|
||||
else:
|
||||
# TODO: check other type of models
|
||||
pass
|
||||
|
||||
if msg:
|
||||
return get_data_error_result(message=msg)
|
||||
|
||||
if not TenantLLMService.filter_update(
|
||||
[TenantLLM.tenant_id == current_user.id, TenantLLM.llm_factory == factory,
|
||||
TenantLLM.llm_name == llm["llm_name"]], llm):
|
||||
TenantLLMService.save(**llm)
|
||||
|
||||
return get_json_result(data=True)
|
||||
|
||||
|
||||
@manager.route('/delete_llm', methods=['POST']) # noqa: F821
|
||||
@login_required
|
||||
@validate_request("llm_factory", "llm_name")
|
||||
def delete_llm():
|
||||
req = request.json
|
||||
TenantLLMService.filter_delete(
|
||||
[TenantLLM.tenant_id == current_user.id, TenantLLM.llm_factory == req["llm_factory"],
|
||||
TenantLLM.llm_name == req["llm_name"]])
|
||||
return get_json_result(data=True)
|
||||
|
||||
|
||||
@manager.route('/delete_factory', methods=['POST']) # noqa: F821
|
||||
@login_required
|
||||
@validate_request("llm_factory")
|
||||
def delete_factory():
|
||||
req = request.json
|
||||
TenantLLMService.filter_delete(
|
||||
[TenantLLM.tenant_id == current_user.id, TenantLLM.llm_factory == req["llm_factory"]])
|
||||
return get_json_result(data=True)
|
||||
|
||||
|
||||
@manager.route('/my_llms', methods=['GET']) # noqa: F821
|
||||
@login_required
|
||||
def my_llms():
|
||||
try:
|
||||
include_details = request.args.get('include_details', 'false').lower() == 'true'
|
||||
|
||||
if include_details:
|
||||
res = {}
|
||||
objs = TenantLLMService.query(tenant_id=current_user.id)
|
||||
factories = LLMFactoriesService.query(status=StatusEnum.VALID.value)
|
||||
|
||||
for o in objs:
|
||||
o_dict = o.to_dict()
|
||||
factory_tags = None
|
||||
for f in factories:
|
||||
if f.name == o_dict["llm_factory"]:
|
||||
factory_tags = f.tags
|
||||
break
|
||||
|
||||
if o_dict["llm_factory"] not in res:
|
||||
res[o_dict["llm_factory"]] = {
|
||||
"tags": factory_tags,
|
||||
"llm": []
|
||||
}
|
||||
|
||||
res[o_dict["llm_factory"]]["llm"].append({
|
||||
"type": o_dict["model_type"],
|
||||
"name": o_dict["llm_name"],
|
||||
"used_token": o_dict["used_tokens"],
|
||||
"api_base": o_dict["api_base"] or "",
|
||||
"max_tokens": o_dict["max_tokens"] or 8192
|
||||
})
|
||||
else:
|
||||
res = {}
|
||||
for o in TenantLLMService.get_my_llms(current_user.id):
|
||||
if o["llm_factory"] not in res:
|
||||
res[o["llm_factory"]] = {
|
||||
"tags": o["tags"],
|
||||
"llm": []
|
||||
}
|
||||
res[o["llm_factory"]]["llm"].append({
|
||||
"type": o["model_type"],
|
||||
"name": o["llm_name"],
|
||||
"used_token": o["used_tokens"]
|
||||
})
|
||||
|
||||
return get_json_result(data=res)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route('/list', methods=['GET']) # noqa: F821
|
||||
@login_required
|
||||
def list_app():
|
||||
self_deployed = ["Youdao", "FastEmbed", "BAAI", "Ollama", "Xinference", "LocalAI", "LM-Studio", "GPUStack"]
|
||||
weighted = ["Youdao", "FastEmbed", "BAAI"] if settings.LIGHTEN != 0 else []
|
||||
model_type = request.args.get("model_type")
|
||||
try:
|
||||
objs = TenantLLMService.query(tenant_id=current_user.id)
|
||||
facts = set([o.to_dict()["llm_factory"] for o in objs if o.api_key])
|
||||
llms = LLMService.get_all()
|
||||
llms = [m.to_dict()
|
||||
for m in llms if m.status == StatusEnum.VALID.value and m.fid not in weighted]
|
||||
for m in llms:
|
||||
m["available"] = m["fid"] in facts or m["llm_name"].lower() == "flag-embedding" or m["fid"] in self_deployed
|
||||
|
||||
llm_set = set([m["llm_name"] + "@" + m["fid"] for m in llms])
|
||||
for o in objs:
|
||||
if o.llm_name + "@" + o.llm_factory in llm_set:
|
||||
continue
|
||||
llms.append({"llm_name": o.llm_name, "model_type": o.model_type, "fid": o.llm_factory, "available": True})
|
||||
|
||||
res = {}
|
||||
for m in llms:
|
||||
if model_type and m["model_type"].find(model_type) < 0:
|
||||
continue
|
||||
if m["fid"] not in res:
|
||||
res[m["fid"]] = []
|
||||
res[m["fid"]].append(m)
|
||||
|
||||
return get_json_result(data=res)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
444
api/apps/mcp_server_app.py
Normal file
@@ -0,0 +1,444 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
from flask import Response, request
|
||||
from flask_login import current_user, login_required
|
||||
|
||||
from api.db import VALID_MCP_SERVER_TYPES
|
||||
from api.db.db_models import MCPServer
|
||||
from api.db.services.mcp_server_service import MCPServerService
|
||||
from api.db.services.user_service import TenantService
|
||||
from api.settings import RetCode
|
||||
|
||||
from api.utils import get_uuid
|
||||
from api.utils.api_utils import get_data_error_result, get_json_result, server_error_response, validate_request, \
|
||||
get_mcp_tools
|
||||
from api.utils.web_utils import get_float, safe_json_parse
|
||||
from rag.utils.mcp_tool_call_conn import MCPToolCallSession, close_multiple_mcp_toolcall_sessions
|
||||
|
||||
|
||||
@manager.route("/list", methods=["POST"]) # noqa: F821
|
||||
@login_required
|
||||
def list_mcp() -> Response:
|
||||
keywords = request.args.get("keywords", "")
|
||||
page_number = int(request.args.get("page", 0))
|
||||
items_per_page = int(request.args.get("page_size", 0))
|
||||
orderby = request.args.get("orderby", "create_time")
|
||||
if request.args.get("desc", "true").lower() == "false":
|
||||
desc = False
|
||||
else:
|
||||
desc = True
|
||||
|
||||
req = request.get_json()
|
||||
mcp_ids = req.get("mcp_ids", [])
|
||||
try:
|
||||
servers = MCPServerService.get_servers(current_user.id, mcp_ids, 0, 0, orderby, desc, keywords) or []
|
||||
total = len(servers)
|
||||
|
||||
if page_number and items_per_page:
|
||||
servers = servers[(page_number - 1) * items_per_page : page_number * items_per_page]
|
||||
|
||||
return get_json_result(data={"mcp_servers": servers, "total": total})
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route("/detail", methods=["GET"]) # noqa: F821
|
||||
@login_required
|
||||
def detail() -> Response:
|
||||
mcp_id = request.args["mcp_id"]
|
||||
try:
|
||||
mcp_server = MCPServerService.get_or_none(id=mcp_id, tenant_id=current_user.id)
|
||||
|
||||
if mcp_server is None:
|
||||
return get_json_result(code=RetCode.NOT_FOUND, data=None)
|
||||
|
||||
return get_json_result(data=mcp_server.to_dict())
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route("/create", methods=["POST"]) # noqa: F821
|
||||
@login_required
|
||||
@validate_request("name", "url", "server_type")
|
||||
def create() -> Response:
|
||||
req = request.get_json()
|
||||
|
||||
server_type = req.get("server_type", "")
|
||||
if server_type not in VALID_MCP_SERVER_TYPES:
|
||||
return get_data_error_result(message="Unsupported MCP server type.")
|
||||
|
||||
server_name = req.get("name", "")
|
||||
if not server_name or len(server_name.encode("utf-8")) > 255:
|
||||
return get_data_error_result(message=f"Invalid MCP name or length is {len(server_name)} which is large than 255.")
|
||||
|
||||
e, _ = MCPServerService.get_by_name_and_tenant(name=server_name, tenant_id=current_user.id)
|
||||
if e:
|
||||
return get_data_error_result(message="Duplicated MCP server name.")
|
||||
|
||||
url = req.get("url", "")
|
||||
if not url:
|
||||
return get_data_error_result(message="Invalid url.")
|
||||
|
||||
headers = safe_json_parse(req.get("headers", {}))
|
||||
req["headers"] = headers
|
||||
variables = safe_json_parse(req.get("variables", {}))
|
||||
variables.pop("tools", None)
|
||||
|
||||
timeout = get_float(req, "timeout", 10)
|
||||
|
||||
try:
|
||||
req["id"] = get_uuid()
|
||||
req["tenant_id"] = current_user.id
|
||||
|
||||
e, _ = TenantService.get_by_id(current_user.id)
|
||||
if not e:
|
||||
return get_data_error_result(message="Tenant not found.")
|
||||
|
||||
mcp_server = MCPServer(id=server_name, name=server_name, url=url, server_type=server_type, variables=variables, headers=headers)
|
||||
server_tools, err_message = get_mcp_tools([mcp_server], timeout)
|
||||
if err_message:
|
||||
return get_data_error_result(err_message)
|
||||
|
||||
tools = server_tools[server_name]
|
||||
tools = {tool["name"]: tool for tool in tools if isinstance(tool, dict) and "name" in tool}
|
||||
variables["tools"] = tools
|
||||
req["variables"] = variables
|
||||
|
||||
if not MCPServerService.insert(**req):
|
||||
return get_data_error_result("Failed to create MCP server.")
|
||||
|
||||
return get_json_result(data=req)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route("/update", methods=["POST"]) # noqa: F821
|
||||
@login_required
|
||||
@validate_request("mcp_id")
|
||||
def update() -> Response:
|
||||
req = request.get_json()
|
||||
|
||||
mcp_id = req.get("mcp_id", "")
|
||||
e, mcp_server = MCPServerService.get_by_id(mcp_id)
|
||||
if not e or mcp_server.tenant_id != current_user.id:
|
||||
return get_data_error_result(message=f"Cannot find MCP server {mcp_id} for user {current_user.id}")
|
||||
|
||||
server_type = req.get("server_type", mcp_server.server_type)
|
||||
if server_type and server_type not in VALID_MCP_SERVER_TYPES:
|
||||
return get_data_error_result(message="Unsupported MCP server type.")
|
||||
server_name = req.get("name", mcp_server.name)
|
||||
if server_name and len(server_name.encode("utf-8")) > 255:
|
||||
return get_data_error_result(message=f"Invalid MCP name or length is {len(server_name)} which is large than 255.")
|
||||
url = req.get("url", mcp_server.url)
|
||||
if not url:
|
||||
return get_data_error_result(message="Invalid url.")
|
||||
|
||||
headers = safe_json_parse(req.get("headers", mcp_server.headers))
|
||||
req["headers"] = headers
|
||||
|
||||
variables = safe_json_parse(req.get("variables", mcp_server.variables))
|
||||
variables.pop("tools", None)
|
||||
|
||||
timeout = get_float(req, "timeout", 10)
|
||||
|
||||
try:
|
||||
req["tenant_id"] = current_user.id
|
||||
req.pop("mcp_id", None)
|
||||
req["id"] = mcp_id
|
||||
|
||||
mcp_server = MCPServer(id=server_name, name=server_name, url=url, server_type=server_type, variables=variables, headers=headers)
|
||||
server_tools, err_message = get_mcp_tools([mcp_server], timeout)
|
||||
if err_message:
|
||||
return get_data_error_result(err_message)
|
||||
|
||||
tools = server_tools[server_name]
|
||||
tools = {tool["name"]: tool for tool in tools if isinstance(tool, dict) and "name" in tool}
|
||||
variables["tools"] = tools
|
||||
req["variables"] = variables
|
||||
|
||||
if not MCPServerService.filter_update([MCPServer.id == mcp_id, MCPServer.tenant_id == current_user.id], req):
|
||||
return get_data_error_result(message="Failed to updated MCP server.")
|
||||
|
||||
e, updated_mcp = MCPServerService.get_by_id(req["id"])
|
||||
if not e:
|
||||
return get_data_error_result(message="Failed to fetch updated MCP server.")
|
||||
|
||||
return get_json_result(data=updated_mcp.to_dict())
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route("/rm", methods=["POST"]) # noqa: F821
|
||||
@login_required
|
||||
@validate_request("mcp_ids")
|
||||
def rm() -> Response:
|
||||
req = request.get_json()
|
||||
mcp_ids = req.get("mcp_ids", [])
|
||||
|
||||
try:
|
||||
req["tenant_id"] = current_user.id
|
||||
|
||||
if not MCPServerService.delete_by_ids(mcp_ids):
|
||||
return get_data_error_result(message=f"Failed to delete MCP servers {mcp_ids}")
|
||||
|
||||
return get_json_result(data=True)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route("/import", methods=["POST"]) # noqa: F821
|
||||
@login_required
|
||||
@validate_request("mcpServers")
|
||||
def import_multiple() -> Response:
|
||||
req = request.get_json()
|
||||
servers = req.get("mcpServers", {})
|
||||
if not servers:
|
||||
return get_data_error_result(message="No MCP servers provided.")
|
||||
|
||||
timeout = get_float(req, "timeout", 10)
|
||||
|
||||
results = []
|
||||
try:
|
||||
for server_name, config in servers.items():
|
||||
if not all(key in config for key in {"type", "url"}):
|
||||
results.append({"server": server_name, "success": False, "message": "Missing required fields (type or url)"})
|
||||
continue
|
||||
|
||||
if not server_name or len(server_name.encode("utf-8")) > 255:
|
||||
results.append({"server": server_name, "success": False, "message": f"Invalid MCP name or length is {len(server_name)} which is large than 255."})
|
||||
continue
|
||||
|
||||
base_name = server_name
|
||||
new_name = base_name
|
||||
counter = 0
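# append a numeric suffix until the imported server name is unique within this tenant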
|
||||
|
||||
while True:
|
||||
e, _ = MCPServerService.get_by_name_and_tenant(name=new_name, tenant_id=current_user.id)
|
||||
if not e:
|
||||
break
|
||||
new_name = f"{base_name}_{counter}"
|
||||
counter += 1
|
||||
|
||||
create_data = {
|
||||
"id": get_uuid(),
|
||||
"tenant_id": current_user.id,
|
||||
"name": new_name,
|
||||
"url": config["url"],
|
||||
"server_type": config["type"],
|
||||
"variables": {"authorization_token": config.get("authorization_token", "")},
|
||||
}
|
||||
|
||||
headers = {"authorization_token": config["authorization_token"]} if "authorization_token" in config else {}
|
||||
variables = {k: v for k, v in config.items() if k not in {"type", "url", "headers"}}
|
||||
mcp_server = MCPServer(id=new_name, name=new_name, url=config["url"], server_type=config["type"], variables=variables, headers=headers)
|
||||
server_tools, err_message = get_mcp_tools([mcp_server], timeout)
|
||||
if err_message:
|
||||
results.append({"server": base_name, "success": False, "message": err_message})
|
||||
continue
|
||||
|
||||
tools = server_tools[new_name]
|
||||
tools = {tool["name"]: tool for tool in tools if isinstance(tool, dict) and "name" in tool}
|
||||
create_data["variables"]["tools"] = tools
|
||||
|
||||
if MCPServerService.insert(**create_data):
|
||||
result = {"server": server_name, "success": True, "action": "created", "id": create_data["id"], "new_name": new_name}
|
||||
if new_name != base_name:
|
||||
result["message"] = f"Renamed from '{base_name}' to '{new_name}' avoid duplication"
|
||||
results.append(result)
|
||||
else:
|
||||
results.append({"server": server_name, "success": False, "message": "Failed to create MCP server."})
|
||||
|
||||
return get_json_result(data={"results": results})
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route("/export", methods=["POST"]) # noqa: F821
|
||||
@login_required
|
||||
@validate_request("mcp_ids")
|
||||
def export_multiple() -> Response:
|
||||
req = request.get_json()
|
||||
mcp_ids = req.get("mcp_ids", [])
|
||||
|
||||
if not mcp_ids:
|
||||
return get_data_error_result(message="No MCP server IDs provided.")
|
||||
|
||||
try:
|
||||
exported_servers = {}
|
||||
|
||||
for mcp_id in mcp_ids:
|
||||
e, mcp_server = MCPServerService.get_by_id(mcp_id)
|
||||
|
||||
if e and mcp_server.tenant_id == current_user.id:
|
||||
server_key = mcp_server.name
|
||||
|
||||
exported_servers[server_key] = {
|
||||
"type": mcp_server.server_type,
|
||||
"url": mcp_server.url,
|
||||
"name": mcp_server.name,
|
||||
"authorization_token": mcp_server.variables.get("authorization_token", ""),
|
||||
"tools": mcp_server.variables.get("tools", {}),
|
||||
}
|
||||
|
||||
return get_json_result(data={"mcpServers": exported_servers})
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route("/list_tools", methods=["POST"]) # noqa: F821
|
||||
@login_required
|
||||
@validate_request("mcp_ids")
|
||||
def list_tools() -> Response:
|
||||
req = request.get_json()
|
||||
mcp_ids = req.get("mcp_ids", [])
|
||||
if not mcp_ids:
|
||||
return get_data_error_result(message="No MCP server IDs provided.")
|
||||
|
||||
timeout = get_float(req, "timeout", 10)
|
||||
|
||||
results = {}
|
||||
tool_call_sessions = []
|
||||
try:
|
||||
for mcp_id in mcp_ids:
|
||||
e, mcp_server = MCPServerService.get_by_id(mcp_id)
|
||||
|
||||
if e and mcp_server.tenant_id == current_user.id:
|
||||
server_key = mcp_server.id
|
||||
|
||||
cached_tools = mcp_server.variables.get("tools", {})
|
||||
|
||||
tool_call_session = MCPToolCallSession(mcp_server, mcp_server.variables)
|
||||
tool_call_sessions.append(tool_call_session)
|
||||
|
||||
try:
|
||||
tools = tool_call_session.get_tools(timeout)
|
||||
except Exception as e:
|
||||
tools = []
|
||||
return get_data_error_result(message=f"MCP list tools error: {e}")
|
||||
|
||||
results[server_key] = []
|
||||
for tool in tools:
|
||||
tool_dict = tool.model_dump()
|
||||
cached_tool = cached_tools.get(tool_dict["name"], {})
|
||||
|
||||
tool_dict["enabled"] = cached_tool.get("enabled", True)
|
||||
results[server_key].append(tool_dict)
|
||||
|
||||
return get_json_result(data=results)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
finally:
|
||||
# PERF: blocking call to close sessions — consider moving to background thread or task queue
|
||||
close_multiple_mcp_toolcall_sessions(tool_call_sessions)
|
||||
|
||||
|
||||
@manager.route("/test_tool", methods=["POST"]) # noqa: F821
|
||||
@login_required
|
||||
@validate_request("mcp_id", "tool_name", "arguments")
|
||||
def test_tool() -> Response:
|
||||
req = request.get_json()
|
||||
mcp_id = req.get("mcp_id", "")
|
||||
if not mcp_id:
|
||||
return get_data_error_result(message="No MCP server ID provided.")
|
||||
|
||||
timeout = get_float(req, "timeout", 10)
|
||||
|
||||
tool_name = req.get("tool_name", "")
|
||||
arguments = req.get("arguments", {})
|
||||
if not all([tool_name, arguments]):
|
||||
return get_data_error_result(message="Require provide tool name and arguments.")
|
||||
|
||||
tool_call_sessions = []
|
||||
try:
|
||||
e, mcp_server = MCPServerService.get_by_id(mcp_id)
|
||||
if not e or mcp_server.tenant_id != current_user.id:
|
||||
return get_data_error_result(message=f"Cannot find MCP server {mcp_id} for user {current_user.id}")
|
||||
|
||||
tool_call_session = MCPToolCallSession(mcp_server, mcp_server.variables)
|
||||
tool_call_sessions.append(tool_call_session)
|
||||
result = tool_call_session.tool_call(tool_name, arguments, timeout)
|
||||
|
||||
# PERF: blocking call to close sessions — consider moving to background thread or task queue
|
||||
close_multiple_mcp_toolcall_sessions(tool_call_sessions)
|
||||
return get_json_result(data=result)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route("/cache_tools", methods=["POST"]) # noqa: F821
|
||||
@login_required
|
||||
@validate_request("mcp_id", "tools")
|
||||
def cache_tool() -> Response:
|
||||
req = request.get_json()
|
||||
mcp_id = req.get("mcp_id", "")
|
||||
if not mcp_id:
|
||||
return get_data_error_result(message="No MCP server ID provided.")
|
||||
tools = req.get("tools", [])
|
||||
|
||||
e, mcp_server = MCPServerService.get_by_id(mcp_id)
|
||||
if not e or mcp_server.tenant_id != current_user.id:
|
||||
return get_data_error_result(message=f"Cannot find MCP server {mcp_id} for user {current_user.id}")
|
||||
|
||||
variables = mcp_server.variables
|
||||
tools = {tool["name"]: tool for tool in tools if isinstance(tool, dict) and "name" in tool}
|
||||
variables["tools"] = tools
|
||||
|
||||
if not MCPServerService.filter_update([MCPServer.id == mcp_id, MCPServer.tenant_id == current_user.id], {"variables": variables}):
|
||||
return get_data_error_result(message="Failed to updated MCP server.")
|
||||
|
||||
return get_json_result(data=tools)
|
||||
|
||||
|
||||
@manager.route("/test_mcp", methods=["POST"]) # noqa: F821
|
||||
@validate_request("url", "server_type")
|
||||
def test_mcp() -> Response:
|
||||
req = request.get_json()
|
||||
|
||||
url = req.get("url", "")
|
||||
if not url:
|
||||
return get_data_error_result(message="Invalid MCP url.")
|
||||
|
||||
server_type = req.get("server_type", "")
|
||||
if server_type not in VALID_MCP_SERVER_TYPES:
|
||||
return get_data_error_result(message="Unsupported MCP server type.")
|
||||
|
||||
timeout = get_float(req, "timeout", 10)
|
||||
headers = safe_json_parse(req.get("headers", {}))
|
||||
variables = safe_json_parse(req.get("variables", {}))
|
||||
|
||||
mcp_server = MCPServer(id=f"{server_type}: {url}", server_type=server_type, url=url, headers=headers, variables=variables)
|
||||
|
||||
result = []
|
||||
try:
|
||||
tool_call_session = MCPToolCallSession(mcp_server, mcp_server.variables)
|
||||
|
||||
try:
|
||||
tools = tool_call_session.get_tools(timeout)
|
||||
except Exception as e:
|
||||
tools = []
|
||||
return get_data_error_result(message=f"Test MCP error: {e}")
|
||||
finally:
|
||||
# PERF: blocking call to close sessions — consider moving to background thread or task queue
|
||||
close_multiple_mcp_toolcall_sessions([tool_call_session])
|
||||
|
||||
for tool in tools:
|
||||
tool_dict = tool.model_dump()
|
||||
tool_dict["enabled"] = True
|
||||
result.append(tool_dict)
|
||||
|
||||
return get_json_result(data=result)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
12
api/apps/plugin_app.py
Normal file
@@ -0,0 +1,12 @@
from flask import Response
from flask_login import login_required
from api.utils.api_utils import get_json_result
from plugin import GlobalPluginManager


@manager.route('/llm_tools', methods=['GET'])  # noqa: F821
@login_required
def llm_tools() -> Response:
    tools = GlobalPluginManager.get_llm_tools()
    tools_metadata = [t.get_metadata() for t in tools]

    return get_json_result(data=tools_metadata)
128
api/apps/sdk/agent.py
Normal file
@@ -0,0 +1,128 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import json
|
||||
import time
|
||||
from typing import Any, cast
|
||||
from api.db.services.canvas_service import UserCanvasService
|
||||
from api.db.services.user_canvas_version import UserCanvasVersionService
|
||||
from api.settings import RetCode
|
||||
from api.utils import get_uuid
|
||||
from api.utils.api_utils import get_data_error_result, get_error_data_result, get_json_result, token_required
|
||||
from api.utils.api_utils import get_result
|
||||
from flask import request
|
||||
|
||||
@manager.route('/agents', methods=['GET']) # noqa: F821
|
||||
@token_required
|
||||
def list_agents(tenant_id):
|
||||
id = request.args.get("id")
|
||||
title = request.args.get("title")
|
||||
if id or title:
|
||||
canvas = UserCanvasService.query(id=id, title=title, user_id=tenant_id)
|
||||
if not canvas:
|
||||
return get_error_data_result("The agent doesn't exist.")
|
||||
page_number = int(request.args.get("page", 1))
|
||||
items_per_page = int(request.args.get("page_size", 30))
|
||||
orderby = request.args.get("orderby", "update_time")
|
||||
if request.args.get("desc") == "False" or request.args.get("desc") == "false":
|
||||
desc = False
|
||||
else:
|
||||
desc = True
|
||||
canvas = UserCanvasService.get_list(tenant_id, page_number, items_per_page, orderby, desc, id, title)
|
||||
return get_result(data=canvas)
|
||||
|
||||
|
||||
@manager.route("/agents", methods=["POST"]) # noqa: F821
|
||||
@token_required
|
||||
def create_agent(tenant_id: str):
|
||||
req: dict[str, Any] = cast(dict[str, Any], request.json)
|
||||
req["user_id"] = tenant_id
|
||||
|
||||
if req.get("dsl") is not None:
|
||||
if not isinstance(req["dsl"], str):
|
||||
req["dsl"] = json.dumps(req["dsl"], ensure_ascii=False)
|
||||
|
||||
req["dsl"] = json.loads(req["dsl"])
|
||||
else:
|
||||
return get_json_result(data=False, message="No DSL data in request.", code=RetCode.ARGUMENT_ERROR)
|
||||
|
||||
if req.get("title") is not None:
|
||||
req["title"] = req["title"].strip()
|
||||
else:
|
||||
return get_json_result(data=False, message="No title in request.", code=RetCode.ARGUMENT_ERROR)
|
||||
|
||||
if UserCanvasService.query(user_id=tenant_id, title=req["title"]):
|
||||
return get_data_error_result(message=f"Agent with title {req['title']} already exists.")
|
||||
|
||||
agent_id = get_uuid()
|
||||
req["id"] = agent_id
|
||||
|
||||
if not UserCanvasService.save(**req):
|
||||
return get_data_error_result(message="Fail to create agent.")
|
||||
|
||||
UserCanvasVersionService.insert(
|
||||
user_canvas_id=agent_id,
|
||||
title="{0}_{1}".format(req["title"], time.strftime("%Y_%m_%d_%H_%M_%S")),
|
||||
dsl=req["dsl"]
|
||||
)
|
||||
|
||||
return get_json_result(data=True)
|
||||
|
||||
|
||||
@manager.route("/agents/<agent_id>", methods=["PUT"]) # noqa: F821
|
||||
@token_required
|
||||
def update_agent(tenant_id: str, agent_id: str):
|
||||
req: dict[str, Any] = {k: v for k, v in cast(dict[str, Any], request.json).items() if v is not None}
|
||||
req["user_id"] = tenant_id
|
||||
|
||||
if req.get("dsl") is not None:
|
||||
if not isinstance(req["dsl"], str):
|
||||
req["dsl"] = json.dumps(req["dsl"], ensure_ascii=False)
|
||||
|
||||
req["dsl"] = json.loads(req["dsl"])
|
||||
|
||||
if req.get("title") is not None:
|
||||
req["title"] = req["title"].strip()
|
||||
|
||||
if not UserCanvasService.query(user_id=tenant_id, id=agent_id):
|
||||
return get_json_result(
|
||||
data=False, message="Only owner of canvas authorized for this operation.",
|
||||
code=RetCode.OPERATING_ERROR)
|
||||
|
||||
UserCanvasService.update_by_id(agent_id, req)
|
||||
|
||||
if req.get("dsl") is not None:
|
||||
UserCanvasVersionService.insert(
|
||||
user_canvas_id=agent_id,
|
||||
title="{0}_{1}".format(req["title"], time.strftime("%Y_%m_%d_%H_%M_%S")),
|
||||
dsl=req["dsl"]
|
||||
)
|
||||
|
||||
UserCanvasVersionService.delete_all_versions(agent_id)
|
||||
|
||||
return get_json_result(data=True)
|
||||
|
||||
|
||||
@manager.route("/agents/<agent_id>", methods=["DELETE"]) # noqa: F821
|
||||
@token_required
|
||||
def delete_agent(tenant_id: str, agent_id: str):
|
||||
if not UserCanvasService.query(user_id=tenant_id, id=agent_id):
|
||||
return get_json_result(
|
||||
data=False, message="Only owner of canvas authorized for this operation.",
|
||||
code=RetCode.OPERATING_ERROR)
|
||||
|
||||
UserCanvasService.delete_by_id(agent_id)
|
||||
return get_json_result(data=True)
325
api/apps/sdk/chat.py
Normal file
@@ -0,0 +1,325 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import logging
|
||||
|
||||
from flask import request
|
||||
|
||||
from api import settings
|
||||
from api.db import StatusEnum
|
||||
from api.db.services.dialog_service import DialogService
|
||||
from api.db.services.knowledgebase_service import KnowledgebaseService
|
||||
from api.db.services.tenant_llm_service import TenantLLMService
|
||||
from api.db.services.user_service import TenantService
|
||||
from api.utils import get_uuid
|
||||
from api.utils.api_utils import check_duplicate_ids, get_error_data_result, get_result, token_required
|
||||
|
||||
|
||||
@manager.route("/chats", methods=["POST"]) # noqa: F821
|
||||
@token_required
|
||||
def create(tenant_id):
|
||||
req = request.json
|
||||
ids = [i for i in req.get("dataset_ids", []) if i]
|
||||
for kb_id in ids:
|
||||
kbs = KnowledgebaseService.accessible(kb_id=kb_id, user_id=tenant_id)
|
||||
if not kbs:
|
||||
return get_error_data_result(f"You don't own the dataset {kb_id}")
|
||||
kbs = KnowledgebaseService.query(id=kb_id)
|
||||
kb = kbs[0]
|
||||
if kb.chunk_num == 0:
|
||||
return get_error_data_result(f"The dataset {kb_id} doesn't own parsed file")
|
||||
|
||||
kbs = KnowledgebaseService.get_by_ids(ids) if ids else []
|
||||
embd_ids = [TenantLLMService.split_model_name_and_factory(kb.embd_id)[0] for kb in kbs] # remove vendor suffix for comparison
|
||||
embd_count = list(set(embd_ids))
|
||||
if len(embd_count) > 1:
|
||||
return get_result(message='Datasets use different embedding models."', code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
req["kb_ids"] = ids
|
||||
# llm
|
||||
llm = req.get("llm")
|
||||
if llm:
|
||||
if "model_name" in llm:
|
||||
req["llm_id"] = llm.pop("model_name")
|
||||
if req.get("llm_id") is not None:
|
||||
llm_name, llm_factory = TenantLLMService.split_model_name_and_factory(req["llm_id"])
|
||||
if not TenantLLMService.query(tenant_id=tenant_id, llm_name=llm_name, llm_factory=llm_factory, model_type="chat"):
|
||||
return get_error_data_result(f"`model_name` {req.get('llm_id')} doesn't exist")
|
||||
req["llm_setting"] = req.pop("llm")
|
||||
e, tenant = TenantService.get_by_id(tenant_id)
|
||||
if not e:
|
||||
return get_error_data_result(message="Tenant not found!")
|
||||
# prompt
|
||||
prompt = req.get("prompt")
|
||||
key_mapping = {"parameters": "variables", "prologue": "opener", "quote": "show_quote", "system": "prompt", "rerank_id": "rerank_model", "vector_similarity_weight": "keywords_similarity_weight"}
|
||||
key_list = ["similarity_threshold", "vector_similarity_weight", "top_n", "rerank_id", "top_k"]
|
||||
if prompt:
|
||||
for new_key, old_key in key_mapping.items():
|
||||
if old_key in prompt:
|
||||
prompt[new_key] = prompt.pop(old_key)
|
||||
for key in key_list:
|
||||
if key in prompt:
|
||||
req[key] = prompt.pop(key)
|
||||
req["prompt_config"] = req.pop("prompt")
|
||||
# init
|
||||
req["id"] = get_uuid()
|
||||
req["description"] = req.get("description", "A helpful Assistant")
|
||||
req["icon"] = req.get("avatar", "")
|
||||
req["top_n"] = req.get("top_n", 6)
|
||||
req["top_k"] = req.get("top_k", 1024)
|
||||
req["rerank_id"] = req.get("rerank_id", "")
|
||||
if req.get("rerank_id"):
|
||||
value_rerank_model = ["BAAI/bge-reranker-v2-m3", "maidalun1020/bce-reranker-base_v1"]
|
||||
if req["rerank_id"] not in value_rerank_model and not TenantLLMService.query(tenant_id=tenant_id, llm_name=req.get("rerank_id"), model_type="rerank"):
|
||||
return get_error_data_result(f"`rerank_model` {req.get('rerank_id')} doesn't exist")
|
||||
if not req.get("llm_id"):
|
||||
req["llm_id"] = tenant.llm_id
|
||||
if not req.get("name"):
|
||||
return get_error_data_result(message="`name` is required.")
|
||||
if DialogService.query(name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value):
|
||||
return get_error_data_result(message="Duplicated chat name in creating chat.")
|
||||
# tenant_id
|
||||
if req.get("tenant_id"):
|
||||
return get_error_data_result(message="`tenant_id` must not be provided.")
|
||||
req["tenant_id"] = tenant_id
|
||||
# prompt more parameter
|
||||
default_prompt = {
|
||||
"system": """You are an intelligent assistant. Please summarize the content of the knowledge base to answer the question. Please list the data in the knowledge base and answer in detail. When all knowledge base content is irrelevant to the question, your answer must include the sentence "The answer you are looking for is not found in the knowledge base!" Answers need to consider chat history.
|
||||
Here is the knowledge base:
|
||||
{knowledge}
|
||||
The above is the knowledge base.""",
|
||||
"prologue": "Hi! I'm your assistant. What can I do for you?",
|
||||
"parameters": [{"key": "knowledge", "optional": False}],
|
||||
"empty_response": "Sorry! No relevant content was found in the knowledge base!",
|
||||
"quote": True,
|
||||
"tts": False,
|
||||
"refine_multiturn": True,
|
||||
}
|
||||
key_list_2 = ["system", "prologue", "parameters", "empty_response", "quote", "tts", "refine_multiturn"]
|
||||
if "prompt_config" not in req:
|
||||
req["prompt_config"] = {}
|
||||
for key in key_list_2:
|
||||
temp = req["prompt_config"].get(key)
|
||||
if (not temp and key == "system") or (key not in req["prompt_config"]):
|
||||
req["prompt_config"][key] = default_prompt[key]
|
||||
for p in req["prompt_config"]["parameters"]:
|
||||
if p["optional"]:
|
||||
continue
|
||||
if req["prompt_config"]["system"].find("{%s}" % p["key"]) < 0:
|
||||
return get_error_data_result(message="Parameter '{}' is not used".format(p["key"]))
|
||||
# save
|
||||
if not DialogService.save(**req):
|
||||
return get_error_data_result(message="Fail to new a chat!")
|
||||
# response
|
||||
e, res = DialogService.get_by_id(req["id"])
|
||||
if not e:
|
||||
return get_error_data_result(message="Fail to new a chat!")
|
||||
res = res.to_json()
|
||||
renamed_dict = {}
|
||||
for key, value in res["prompt_config"].items():
|
||||
new_key = key_mapping.get(key, key)
|
||||
renamed_dict[new_key] = value
|
||||
res["prompt"] = renamed_dict
|
||||
del res["prompt_config"]
|
||||
new_dict = {"similarity_threshold": res["similarity_threshold"], "keywords_similarity_weight": 1 - res["vector_similarity_weight"], "top_n": res["top_n"], "rerank_model": res["rerank_id"]}
|
||||
res["prompt"].update(new_dict)
|
||||
for key in key_list:
|
||||
del res[key]
|
||||
res["llm"] = res.pop("llm_setting")
|
||||
res["llm"]["model_name"] = res.pop("llm_id")
|
||||
del res["kb_ids"]
|
||||
res["dataset_ids"] = req.get("dataset_ids", [])
|
||||
res["avatar"] = res.pop("icon")
|
||||
return get_result(data=res)
|
||||
|
||||
|
||||
@manager.route("/chats/<chat_id>", methods=["PUT"]) # noqa: F821
|
||||
@token_required
|
||||
def update(tenant_id, chat_id):
|
||||
if not DialogService.query(tenant_id=tenant_id, id=chat_id, status=StatusEnum.VALID.value):
|
||||
return get_error_data_result(message="You do not own the chat")
|
||||
req = request.json
|
||||
ids = req.get("dataset_ids", [])
|
||||
if "show_quotation" in req:
|
||||
req["do_refer"] = req.pop("show_quotation")
|
||||
if ids:
|
||||
for kb_id in ids:
|
||||
kbs = KnowledgebaseService.accessible(kb_id=kb_id, user_id=tenant_id)
|
||||
if not kbs:
|
||||
return get_error_data_result(f"You don't own the dataset {kb_id}")
|
||||
kbs = KnowledgebaseService.query(id=kb_id)
|
||||
kb = kbs[0]
|
||||
if kb.chunk_num == 0:
|
||||
return get_error_data_result(f"The dataset {kb_id} doesn't own parsed file")
|
||||
|
||||
kbs = KnowledgebaseService.get_by_ids(ids)
|
||||
embd_ids = [TenantLLMService.split_model_name_and_factory(kb.embd_id)[0] for kb in kbs] # remove vendor suffix for comparison
|
||||
embd_count = list(set(embd_ids))
|
||||
if len(embd_count) > 1:
|
||||
return get_result(message='Datasets use different embedding models."', code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
req["kb_ids"] = ids
|
||||
llm = req.get("llm")
|
||||
if llm:
|
||||
if "model_name" in llm:
|
||||
req["llm_id"] = llm.pop("model_name")
|
||||
if req.get("llm_id") is not None:
|
||||
llm_name, llm_factory = TenantLLMService.split_model_name_and_factory(req["llm_id"])
|
||||
if not TenantLLMService.query(tenant_id=tenant_id, llm_name=llm_name, llm_factory=llm_factory, model_type="chat"):
|
||||
return get_error_data_result(f"`model_name` {req.get('llm_id')} doesn't exist")
|
||||
req["llm_setting"] = req.pop("llm")
|
||||
e, tenant = TenantService.get_by_id(tenant_id)
|
||||
if not e:
|
||||
return get_error_data_result(message="Tenant not found!")
|
||||
# prompt
|
||||
prompt = req.get("prompt")
|
||||
key_mapping = {"parameters": "variables", "prologue": "opener", "quote": "show_quote", "system": "prompt", "rerank_id": "rerank_model", "vector_similarity_weight": "keywords_similarity_weight"}
|
||||
key_list = ["similarity_threshold", "vector_similarity_weight", "top_n", "rerank_id", "top_k"]
|
||||
if prompt:
|
||||
for new_key, old_key in key_mapping.items():
|
||||
if old_key in prompt:
|
||||
prompt[new_key] = prompt.pop(old_key)
|
||||
for key in key_list:
|
||||
if key in prompt:
|
||||
req[key] = prompt.pop(key)
|
||||
req["prompt_config"] = req.pop("prompt")
|
||||
e, res = DialogService.get_by_id(chat_id)
|
||||
res = res.to_json()
|
||||
if req.get("rerank_id"):
|
||||
value_rerank_model = ["BAAI/bge-reranker-v2-m3", "maidalun1020/bce-reranker-base_v1"]
|
||||
if req["rerank_id"] not in value_rerank_model and not TenantLLMService.query(tenant_id=tenant_id, llm_name=req.get("rerank_id"), model_type="rerank"):
|
||||
return get_error_data_result(f"`rerank_model` {req.get('rerank_id')} doesn't exist")
|
||||
if "name" in req:
|
||||
if not req.get("name"):
|
||||
return get_error_data_result(message="`name` cannot be empty.")
|
||||
if req["name"].lower() != res["name"].lower() and len(DialogService.query(name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value)) > 0:
|
||||
return get_error_data_result(message="Duplicated chat name in updating chat.")
|
||||
if "prompt_config" in req:
|
||||
res["prompt_config"].update(req["prompt_config"])
|
||||
for p in res["prompt_config"]["parameters"]:
|
||||
if p["optional"]:
|
||||
continue
|
||||
if res["prompt_config"]["system"].find("{%s}" % p["key"]) < 0:
|
||||
return get_error_data_result(message="Parameter '{}' is not used".format(p["key"]))
|
||||
if "llm_setting" in req:
|
||||
res["llm_setting"].update(req["llm_setting"])
|
||||
req["prompt_config"] = res["prompt_config"]
|
||||
req["llm_setting"] = res["llm_setting"]
|
||||
# avatar
|
||||
if "avatar" in req:
|
||||
req["icon"] = req.pop("avatar")
|
||||
if "dataset_ids" in req:
|
||||
req.pop("dataset_ids")
|
||||
if not DialogService.update_by_id(chat_id, req):
|
||||
return get_error_data_result(message="Chat not found!")
|
||||
return get_result()
|
||||
|
||||
|
||||
@manager.route("/chats", methods=["DELETE"]) # noqa: F821
|
||||
@token_required
|
||||
def delete(tenant_id):
|
||||
errors = []
|
||||
success_count = 0
|
||||
req = request.json
|
||||
if not req:
|
||||
ids = None
|
||||
else:
|
||||
ids = req.get("ids")
|
||||
if not ids:
|
||||
id_list = []
|
||||
dias = DialogService.query(tenant_id=tenant_id, status=StatusEnum.VALID.value)
|
||||
for dia in dias:
|
||||
id_list.append(dia.id)
|
||||
else:
|
||||
id_list = ids
|
||||
|
||||
unique_id_list, duplicate_messages = check_duplicate_ids(id_list, "assistant")
|
||||
|
||||
for id in unique_id_list:
|
||||
if not DialogService.query(tenant_id=tenant_id, id=id, status=StatusEnum.VALID.value):
|
||||
errors.append(f"Assistant({id}) not found.")
|
||||
continue
|
||||
temp_dict = {"status": StatusEnum.INVALID.value}
|
||||
DialogService.update_by_id(id, temp_dict)
|
||||
success_count += 1
|
||||
|
||||
if errors:
|
||||
if success_count > 0:
|
||||
return get_result(data={"success_count": success_count, "errors": errors}, message=f"Partially deleted {success_count} chats with {len(errors)} errors")
|
||||
else:
|
||||
return get_error_data_result(message="; ".join(errors))
|
||||
|
||||
if duplicate_messages:
|
||||
if success_count > 0:
|
||||
return get_result(message=f"Partially deleted {success_count} chats with {len(duplicate_messages)} errors", data={"success_count": success_count, "errors": duplicate_messages})
|
||||
else:
|
||||
return get_error_data_result(message=";".join(duplicate_messages))
|
||||
|
||||
return get_result()
|
||||
|
||||
|
||||
@manager.route("/chats", methods=["GET"]) # noqa: F821
|
||||
@token_required
|
||||
def list_chat(tenant_id):
|
||||
id = request.args.get("id")
|
||||
name = request.args.get("name")
|
||||
if id or name:
|
||||
chat = DialogService.query(id=id, name=name, status=StatusEnum.VALID.value, tenant_id=tenant_id)
|
||||
if not chat:
|
||||
return get_error_data_result(message="The chat doesn't exist")
|
||||
page_number = int(request.args.get("page", 1))
|
||||
items_per_page = int(request.args.get("page_size", 30))
|
||||
orderby = request.args.get("orderby", "create_time")
|
||||
if request.args.get("desc") == "False" or request.args.get("desc") == "false":
|
||||
desc = False
|
||||
else:
|
||||
desc = True
|
||||
chats = DialogService.get_list(tenant_id, page_number, items_per_page, orderby, desc, id, name)
|
||||
if not chats:
|
||||
return get_result(data=[])
|
||||
list_assts = []
|
||||
key_mapping = {
|
||||
"parameters": "variables",
|
||||
"prologue": "opener",
|
||||
"quote": "show_quote",
|
||||
"system": "prompt",
|
||||
"rerank_id": "rerank_model",
|
||||
"vector_similarity_weight": "keywords_similarity_weight",
|
||||
"do_refer": "show_quotation",
|
||||
}
|
||||
key_list = ["similarity_threshold", "vector_similarity_weight", "top_n", "rerank_id"]
|
||||
for res in chats:
|
||||
renamed_dict = {}
|
||||
for key, value in res["prompt_config"].items():
|
||||
new_key = key_mapping.get(key, key)
|
||||
renamed_dict[new_key] = value
|
||||
res["prompt"] = renamed_dict
|
||||
del res["prompt_config"]
|
||||
new_dict = {"similarity_threshold": res["similarity_threshold"], "keywords_similarity_weight": 1 - res["vector_similarity_weight"], "top_n": res["top_n"], "rerank_model": res["rerank_id"]}
|
||||
res["prompt"].update(new_dict)
|
||||
for key in key_list:
|
||||
del res[key]
|
||||
res["llm"] = res.pop("llm_setting")
|
||||
res["llm"]["model_name"] = res.pop("llm_id")
|
||||
kb_list = []
|
||||
for kb_id in res["kb_ids"]:
|
||||
kb = KnowledgebaseService.query(id=kb_id)
|
||||
if not kb:
|
||||
logging.warning(f"The kb {kb_id} does not exist.")
|
||||
continue
|
||||
kb_list.append(kb[0].to_json())
|
||||
del res["kb_ids"]
|
||||
res["datasets"] = kb_list
|
||||
res["avatar"] = res.pop("icon")
|
||||
list_assts.append(res)
|
||||
return get_result(data=list_assts)
527
api/apps/sdk/dataset.py
Normal file
@@ -0,0 +1,527 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
|
||||
import logging
|
||||
import os
|
||||
import json
|
||||
from flask import request
|
||||
from peewee import OperationalError
|
||||
from api import settings
|
||||
from api.db import FileSource, StatusEnum
|
||||
from api.db.db_models import File
|
||||
from api.db.services.document_service import DocumentService
|
||||
from api.db.services.file2document_service import File2DocumentService
|
||||
from api.db.services.file_service import FileService
|
||||
from api.db.services.knowledgebase_service import KnowledgebaseService
|
||||
from api.db.services.user_service import TenantService
|
||||
from api.utils import get_uuid
|
||||
from api.utils.api_utils import (
|
||||
deep_merge,
|
||||
get_error_argument_result,
|
||||
get_error_data_result,
|
||||
get_error_operating_result,
|
||||
get_error_permission_result,
|
||||
get_parser_config,
|
||||
get_result,
|
||||
remap_dictionary_keys,
|
||||
token_required,
|
||||
verify_embedding_availability,
|
||||
)
|
||||
from api.utils.validation_utils import (
|
||||
CreateDatasetReq,
|
||||
DeleteDatasetReq,
|
||||
ListDatasetReq,
|
||||
UpdateDatasetReq,
|
||||
validate_and_parse_json_request,
|
||||
validate_and_parse_request_args,
|
||||
)
|
||||
from rag.nlp import search
|
||||
from rag.settings import PAGERANK_FLD
|
||||
|
||||
|
||||
@manager.route("/datasets", methods=["POST"]) # noqa: F821
|
||||
@token_required
|
||||
def create(tenant_id):
|
||||
"""
|
||||
Create a new dataset.
|
||||
---
|
||||
tags:
|
||||
- Datasets
|
||||
security:
|
||||
- ApiKeyAuth: []
|
||||
parameters:
|
||||
- in: header
|
||||
name: Authorization
|
||||
type: string
|
||||
required: true
|
||||
description: Bearer token for authentication.
|
||||
- in: body
|
||||
name: body
|
||||
description: Dataset creation parameters.
|
||||
required: true
|
||||
schema:
|
||||
type: object
|
||||
required:
|
||||
- name
|
||||
properties:
|
||||
name:
|
||||
type: string
|
||||
description: Name of the dataset.
|
||||
avatar:
|
||||
type: string
|
||||
description: Base64 encoding of the avatar.
|
||||
description:
|
||||
type: string
|
||||
description: Description of the dataset.
|
||||
embedding_model:
|
||||
type: string
|
||||
description: Embedding model Name.
|
||||
permission:
|
||||
type: string
|
||||
enum: ['me', 'team']
|
||||
description: Dataset permission.
|
||||
chunk_method:
|
||||
type: string
|
||||
enum: ["naive", "book", "email", "laws", "manual", "one", "paper",
|
||||
"picture", "presentation", "qa", "table", "tag"
|
||||
]
|
||||
description: Chunking method.
|
||||
parser_config:
|
||||
type: object
|
||||
description: Parser configuration.
|
||||
responses:
|
||||
200:
|
||||
description: Successful operation.
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
data:
|
||||
type: object
|
||||
"""
|
||||
# Field name transformations during model dump:
|
||||
# | Original | Dump Output |
|
||||
# |----------------|-------------|
|
||||
# | embedding_model| embd_id |
|
||||
# | chunk_method | parser_id |
|
||||
req, err = validate_and_parse_json_request(request, CreateDatasetReq)
|
||||
if err is not None:
|
||||
return get_error_argument_result(err)
|
||||
|
||||
try:
|
||||
if KnowledgebaseService.get_or_none(name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value):
|
||||
return get_error_operating_result(message=f"Dataset name '{req['name']}' already exists")
|
||||
|
||||
req["parser_config"] = get_parser_config(req["parser_id"], req["parser_config"])
|
||||
req["id"] = get_uuid()
|
||||
req["tenant_id"] = tenant_id
|
||||
req["created_by"] = tenant_id
|
||||
|
||||
ok, t = TenantService.get_by_id(tenant_id)
|
||||
if not ok:
|
||||
return get_error_permission_result(message="Tenant not found")
|
||||
|
||||
if not req.get("embd_id"):
|
||||
req["embd_id"] = t.embd_id
|
||||
else:
|
||||
ok, err = verify_embedding_availability(req["embd_id"], tenant_id)
|
||||
if not ok:
|
||||
return err
|
||||
|
||||
if not KnowledgebaseService.save(**req):
|
||||
return get_error_data_result(message="Create dataset error.(Database error)")
|
||||
|
||||
ok, k = KnowledgebaseService.get_by_id(req["id"])
|
||||
if not ok:
|
||||
return get_error_data_result(message="Dataset created failed")
|
||||
|
||||
response_data = remap_dictionary_keys(k.to_dict())
|
||||
return get_result(data=response_data)
|
||||
except OperationalError as e:
|
||||
logging.exception(e)
|
||||
return get_error_data_result(message="Database operation failed")
|
||||
|
||||
|
||||
@manager.route("/datasets", methods=["DELETE"]) # noqa: F821
|
||||
@token_required
|
||||
def delete(tenant_id):
|
||||
"""
|
||||
Delete datasets.
|
||||
---
|
||||
tags:
|
||||
- Datasets
|
||||
security:
|
||||
- ApiKeyAuth: []
|
||||
parameters:
|
||||
- in: header
|
||||
name: Authorization
|
||||
type: string
|
||||
required: true
|
||||
description: Bearer token for authentication.
|
||||
- in: body
|
||||
name: body
|
||||
description: Dataset deletion parameters.
|
||||
required: true
|
||||
schema:
|
||||
type: object
|
||||
required:
|
||||
- ids
|
||||
properties:
|
||||
ids:
|
||||
type: array or null
|
||||
items:
|
||||
type: string
|
||||
description: |
|
||||
Specifies the datasets to delete:
|
||||
- If `null`, all datasets will be deleted.
|
||||
- If an array of IDs, only the specified datasets will be deleted.
|
||||
- If an empty array, no datasets will be deleted.
|
||||
responses:
|
||||
200:
|
||||
description: Successful operation.
|
||||
schema:
|
||||
type: object
|
||||
"""
|
||||
req, err = validate_and_parse_json_request(request, DeleteDatasetReq)
|
||||
if err is not None:
|
||||
return get_error_argument_result(err)
|
||||
|
||||
try:
|
||||
kb_id_instance_pairs = []
|
||||
if req["ids"] is None:
|
||||
kbs = KnowledgebaseService.query(tenant_id=tenant_id)
|
||||
for kb in kbs:
|
||||
kb_id_instance_pairs.append((kb.id, kb))
|
||||
|
||||
else:
|
||||
error_kb_ids = []
|
||||
for kb_id in req["ids"]:
|
||||
kb = KnowledgebaseService.get_or_none(id=kb_id, tenant_id=tenant_id)
|
||||
if kb is None:
|
||||
error_kb_ids.append(kb_id)
|
||||
continue
|
||||
kb_id_instance_pairs.append((kb_id, kb))
|
||||
if len(error_kb_ids) > 0:
|
||||
return get_error_permission_result(message=f"""User '{tenant_id}' lacks permission for datasets: '{", ".join(error_kb_ids)}'""")
|
||||
|
||||
errors = []
|
||||
success_count = 0
|
||||
for kb_id, kb in kb_id_instance_pairs:
|
||||
for doc in DocumentService.query(kb_id=kb_id):
|
||||
if not DocumentService.remove_document(doc, tenant_id):
|
||||
errors.append(f"Remove document '{doc.id}' error for dataset '{kb_id}'")
|
||||
continue
|
||||
f2d = File2DocumentService.get_by_document_id(doc.id)
|
||||
FileService.filter_delete(
|
||||
[
|
||||
File.source_type == FileSource.KNOWLEDGEBASE,
|
||||
File.id == f2d[0].file_id,
|
||||
]
|
||||
)
|
||||
File2DocumentService.delete_by_document_id(doc.id)
|
||||
FileService.filter_delete([File.source_type == FileSource.KNOWLEDGEBASE, File.type == "folder", File.name == kb.name])
|
||||
if not KnowledgebaseService.delete_by_id(kb_id):
|
||||
errors.append(f"Delete dataset error for {kb_id}")
|
||||
continue
|
||||
success_count += 1
|
||||
|
||||
if not errors:
|
||||
return get_result()
|
||||
|
||||
error_message = f"Successfully deleted {success_count} datasets, {len(errors)} failed. Details: {'; '.join(errors)[:128]}..."
|
||||
if success_count == 0:
|
||||
return get_error_data_result(message=error_message)
|
||||
|
||||
return get_result(data={"success_count": success_count, "errors": errors[:5]}, message=error_message)
|
||||
except OperationalError as e:
|
||||
logging.exception(e)
|
||||
return get_error_data_result(message="Database operation failed")
|
||||
|
||||
|
||||
@manager.route("/datasets/<dataset_id>", methods=["PUT"]) # noqa: F821
|
||||
@token_required
|
||||
def update(tenant_id, dataset_id):
|
||||
"""
|
||||
Update a dataset.
|
||||
---
|
||||
tags:
|
||||
- Datasets
|
||||
security:
|
||||
- ApiKeyAuth: []
|
||||
parameters:
|
||||
- in: path
|
||||
name: dataset_id
|
||||
type: string
|
||||
required: true
|
||||
description: ID of the dataset to update.
|
||||
- in: header
|
||||
name: Authorization
|
||||
type: string
|
||||
required: true
|
||||
description: Bearer token for authentication.
|
||||
- in: body
|
||||
name: body
|
||||
description: Dataset update parameters.
|
||||
required: true
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
name:
|
||||
type: string
|
||||
description: New name of the dataset.
|
||||
avatar:
|
||||
type: string
|
||||
description: Updated base64 encoding of the avatar.
|
||||
description:
|
||||
type: string
|
||||
description: Updated description of the dataset.
|
||||
embedding_model:
|
||||
type: string
|
||||
description: Updated embedding model name.
|
||||
permission:
|
||||
type: string
|
||||
enum: ['me', 'team']
|
||||
description: Updated dataset permission.
|
||||
chunk_method:
|
||||
type: string
|
||||
enum: ["naive", "book", "email", "laws", "manual", "one", "paper",
|
||||
"picture", "presentation", "qa", "table", "tag"
|
||||
]
|
||||
description: Updated chunking method.
|
||||
pagerank:
|
||||
type: integer
|
||||
description: Updated page rank.
|
||||
parser_config:
|
||||
type: object
|
||||
description: Updated parser configuration.
|
||||
responses:
|
||||
200:
|
||||
description: Successful operation.
|
||||
schema:
|
||||
type: object
|
||||
"""
|
||||
# Field name transformations during model dump:
|
||||
# | Original | Dump Output |
|
||||
# |----------------|-------------|
|
||||
# | embedding_model| embd_id |
|
||||
# | chunk_method | parser_id |
|
||||
extras = {"dataset_id": dataset_id}
|
||||
req, err = validate_and_parse_json_request(request, UpdateDatasetReq, extras=extras, exclude_unset=True)
|
||||
if err is not None:
|
||||
return get_error_argument_result(err)
|
||||
|
||||
if not req:
|
||||
return get_error_argument_result(message="No properties were modified")
|
||||
|
||||
try:
|
||||
kb = KnowledgebaseService.get_or_none(id=dataset_id, tenant_id=tenant_id)
|
||||
if kb is None:
|
||||
return get_error_permission_result(message=f"User '{tenant_id}' lacks permission for dataset '{dataset_id}'")
|
||||
|
||||
if req.get("parser_config"):
|
||||
req["parser_config"] = deep_merge(kb.parser_config, req["parser_config"])
|
||||
|
||||
if (chunk_method := req.get("parser_id")) and chunk_method != kb.parser_id:
|
||||
if not req.get("parser_config"):
|
||||
req["parser_config"] = get_parser_config(chunk_method, None)
|
||||
elif "parser_config" in req and not req["parser_config"]:
|
||||
del req["parser_config"]
|
||||
|
||||
if "name" in req and req["name"].lower() != kb.name.lower():
|
||||
exists = KnowledgebaseService.get_or_none(name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value)
|
||||
if exists:
|
||||
return get_error_data_result(message=f"Dataset name '{req['name']}' already exists")
|
||||
|
||||
if "embd_id" in req:
|
||||
if not req["embd_id"]:
|
||||
req["embd_id"] = kb.embd_id
|
||||
if kb.chunk_num != 0 and req["embd_id"] != kb.embd_id:
|
||||
return get_error_data_result(message=f"When chunk_num ({kb.chunk_num}) > 0, embedding_model must remain {kb.embd_id}")
|
||||
ok, err = verify_embedding_availability(req["embd_id"], tenant_id)
|
||||
if not ok:
|
||||
return err
|
||||
|
||||
if "pagerank" in req and req["pagerank"] != kb.pagerank:
|
||||
if os.environ.get("DOC_ENGINE", "elasticsearch") == "infinity":
|
||||
return get_error_argument_result(message="'pagerank' can only be set when doc_engine is elasticsearch")
|
||||
|
||||
if req["pagerank"] > 0:
|
||||
settings.docStoreConn.update({"kb_id": kb.id}, {PAGERANK_FLD: req["pagerank"]}, search.index_name(kb.tenant_id), kb.id)
|
||||
else:
|
||||
# Elasticsearch requires PAGERANK_FLD to be non-zero!
|
||||
settings.docStoreConn.update({"exists": PAGERANK_FLD}, {"remove": PAGERANK_FLD}, search.index_name(kb.tenant_id), kb.id)
|
||||
|
||||
if not KnowledgebaseService.update_by_id(kb.id, req):
|
||||
return get_error_data_result(message="Update dataset error.(Database error)")
|
||||
|
||||
ok, k = KnowledgebaseService.get_by_id(kb.id)
|
||||
if not ok:
|
||||
return get_error_data_result(message="Dataset created failed")
|
||||
|
||||
response_data = remap_dictionary_keys(k.to_dict())
|
||||
return get_result(data=response_data)
|
||||
except OperationalError as e:
|
||||
logging.exception(e)
|
||||
return get_error_data_result(message="Database operation failed")
|
||||
|
||||
|
||||
@manager.route("/datasets", methods=["GET"]) # noqa: F821
|
||||
@token_required
|
||||
def list_datasets(tenant_id):
|
||||
"""
|
||||
List datasets.
|
||||
---
|
||||
tags:
|
||||
- Datasets
|
||||
security:
|
||||
- ApiKeyAuth: []
|
||||
parameters:
|
||||
- in: query
|
||||
name: id
|
||||
type: string
|
||||
required: false
|
||||
description: Dataset ID to filter.
|
||||
- in: query
|
||||
name: name
|
||||
type: string
|
||||
required: false
|
||||
description: Dataset name to filter.
|
||||
- in: query
|
||||
name: page
|
||||
type: integer
|
||||
required: false
|
||||
default: 1
|
||||
description: Page number.
|
||||
- in: query
|
||||
name: page_size
|
||||
type: integer
|
||||
required: false
|
||||
default: 30
|
||||
description: Number of items per page.
|
||||
- in: query
|
||||
name: orderby
|
||||
type: string
|
||||
required: false
|
||||
default: "create_time"
|
||||
description: Field to order by.
|
||||
- in: query
|
||||
name: desc
|
||||
type: boolean
|
||||
required: false
|
||||
default: true
|
||||
description: Whether to sort in descending order.
|
||||
- in: header
|
||||
name: Authorization
|
||||
type: string
|
||||
required: true
|
||||
description: Bearer token for authentication.
|
||||
responses:
|
||||
200:
|
||||
description: Successful operation.
|
||||
schema:
|
||||
type: array
|
||||
items:
|
||||
type: object
|
||||
"""
|
||||
args, err = validate_and_parse_request_args(request, ListDatasetReq)
|
||||
if err is not None:
|
||||
return get_error_argument_result(err)
|
||||
|
||||
try:
|
||||
kb_id = request.args.get("id")
|
||||
name = args.get("name")
|
||||
if kb_id:
|
||||
kbs = KnowledgebaseService.get_kb_by_id(kb_id, tenant_id)
|
||||
|
||||
if not kbs:
|
||||
return get_error_permission_result(message=f"User '{tenant_id}' lacks permission for dataset '{kb_id}'")
|
||||
if name:
|
||||
kbs = KnowledgebaseService.get_kb_by_name(name, tenant_id)
|
||||
if not kbs:
|
||||
return get_error_permission_result(message=f"User '{tenant_id}' lacks permission for dataset '{name}'")
|
||||
|
||||
tenants = TenantService.get_joined_tenants_by_user_id(tenant_id)
|
||||
kbs = KnowledgebaseService.get_list(
|
||||
[m["tenant_id"] for m in tenants],
|
||||
tenant_id,
|
||||
args["page"],
|
||||
args["page_size"],
|
||||
args["orderby"],
|
||||
args["desc"],
|
||||
kb_id,
|
||||
name,
|
||||
)
|
||||
|
||||
response_data_list = []
|
||||
for kb in kbs:
|
||||
response_data_list.append(remap_dictionary_keys(kb))
|
||||
return get_result(data=response_data_list)
|
||||
except OperationalError as e:
|
||||
logging.exception(e)
|
||||
return get_error_data_result(message="Database operation failed")
|
||||
|
||||
@manager.route('/datasets/<dataset_id>/knowledge_graph', methods=['GET']) # noqa: F821
|
||||
@token_required
|
||||
def knowledge_graph(tenant_id, dataset_id):
|
||||
if not KnowledgebaseService.accessible(dataset_id, tenant_id):
|
||||
return get_result(
|
||||
data=False,
|
||||
message='No authorization.',
|
||||
code=settings.RetCode.AUTHENTICATION_ERROR
|
||||
)
|
||||
_, kb = KnowledgebaseService.get_by_id(dataset_id)
|
||||
req = {
|
||||
"kb_id": [dataset_id],
|
||||
"knowledge_graph_kwd": ["graph"]
|
||||
}
|
||||
|
||||
obj = {"graph": {}, "mind_map": {}}
|
||||
if not settings.docStoreConn.indexExist(search.index_name(kb.tenant_id), dataset_id):
|
||||
return get_result(data=obj)
|
||||
sres = settings.retrievaler.search(req, search.index_name(kb.tenant_id), [dataset_id])
|
||||
if not len(sres.ids):
|
||||
return get_result(data=obj)
|
||||
|
||||
for id in sres.ids[:1]:
|
||||
ty = sres.field[id]["knowledge_graph_kwd"]
|
||||
try:
|
||||
content_json = json.loads(sres.field[id]["content_with_weight"])
|
||||
except Exception:
|
||||
continue
|
||||
|
||||
obj[ty] = content_json
|
||||
|
||||
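# Trim the payload: keep only the top 256 nodes by pagerank, then keep the top 128
# edges by weight whose endpoints both survived and which are not self-loops.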
if "nodes" in obj["graph"]:
|
||||
obj["graph"]["nodes"] = sorted(obj["graph"]["nodes"], key=lambda x: x.get("pagerank", 0), reverse=True)[:256]
|
||||
if "edges" in obj["graph"]:
|
||||
node_id_set = { o["id"] for o in obj["graph"]["nodes"] }
|
||||
filtered_edges = [o for o in obj["graph"]["edges"] if o["source"] != o["target"] and o["source"] in node_id_set and o["target"] in node_id_set]
|
||||
obj["graph"]["edges"] = sorted(filtered_edges, key=lambda x: x.get("weight", 0), reverse=True)[:128]
|
||||
return get_result(data=obj)
|
||||
|
||||
@manager.route('/datasets/<dataset_id>/knowledge_graph', methods=['DELETE']) # noqa: F821
|
||||
@token_required
|
||||
def delete_knowledge_graph(tenant_id, dataset_id):
|
||||
if not KnowledgebaseService.accessible(dataset_id, tenant_id):
|
||||
return get_result(
|
||||
data=False,
|
||||
message='No authorization.',
|
||||
code=settings.RetCode.AUTHENTICATION_ERROR
|
||||
)
|
||||
_, kb = KnowledgebaseService.get_by_id(dataset_id)
|
||||
settings.docStoreConn.delete({"knowledge_graph_kwd": ["graph", "subgraph", "entity", "relation"]}, search.index_name(kb.tenant_id), dataset_id)
|
||||
|
||||
return get_result(data=True)
|
||||
104
api/apps/sdk/dify_retrieval.py
Normal file
@@ -0,0 +1,104 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import logging
|
||||
|
||||
from flask import request, jsonify
|
||||
|
||||
from api.db import LLMType
|
||||
from api.db.services.document_service import DocumentService
|
||||
from api.db.services.knowledgebase_service import KnowledgebaseService
|
||||
from api.db.services.llm_service import LLMBundle
|
||||
from api import settings
|
||||
from api.utils.api_utils import validate_request, build_error_result, apikey_required
|
||||
from rag.app.tag import label_question
|
||||
from api.db.services.dialog_service import meta_filter, convert_conditions
|
||||
|
||||
|
||||
@manager.route('/dify/retrieval', methods=['POST']) # noqa: F821
|
||||
@apikey_required
|
||||
@validate_request("knowledge_id", "query")
|
||||
def retrieval(tenant_id):
|
||||
req = request.json
|
||||
question = req["query"]
|
||||
kb_id = req["knowledge_id"]
|
||||
use_kg = req.get("use_kg", False)
|
||||
retrieval_setting = req.get("retrieval_setting", {})
|
||||
similarity_threshold = float(retrieval_setting.get("score_threshold", 0.0))
|
||||
top = int(retrieval_setting.get("top_k", 1024))
|
||||
metadata_condition = req.get("metadata_condition",{})
|
||||
metas = DocumentService.get_meta_by_kbs([kb_id])
|
||||
|
||||
doc_ids = []
|
||||
try:
|
||||
|
||||
e, kb = KnowledgebaseService.get_by_id(kb_id)
|
||||
if not e:
|
||||
return build_error_result(message="Knowledgebase not found!", code=settings.RetCode.NOT_FOUND)
|
||||
|
||||
embd_mdl = LLMBundle(kb.tenant_id, LLMType.EMBEDDING.value, llm_name=kb.embd_id)
|
||||
logging.debug("metadata_condition: %s", metadata_condition)
logging.debug("converted conditions: %s", convert_conditions(metadata_condition))
doc_ids.extend(meta_filter(metas, convert_conditions(metadata_condition)))
logging.debug("doc_ids: %s", doc_ids)
|
||||
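# If the metadata filter matched no documents, fall back to an impossible doc id so
# the retrieval below returns nothing rather than silently ignoring the filter.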
if not doc_ids and metadata_condition is not None:
|
||||
doc_ids = ['-999']
|
||||
ranks = settings.retrievaler.retrieval(
|
||||
question,
|
||||
embd_mdl,
|
||||
kb.tenant_id,
|
||||
[kb_id],
|
||||
page=1,
|
||||
page_size=top,
|
||||
similarity_threshold=similarity_threshold,
|
||||
vector_similarity_weight=0.3,
|
||||
top=top,
|
||||
doc_ids=doc_ids,
|
||||
rank_feature=label_question(question, [kb])
|
||||
)
|
||||
|
||||
if use_kg:
|
||||
ck = settings.kg_retrievaler.retrieval(question,
|
||||
[tenant_id],
|
||||
[kb_id],
|
||||
embd_mdl,
|
||||
LLMBundle(kb.tenant_id, LLMType.CHAT))
|
||||
if ck["content_with_weight"]:
|
||||
ranks["chunks"].insert(0, ck)
|
||||
|
||||
records = []
|
||||
for c in ranks["chunks"]:
|
||||
e, doc = DocumentService.get_by_id(c["doc_id"])
|
||||
c.pop("vector", None)
|
||||
meta = getattr(doc, 'meta_fields', {})
|
||||
meta["doc_id"] = c["doc_id"]
|
||||
records.append({
|
||||
"content": c["content_with_weight"],
|
||||
"score": c["similarity"],
|
||||
"title": c["docnm_kwd"],
|
||||
"metadata": meta
|
||||
})
|
||||
|
||||
return jsonify({"records": records})
|
||||
except Exception as e:
|
||||
if str(e).find("not_found") > 0:
|
||||
return build_error_result(
|
||||
message='No chunk found! Check the chunk status please!',
|
||||
code=settings.RetCode.NOT_FOUND
|
||||
)
|
||||
logging.exception(e)
|
||||
return build_error_result(message=str(e), code=settings.RetCode.SERVER_ERROR)
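# Illustrative sketch of a Dify-style external-knowledge request against the endpoint
# above. The base URL, API key, and knowledge_id are placeholders; the route prefix
# depends on how this blueprint is mounted.
def _example_dify_retrieval():
    import requests

    resp = requests.post(
        "http://localhost:9380/api/v1/dify/retrieval",
        headers={"Authorization": "Bearer <YOUR_API_KEY>"},
        json={
            "knowledge_id": "<DATASET_ID>",
            "query": "What is RAGFlow?",
            "retrieval_setting": {"score_threshold": 0.2, "top_k": 5},
        },
    )
    print(resp.json())  # expected shape: {"records": [{"content": ..., "score": ...}, ...]}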
|
||||
|
||||
|
||||
1497
api/apps/sdk/doc.py
Normal file
File diff suppressed because it is too large
738
api/apps/sdk/files.py
Normal file
@@ -0,0 +1,738 @@
|
||||
import pathlib
|
||||
import re
|
||||
|
||||
import flask
|
||||
from flask import request
|
||||
from pathlib import Path
|
||||
|
||||
from api.db.services.document_service import DocumentService
|
||||
from api.db.services.file2document_service import File2DocumentService
|
||||
from api.db.services.knowledgebase_service import KnowledgebaseService
|
||||
from api.utils.api_utils import server_error_response, token_required
|
||||
from api.utils import get_uuid
|
||||
from api.db import FileType
|
||||
from api.db.services import duplicate_name
|
||||
from api.db.services.file_service import FileService
|
||||
from api.utils.api_utils import get_json_result
|
||||
from api.utils.file_utils import filename_type
|
||||
from rag.utils.storage_factory import STORAGE_IMPL
|
||||
|
||||
@manager.route('/file/upload', methods=['POST']) # noqa: F821
|
||||
@token_required
|
||||
def upload(tenant_id):
|
||||
"""
|
||||
Upload a file to the system.
|
||||
---
|
||||
tags:
|
||||
- File Management
|
||||
security:
|
||||
- ApiKeyAuth: []
|
||||
parameters:
|
||||
- in: formData
|
||||
name: file
|
||||
type: file
|
||||
required: true
|
||||
description: The file to upload
|
||||
- in: formData
|
||||
name: parent_id
|
||||
type: string
|
||||
description: Parent folder ID where the file will be uploaded. Optional.
|
||||
responses:
|
||||
200:
|
||||
description: Successfully uploaded the file.
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
data:
|
||||
type: array
|
||||
items:
|
||||
type: object
|
||||
properties:
|
||||
id:
|
||||
type: string
|
||||
description: File ID
|
||||
name:
|
||||
type: string
|
||||
description: File name
|
||||
size:
|
||||
type: integer
|
||||
description: File size in bytes
|
||||
type:
|
||||
type: string
|
||||
description: File type (e.g., document, folder)
|
||||
"""
|
||||
pf_id = request.form.get("parent_id")
|
||||
|
||||
if not pf_id:
|
||||
root_folder = FileService.get_root_folder(tenant_id)
|
||||
pf_id = root_folder["id"]
|
||||
|
||||
if 'file' not in request.files:
|
||||
return get_json_result(data=False, message='No file part!', code=400)
|
||||
file_objs = request.files.getlist('file')
|
||||
|
||||
for file_obj in file_objs:
|
||||
if file_obj.filename == '':
|
||||
return get_json_result(data=False, message='No selected file!', code=400)
|
||||
|
||||
file_res = []
|
||||
|
||||
try:
|
||||
e, pf_folder = FileService.get_by_id(pf_id)
|
||||
if not e:
|
||||
return get_json_result(data=False, message="Can't find this folder!", code=404)
|
||||
|
||||
for file_obj in file_objs:
|
||||
# Handle file path
|
||||
full_path = '/' + file_obj.filename
|
||||
file_obj_names = full_path.split('/')
|
||||
file_len = len(file_obj_names)
|
||||
|
||||
# Get folder path ID
|
||||
file_id_list = FileService.get_id_list_by_id(pf_id, file_obj_names, 1, [pf_id])
|
||||
len_id_list = len(file_id_list)
|
||||
|
||||
# Create the folder hierarchy for the uploaded file path
|
||||
if file_len != len_id_list:
|
||||
e, file = FileService.get_by_id(file_id_list[len_id_list - 1])
|
||||
if not e:
|
||||
return get_json_result(data=False, message="Folder not found!", code=404)
|
||||
last_folder = FileService.create_folder(file, file_id_list[len_id_list - 1], file_obj_names, len_id_list)
|
||||
else:
|
||||
e, file = FileService.get_by_id(file_id_list[len_id_list - 2])
|
||||
if not e:
|
||||
return get_json_result(data=False, message="Folder not found!", code=404)
|
||||
last_folder = FileService.create_folder(file, file_id_list[len_id_list - 2], file_obj_names, len_id_list)
|
||||
|
||||
filetype = filename_type(file_obj_names[file_len - 1])
|
||||
location = file_obj_names[file_len - 1]
|
||||
while STORAGE_IMPL.obj_exist(last_folder.id, location):
|
||||
location += "_"
|
||||
blob = file_obj.read()
|
||||
filename = duplicate_name(FileService.query, name=file_obj_names[file_len - 1], parent_id=last_folder.id)
|
||||
|
||||
file = {
|
||||
"id": get_uuid(),
|
||||
"parent_id": last_folder.id,
|
||||
"tenant_id": tenant_id,
|
||||
"created_by": tenant_id,
|
||||
"type": filetype,
|
||||
"name": filename,
|
||||
"location": location,
|
||||
"size": len(blob),
|
||||
}
|
||||
file = FileService.insert(file)
|
||||
STORAGE_IMPL.put(last_folder.id, location, blob)
|
||||
file_res.append(file.to_json())
|
||||
return get_json_result(data=file_res)
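# Illustrative sketch: uploading a local file with multipart/form-data to the endpoint
# above. Base URL, key, and parent_id are placeholders; parent_id may be omitted to
# upload into the tenant's root folder.
def _example_upload_file():
    import requests

    with open("report.pdf", "rb") as fh:
        resp = requests.post(
            "http://localhost:9380/api/v1/file/upload",
            headers={"Authorization": "Bearer <YOUR_API_KEY>"},
            files={"file": ("report.pdf", fh)},
            data={"parent_id": "<FOLDER_ID>"},
        )
    print(resp.json())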
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route('/file/create', methods=['POST']) # noqa: F821
|
||||
@token_required
|
||||
def create(tenant_id):
|
||||
"""
|
||||
Create a new file or folder.
|
||||
---
|
||||
tags:
|
||||
- File Management
|
||||
security:
|
||||
- ApiKeyAuth: []
|
||||
parameters:
|
||||
- in: body
|
||||
name: body
|
||||
description: File creation parameters
|
||||
required: true
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
name:
|
||||
type: string
|
||||
description: Name of the file/folder
|
||||
parent_id:
|
||||
type: string
|
||||
description: Parent folder ID. Optional.
|
||||
type:
|
||||
type: string
|
||||
enum: ["FOLDER", "VIRTUAL"]
|
||||
description: Type of the file
|
||||
responses:
|
||||
200:
|
||||
description: File created successfully.
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
data:
|
||||
type: object
|
||||
properties:
|
||||
id:
|
||||
type: string
|
||||
name:
|
||||
type: string
|
||||
type:
|
||||
type: string
|
||||
"""
|
||||
req = request.json
|
||||
pf_id = request.json.get("parent_id")
|
||||
input_file_type = request.json.get("type")
|
||||
if not pf_id:
|
||||
root_folder = FileService.get_root_folder(tenant_id)
|
||||
pf_id = root_folder["id"]
|
||||
|
||||
try:
|
||||
if not FileService.is_parent_folder_exist(pf_id):
|
||||
return get_json_result(data=False, message="Parent Folder Doesn't Exist!", code=400)
|
||||
if FileService.query(name=req["name"], parent_id=pf_id):
|
||||
return get_json_result(data=False, message="Duplicated folder name in the same folder.", code=409)
|
||||
|
||||
if input_file_type == FileType.FOLDER.value:
|
||||
file_type = FileType.FOLDER.value
|
||||
else:
|
||||
file_type = FileType.VIRTUAL.value
|
||||
|
||||
file = FileService.insert({
|
||||
"id": get_uuid(),
|
||||
"parent_id": pf_id,
|
||||
"tenant_id": tenant_id,
|
||||
"created_by": tenant_id,
|
||||
"name": req["name"],
|
||||
"location": "",
|
||||
"size": 0,
|
||||
"type": file_type
|
||||
})
|
||||
|
||||
return get_json_result(data=file.to_json())
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route('/file/list', methods=['GET']) # noqa: F821
|
||||
@token_required
|
||||
def list_files(tenant_id):
|
||||
"""
|
||||
List files under a specific folder.
|
||||
---
|
||||
tags:
|
||||
- File Management
|
||||
security:
|
||||
- ApiKeyAuth: []
|
||||
parameters:
|
||||
- in: query
|
||||
name: parent_id
|
||||
type: string
|
||||
description: Folder ID to list files from
|
||||
- in: query
|
||||
name: keywords
|
||||
type: string
|
||||
description: Search keyword filter
|
||||
- in: query
|
||||
name: page
|
||||
type: integer
|
||||
default: 1
|
||||
description: Page number
|
||||
- in: query
|
||||
name: page_size
|
||||
type: integer
|
||||
default: 15
|
||||
description: Number of results per page
|
||||
- in: query
|
||||
name: orderby
|
||||
type: string
|
||||
default: "create_time"
|
||||
description: Sort by field
|
||||
- in: query
|
||||
name: desc
|
||||
type: boolean
|
||||
default: true
|
||||
description: Descending order
|
||||
responses:
|
||||
200:
|
||||
description: Successfully retrieved file list.
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
total:
|
||||
type: integer
|
||||
files:
|
||||
type: array
|
||||
items:
|
||||
type: object
|
||||
properties:
|
||||
id:
|
||||
type: string
|
||||
name:
|
||||
type: string
|
||||
type:
|
||||
type: string
|
||||
size:
|
||||
type: integer
|
||||
create_time:
|
||||
type: string
|
||||
format: date-time
|
||||
"""
|
||||
pf_id = request.args.get("parent_id")
|
||||
keywords = request.args.get("keywords", "")
|
||||
page_number = int(request.args.get("page", 1))
|
||||
items_per_page = int(request.args.get("page_size", 15))
|
||||
orderby = request.args.get("orderby", "create_time")
|
||||
desc = request.args.get("desc", True)
|
||||
|
||||
if not pf_id:
|
||||
root_folder = FileService.get_root_folder(tenant_id)
|
||||
pf_id = root_folder["id"]
|
||||
FileService.init_knowledgebase_docs(pf_id, tenant_id)
|
||||
|
||||
try:
|
||||
e, file = FileService.get_by_id(pf_id)
|
||||
if not e:
|
||||
return get_json_result(message="Folder not found!", code=404)
|
||||
|
||||
files, total = FileService.get_by_pf_id(tenant_id, pf_id, page_number, items_per_page, orderby, desc, keywords)
|
||||
|
||||
parent_folder = FileService.get_parent_folder(pf_id)
|
||||
if not parent_folder:
|
||||
return get_json_result(message="File not found!", code=404)
|
||||
|
||||
return get_json_result(data={"total": total, "files": files, "parent_folder": parent_folder.to_json()})
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route('/file/root_folder', methods=['GET']) # noqa: F821
|
||||
@token_required
|
||||
def get_root_folder(tenant_id):
|
||||
"""
|
||||
Get user's root folder.
|
||||
---
|
||||
tags:
|
||||
- File Management
|
||||
security:
|
||||
- ApiKeyAuth: []
|
||||
responses:
|
||||
200:
|
||||
description: Root folder information
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
data:
|
||||
type: object
|
||||
properties:
|
||||
root_folder:
|
||||
type: object
|
||||
properties:
|
||||
id:
|
||||
type: string
|
||||
name:
|
||||
type: string
|
||||
type:
|
||||
type: string
|
||||
"""
|
||||
try:
|
||||
root_folder = FileService.get_root_folder(tenant_id)
|
||||
return get_json_result(data={"root_folder": root_folder})
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route('/file/parent_folder', methods=['GET']) # noqa: F821
|
||||
@token_required
|
||||
def get_parent_folder(tenant_id):
|
||||
"""
|
||||
Get parent folder info of a file.
|
||||
---
|
||||
tags:
|
||||
- File Management
|
||||
security:
|
||||
- ApiKeyAuth: []
|
||||
parameters:
|
||||
- in: query
|
||||
name: file_id
|
||||
type: string
|
||||
required: true
|
||||
description: Target file ID
|
||||
responses:
|
||||
200:
|
||||
description: Parent folder information
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
data:
|
||||
type: object
|
||||
properties:
|
||||
parent_folder:
|
||||
type: object
|
||||
properties:
|
||||
id:
|
||||
type: string
|
||||
name:
|
||||
type: string
|
||||
"""
|
||||
file_id = request.args.get("file_id")
|
||||
try:
|
||||
e, file = FileService.get_by_id(file_id)
|
||||
if not e:
|
||||
return get_json_result(message="Folder not found!", code=404)
|
||||
|
||||
parent_folder = FileService.get_parent_folder(file_id)
|
||||
return get_json_result(data={"parent_folder": parent_folder.to_json()})
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route('/file/all_parent_folder', methods=['GET']) # noqa: F821
|
||||
@token_required
|
||||
def get_all_parent_folders(tenant_id):
|
||||
"""
|
||||
Get all parent folders of a file.
|
||||
---
|
||||
tags:
|
||||
- File Management
|
||||
security:
|
||||
- ApiKeyAuth: []
|
||||
parameters:
|
||||
- in: query
|
||||
name: file_id
|
||||
type: string
|
||||
required: true
|
||||
description: Target file ID
|
||||
responses:
|
||||
200:
|
||||
description: All parent folders of the file
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
data:
|
||||
type: object
|
||||
properties:
|
||||
parent_folders:
|
||||
type: array
|
||||
items:
|
||||
type: object
|
||||
properties:
|
||||
id:
|
||||
type: string
|
||||
name:
|
||||
type: string
|
||||
"""
|
||||
file_id = request.args.get("file_id")
|
||||
try:
|
||||
e, file = FileService.get_by_id(file_id)
|
||||
if not e:
|
||||
return get_json_result(message="Folder not found!", code=404)
|
||||
|
||||
parent_folders = FileService.get_all_parent_folders(file_id)
|
||||
parent_folders_res = [folder.to_json() for folder in parent_folders]
|
||||
return get_json_result(data={"parent_folders": parent_folders_res})
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route('/file/rm', methods=['POST']) # noqa: F821
|
||||
@token_required
|
||||
def rm(tenant_id):
|
||||
"""
|
||||
Delete one or multiple files/folders.
|
||||
---
|
||||
tags:
|
||||
- File Management
|
||||
security:
|
||||
- ApiKeyAuth: []
|
||||
parameters:
|
||||
- in: body
|
||||
name: body
|
||||
description: Files to delete
|
||||
required: true
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
file_ids:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
description: List of file IDs to delete
|
||||
responses:
|
||||
200:
|
||||
description: Successfully deleted files
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
data:
|
||||
type: boolean
|
||||
example: true
|
||||
"""
|
||||
req = request.json
|
||||
file_ids = req["file_ids"]
|
||||
try:
|
||||
for file_id in file_ids:
|
||||
e, file = FileService.get_by_id(file_id)
|
||||
if not e:
|
||||
return get_json_result(message="File or Folder not found!", code=404)
|
||||
if not file.tenant_id:
|
||||
return get_json_result(message="Tenant not found!", code=404)
|
||||
|
||||
if file.type == FileType.FOLDER.value:
|
||||
file_id_list = FileService.get_all_innermost_file_ids(file_id, [])
|
||||
for inner_file_id in file_id_list:
|
||||
e, file = FileService.get_by_id(inner_file_id)
|
||||
if not e:
|
||||
return get_json_result(message="File not found!", code=404)
|
||||
STORAGE_IMPL.rm(file.parent_id, file.location)
|
||||
FileService.delete_folder_by_pf_id(tenant_id, file_id)
|
||||
else:
|
||||
STORAGE_IMPL.rm(file.parent_id, file.location)
|
||||
if not FileService.delete(file):
|
||||
return get_json_result(message="Database error (File removal)!", code=500)
|
||||
|
||||
informs = File2DocumentService.get_by_file_id(file_id)
|
||||
for inform in informs:
|
||||
doc_id = inform.document_id
|
||||
e, doc = DocumentService.get_by_id(doc_id)
|
||||
if not e:
|
||||
return get_json_result(message="Document not found!", code=404)
|
||||
tenant_id = DocumentService.get_tenant_id(doc_id)
|
||||
if not tenant_id:
|
||||
return get_json_result(message="Tenant not found!", code=404)
|
||||
if not DocumentService.remove_document(doc, tenant_id):
|
||||
return get_json_result(message="Database error (Document removal)!", code=500)
|
||||
File2DocumentService.delete_by_file_id(file_id)
|
||||
|
||||
return get_json_result(data=True)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route('/file/rename', methods=['POST']) # noqa: F821
|
||||
@token_required
|
||||
def rename(tenant_id):
|
||||
"""
|
||||
Rename a file.
|
||||
---
|
||||
tags:
|
||||
- File Management
|
||||
security:
|
||||
- ApiKeyAuth: []
|
||||
parameters:
|
||||
- in: body
|
||||
name: body
|
||||
description: Rename file
|
||||
required: true
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
file_id:
|
||||
type: string
|
||||
description: Target file ID
|
||||
name:
|
||||
type: string
|
||||
description: New name for the file
|
||||
responses:
|
||||
200:
|
||||
description: File renamed successfully
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
data:
|
||||
type: boolean
|
||||
example: true
|
||||
"""
|
||||
req = request.json
|
||||
try:
|
||||
e, file = FileService.get_by_id(req["file_id"])
|
||||
if not e:
|
||||
return get_json_result(message="File not found!", code=404)
|
||||
|
||||
if file.type != FileType.FOLDER.value and pathlib.Path(req["name"].lower()).suffix != pathlib.Path(file.name.lower()).suffix:
|
||||
return get_json_result(data=False, message="The extension of file can't be changed", code=400)
|
||||
|
||||
for existing_file in FileService.query(name=req["name"], pf_id=file.parent_id):
|
||||
if existing_file.name == req["name"]:
|
||||
return get_json_result(data=False, message="Duplicated file name in the same folder.", code=409)
|
||||
|
||||
if not FileService.update_by_id(req["file_id"], {"name": req["name"]}):
|
||||
return get_json_result(message="Database error (File rename)!", code=500)
|
||||
|
||||
informs = File2DocumentService.get_by_file_id(req["file_id"])
|
||||
if informs:
|
||||
if not DocumentService.update_by_id(informs[0].document_id, {"name": req["name"]}):
|
||||
return get_json_result(message="Database error (Document rename)!", code=500)
|
||||
|
||||
return get_json_result(data=True)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route('/file/get/<file_id>', methods=['GET']) # noqa: F821
|
||||
@token_required
|
||||
def get(tenant_id, file_id):
|
||||
"""
|
||||
Download a file.
|
||||
---
|
||||
tags:
|
||||
- File Management
|
||||
security:
|
||||
- ApiKeyAuth: []
|
||||
produces:
|
||||
- application/octet-stream
|
||||
parameters:
|
||||
- in: path
|
||||
name: file_id
|
||||
type: string
|
||||
required: true
|
||||
description: File ID to download
|
||||
responses:
|
||||
200:
|
||||
description: File stream
|
||||
schema:
|
||||
type: file
|
||||
404:
|
||||
description: File not found
|
||||
"""
|
||||
try:
|
||||
e, file = FileService.get_by_id(file_id)
|
||||
if not e:
|
||||
return get_json_result(message="Document not found!", code=404)
|
||||
|
||||
blob = STORAGE_IMPL.get(file.parent_id, file.location)
|
||||
if not blob:
|
||||
b, n = File2DocumentService.get_storage_address(file_id=file_id)
|
||||
blob = STORAGE_IMPL.get(b, n)
|
||||
|
||||
response = flask.make_response(blob)
|
||||
ext = re.search(r"\.([^.]+)$", file.name)
|
||||
if ext:
|
||||
if file.type == FileType.VISUAL.value:
|
||||
response.headers.set('Content-Type', 'image/%s' % ext.group(1))
|
||||
else:
|
||||
response.headers.set('Content-Type', 'application/%s' % ext.group(1))
|
||||
return response
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route('/file/mv', methods=['POST']) # noqa: F821
|
||||
@token_required
|
||||
def move(tenant_id):
|
||||
"""
|
||||
Move one or multiple files to another folder.
|
||||
---
|
||||
tags:
|
||||
- File Management
|
||||
security:
|
||||
- ApiKeyAuth: []
|
||||
parameters:
|
||||
- in: body
|
||||
name: body
|
||||
description: Move operation
|
||||
required: true
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
src_file_ids:
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
description: Source file IDs
|
||||
dest_file_id:
|
||||
type: string
|
||||
description: Destination folder ID
|
||||
responses:
|
||||
200:
|
||||
description: Files moved successfully
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
data:
|
||||
type: boolean
|
||||
example: true
|
||||
"""
|
||||
req = request.json
|
||||
try:
|
||||
file_ids = req["src_file_ids"]
|
||||
parent_id = req["dest_file_id"]
|
||||
files = FileService.get_by_ids(file_ids)
|
||||
files_dict = {f.id: f for f in files}
|
||||
|
||||
for file_id in file_ids:
|
||||
file = files_dict[file_id]
|
||||
if not file:
|
||||
return get_json_result(message="File or Folder not found!", code=404)
|
||||
if not file.tenant_id:
|
||||
return get_json_result(message="Tenant not found!", code=404)
|
||||
|
||||
fe, _ = FileService.get_by_id(parent_id)
|
||||
if not fe:
|
||||
return get_json_result(message="Parent Folder not found!", code=404)
|
||||
|
||||
FileService.move_file(file_ids, parent_id)
|
||||
return get_json_result(data=True)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
@manager.route('/file/convert', methods=['POST']) # noqa: F821
|
||||
@token_required
|
||||
def convert(tenant_id):
|
||||
req = request.json
|
||||
kb_ids = req["kb_ids"]
|
||||
file_ids = req["file_ids"]
|
||||
file2documents = []
|
||||
|
||||
try:
|
||||
files = FileService.get_by_ids(file_ids)
|
||||
files_set = dict({file.id: file for file in files})
|
||||
for file_id in file_ids:
|
||||
file = files_set[file_id]
|
||||
if not file:
|
||||
return get_json_result(message="File not found!", code=404)
|
||||
file_ids_list = [file_id]
|
||||
if file.type == FileType.FOLDER.value:
|
||||
file_ids_list = FileService.get_all_innermost_file_ids(file_id, [])
|
||||
for id in file_ids_list:
|
||||
informs = File2DocumentService.get_by_file_id(id)
|
||||
# delete
|
||||
for inform in informs:
|
||||
doc_id = inform.document_id
|
||||
e, doc = DocumentService.get_by_id(doc_id)
|
||||
if not e:
|
||||
return get_json_result(message="Document not found!", code=404)
|
||||
tenant_id = DocumentService.get_tenant_id(doc_id)
|
||||
if not tenant_id:
|
||||
return get_json_result(message="Tenant not found!", code=404)
|
||||
if not DocumentService.remove_document(doc, tenant_id):
|
||||
return get_json_result(
|
||||
message="Database error (Document removal)!", code=404)
|
||||
File2DocumentService.delete_by_file_id(id)
|
||||
|
||||
# insert
|
||||
for kb_id in kb_ids:
|
||||
e, kb = KnowledgebaseService.get_by_id(kb_id)
|
||||
if not e:
|
||||
return get_json_result(
|
||||
message="Can't find this knowledgebase!", code=404)
|
||||
e, file = FileService.get_by_id(id)
|
||||
if not e:
|
||||
return get_json_result(
|
||||
message="Can't find this file!", code=404)
|
||||
|
||||
doc = DocumentService.insert({
|
||||
"id": get_uuid(),
|
||||
"kb_id": kb.id,
|
||||
"parser_id": FileService.get_parser(file.type, file.name, kb.parser_id),
|
||||
"parser_config": kb.parser_config,
|
||||
"created_by": tenant_id,
|
||||
"type": file.type,
|
||||
"name": file.name,
|
||||
"suffix": Path(file.name).suffix.lstrip("."),
|
||||
"location": file.location,
|
||||
"size": file.size
|
||||
})
|
||||
file2document = File2DocumentService.insert({
|
||||
"id": get_uuid(),
|
||||
"file_id": id,
|
||||
"document_id": doc.id,
|
||||
})
|
||||
|
||||
file2documents.append(file2document.to_json())
|
||||
return get_json_result(data=file2documents)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
1115
api/apps/sdk/session.py
Normal file
File diff suppressed because it is too large
188
api/apps/search_app.py
Normal file
@@ -0,0 +1,188 @@
|
||||
#
|
||||
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
from flask import request
|
||||
from flask_login import current_user, login_required
|
||||
|
||||
from api import settings
|
||||
from api.constants import DATASET_NAME_LIMIT
|
||||
from api.db import StatusEnum
|
||||
from api.db.db_models import DB
|
||||
from api.db.services import duplicate_name
|
||||
from api.db.services.search_service import SearchService
|
||||
from api.db.services.user_service import TenantService, UserTenantService
|
||||
from api.utils import get_uuid
|
||||
from api.utils.api_utils import get_data_error_result, get_json_result, not_allowed_parameters, server_error_response, validate_request
|
||||
|
||||
|
||||
@manager.route("/create", methods=["post"]) # noqa: F821
|
||||
@login_required
|
||||
@validate_request("name")
|
||||
def create():
|
||||
req = request.get_json()
|
||||
search_name = req["name"]
|
||||
description = req.get("description", "")
|
||||
if not isinstance(search_name, str):
|
||||
return get_data_error_result(message="Search name must be string.")
|
||||
if search_name.strip() == "":
|
||||
return get_data_error_result(message="Search name can't be empty.")
|
||||
if len(search_name.encode("utf-8")) > 255:
|
||||
return get_data_error_result(message=f"Search name length is {len(search_name)} which is large than 255.")
|
||||
e, _ = TenantService.get_by_id(current_user.id)
|
||||
if not e:
|
||||
return get_data_error_result(message="Authorized identity.")
|
||||
|
||||
search_name = search_name.strip()
|
||||
search_name = duplicate_name(SearchService.query, name=search_name, tenant_id=current_user.id, status=StatusEnum.VALID.value)
|
||||
|
||||
req["id"] = get_uuid()
|
||||
req["name"] = search_name
|
||||
req["description"] = description
|
||||
req["tenant_id"] = current_user.id
|
||||
req["created_by"] = current_user.id
|
||||
with DB.atomic():
|
||||
try:
|
||||
if not SearchService.save(**req):
|
||||
return get_data_error_result()
|
||||
return get_json_result(data={"search_id": req["id"]})
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route("/update", methods=["post"]) # noqa: F821
|
||||
@login_required
|
||||
@validate_request("search_id", "name", "search_config", "tenant_id")
|
||||
@not_allowed_parameters("id", "created_by", "create_time", "update_time", "create_date", "update_date", "created_by")
|
||||
def update():
|
||||
req = request.get_json()
|
||||
if not isinstance(req["name"], str):
|
||||
return get_data_error_result(message="Search name must be string.")
|
||||
if req["name"].strip() == "":
|
||||
return get_data_error_result(message="Search name can't be empty.")
|
||||
if len(req["name"].encode("utf-8")) > DATASET_NAME_LIMIT:
|
||||
return get_data_error_result(message=f"Search name length is {len(req['name'])} which is large than {DATASET_NAME_LIMIT}")
|
||||
req["name"] = req["name"].strip()
|
||||
tenant_id = req["tenant_id"]
|
||||
e, _ = TenantService.get_by_id(tenant_id)
|
||||
if not e:
|
||||
return get_data_error_result(message="Authorized identity.")
|
||||
|
||||
search_id = req["search_id"]
|
||||
if not SearchService.accessible4deletion(search_id, current_user.id):
|
||||
return get_json_result(data=False, message="No authorization.", code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
|
||||
try:
|
||||
search_app = SearchService.query(tenant_id=tenant_id, id=search_id)[0]
|
||||
if not search_app:
|
||||
return get_json_result(data=False, message=f"Cannot find search {search_id}", code=settings.RetCode.DATA_ERROR)
|
||||
|
||||
if req["name"].lower() != search_app.name.lower() and len(SearchService.query(name=req["name"], tenant_id=tenant_id, status=StatusEnum.VALID.value)) >= 1:
|
||||
return get_data_error_result(message="Duplicated search name.")
|
||||
|
||||
if "search_config" in req:
|
||||
current_config = search_app.search_config or {}
|
||||
new_config = req["search_config"]
|
||||
|
||||
if not isinstance(new_config, dict):
|
||||
return get_data_error_result(message="search_config must be a JSON object")
|
||||
|
||||
updated_config = {**current_config, **new_config}
|
||||
req["search_config"] = updated_config
|
||||
|
||||
req.pop("search_id", None)
|
||||
req.pop("tenant_id", None)
|
||||
|
||||
updated = SearchService.update_by_id(search_id, req)
|
||||
if not updated:
|
||||
return get_data_error_result(message="Failed to update search")
|
||||
|
||||
e, updated_search = SearchService.get_by_id(search_id)
|
||||
if not e:
|
||||
return get_data_error_result(message="Failed to fetch updated search")
|
||||
|
||||
return get_json_result(data=updated_search.to_dict())
|
||||
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route("/detail", methods=["GET"]) # noqa: F821
|
||||
@login_required
|
||||
def detail():
|
||||
search_id = request.args["search_id"]
|
||||
try:
|
||||
tenants = UserTenantService.query(user_id=current_user.id)
|
||||
for tenant in tenants:
|
||||
if SearchService.query(tenant_id=tenant.tenant_id, id=search_id):
|
||||
break
|
||||
else:
|
||||
return get_json_result(data=False, message="Has no permission for this operation.", code=settings.RetCode.OPERATING_ERROR)
|
||||
|
||||
search = SearchService.get_detail(search_id)
|
||||
if not search:
|
||||
return get_data_error_result(message="Can't find this Search App!")
|
||||
return get_json_result(data=search)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route("/list", methods=["POST"]) # noqa: F821
|
||||
@login_required
|
||||
def list_search_app():
|
||||
keywords = request.args.get("keywords", "")
|
||||
page_number = int(request.args.get("page", 0))
|
||||
items_per_page = int(request.args.get("page_size", 0))
|
||||
orderby = request.args.get("orderby", "create_time")
|
||||
if request.args.get("desc", "true").lower() == "false":
|
||||
desc = False
|
||||
else:
|
||||
desc = True
|
||||
|
||||
req = request.get_json()
|
||||
owner_ids = req.get("owner_ids", [])
|
||||
try:
|
||||
if not owner_ids:
|
||||
# tenants = TenantService.get_joined_tenants_by_user_id(current_user.id)
|
||||
# tenants = [m["tenant_id"] for m in tenants]
|
||||
tenants = []
|
||||
search_apps, total = SearchService.get_by_tenant_ids(tenants, current_user.id, page_number, items_per_page, orderby, desc, keywords)
|
||||
else:
|
||||
tenants = owner_ids
|
||||
search_apps, total = SearchService.get_by_tenant_ids(tenants, current_user.id, 0, 0, orderby, desc, keywords)
|
||||
search_apps = [search_app for search_app in search_apps if search_app["tenant_id"] in tenants]
|
||||
total = len(search_apps)
|
||||
if page_number and items_per_page:
|
||||
search_apps = search_apps[(page_number - 1) * items_per_page : page_number * items_per_page]
|
||||
return get_json_result(data={"search_apps": search_apps, "total": total})
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route("/rm", methods=["post"]) # noqa: F821
|
||||
@login_required
|
||||
@validate_request("search_id")
|
||||
def rm():
|
||||
req = request.get_json()
|
||||
search_id = req["search_id"]
|
||||
if not SearchService.accessible4deletion(search_id, current_user.id):
|
||||
return get_json_result(data=False, message="No authorization.", code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
|
||||
try:
|
||||
if not SearchService.delete_by_id(search_id):
|
||||
return get_data_error_result(message=f"Failed to delete search App {search_id}")
|
||||
return get_json_result(data=True)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
334
api/apps/system_app.py
Normal file
@@ -0,0 +1,334 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License
|
||||
#
|
||||
import logging
|
||||
from datetime import datetime
|
||||
import json
|
||||
|
||||
from flask_login import login_required, current_user
|
||||
|
||||
from api.db.db_models import APIToken
|
||||
from api.db.services.api_service import APITokenService
|
||||
from api.db.services.knowledgebase_service import KnowledgebaseService
|
||||
from api.db.services.user_service import UserTenantService
|
||||
from api import settings
|
||||
from api.utils import current_timestamp, datetime_format
|
||||
from api.utils.api_utils import (
|
||||
get_json_result,
|
||||
get_data_error_result,
|
||||
server_error_response,
|
||||
generate_confirmation_token,
|
||||
)
|
||||
from api.versions import get_ragflow_version
|
||||
from rag.utils.storage_factory import STORAGE_IMPL, STORAGE_IMPL_TYPE
|
||||
from timeit import default_timer as timer
|
||||
|
||||
from rag.utils.redis_conn import REDIS_CONN
|
||||
from flask import jsonify
|
||||
from api.utils.health_utils import run_health_checks
|
||||
|
||||
@manager.route("/version", methods=["GET"]) # noqa: F821
|
||||
@login_required
|
||||
def version():
|
||||
"""
|
||||
Get the current version of the application.
|
||||
---
|
||||
tags:
|
||||
- System
|
||||
security:
|
||||
- ApiKeyAuth: []
|
||||
responses:
|
||||
200:
|
||||
description: Version retrieved successfully.
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
version:
|
||||
type: string
|
||||
description: Version number.
|
||||
"""
|
||||
return get_json_result(data=get_ragflow_version())
|
||||
|
||||
|
||||
@manager.route("/status", methods=["GET"]) # noqa: F821
|
||||
@login_required
|
||||
def status():
|
||||
"""
|
||||
Get the system status.
|
||||
---
|
||||
tags:
|
||||
- System
|
||||
security:
|
||||
- ApiKeyAuth: []
|
||||
responses:
|
||||
200:
|
||||
description: System is operational.
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
es:
|
||||
type: object
|
||||
description: Elasticsearch status.
|
||||
storage:
|
||||
type: object
|
||||
description: Storage status.
|
||||
database:
|
||||
type: object
|
||||
description: Database status.
|
||||
503:
|
||||
description: Service unavailable.
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
error:
|
||||
type: string
|
||||
description: Error message.
|
||||
"""
|
||||
res = {}
|
||||
st = timer()
|
||||
try:
|
||||
res["doc_engine"] = settings.docStoreConn.health()
|
||||
res["doc_engine"]["elapsed"] = "{:.1f}".format((timer() - st) * 1000.0)
|
||||
except Exception as e:
|
||||
res["doc_engine"] = {
|
||||
"type": "unknown",
|
||||
"status": "red",
|
||||
"elapsed": "{:.1f}".format((timer() - st) * 1000.0),
|
||||
"error": str(e),
|
||||
}
|
||||
|
||||
st = timer()
|
||||
try:
|
||||
STORAGE_IMPL.health()
|
||||
res["storage"] = {
|
||||
"storage": STORAGE_IMPL_TYPE.lower(),
|
||||
"status": "green",
|
||||
"elapsed": "{:.1f}".format((timer() - st) * 1000.0),
|
||||
}
|
||||
except Exception as e:
|
||||
res["storage"] = {
|
||||
"storage": STORAGE_IMPL_TYPE.lower(),
|
||||
"status": "red",
|
||||
"elapsed": "{:.1f}".format((timer() - st) * 1000.0),
|
||||
"error": str(e),
|
||||
}
|
||||
|
||||
st = timer()
|
||||
try:
|
||||
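# Lightweight probe: the lookup result is irrelevant, it only verifies that the
# database connection can execute a query.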
KnowledgebaseService.get_by_id("x")
|
||||
res["database"] = {
|
||||
"database": settings.DATABASE_TYPE.lower(),
|
||||
"status": "green",
|
||||
"elapsed": "{:.1f}".format((timer() - st) * 1000.0),
|
||||
}
|
||||
except Exception as e:
|
||||
res["database"] = {
|
||||
"database": settings.DATABASE_TYPE.lower(),
|
||||
"status": "red",
|
||||
"elapsed": "{:.1f}".format((timer() - st) * 1000.0),
|
||||
"error": str(e),
|
||||
}
|
||||
|
||||
st = timer()
|
||||
try:
|
||||
if not REDIS_CONN.health():
|
||||
raise Exception("Lost connection!")
|
||||
res["redis"] = {
|
||||
"status": "green",
|
||||
"elapsed": "{:.1f}".format((timer() - st) * 1000.0),
|
||||
}
|
||||
except Exception as e:
|
||||
res["redis"] = {
|
||||
"status": "red",
|
||||
"elapsed": "{:.1f}".format((timer() - st) * 1000.0),
|
||||
"error": str(e),
|
||||
}
|
||||
|
||||
task_executor_heartbeats = {}
|
||||
try:
|
||||
task_executors = REDIS_CONN.smembers("TASKEXE")
|
||||
now = datetime.now().timestamp()
|
||||
for task_executor_id in task_executors:
|
||||
heartbeats = REDIS_CONN.zrangebyscore(task_executor_id, now - 60*30, now)
|
||||
heartbeats = [json.loads(heartbeat) for heartbeat in heartbeats]
|
||||
task_executor_heartbeats[task_executor_id] = heartbeats
|
||||
except Exception:
|
||||
logging.exception("get task executor heartbeats failed!")
|
||||
res["task_executor_heartbeats"] = task_executor_heartbeats
|
||||
|
||||
return get_json_result(data=res)
|
||||
|
||||
|
||||
@manager.route("/healthz", methods=["GET"]) # noqa: F821
|
||||
def healthz():
|
||||
result, all_ok = run_health_checks()
|
||||
return jsonify(result), (200 if all_ok else 500)
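# Illustrative sketch: an external liveness probe can poll the unauthenticated
# /healthz (and /ping) endpoints above; any non-200 status marks the instance
# unhealthy. The '/v1/system' prefix is an assumption about how this blueprint is
# mounted.
def _example_probe_health(base_url="http://localhost:9380"):
    import requests

    resp = requests.get(f"{base_url}/v1/system/healthz", timeout=5)
    print(resp.status_code, resp.json())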
|
||||
|
||||
|
||||
@manager.route("/ping", methods=["GET"]) # noqa: F821
|
||||
def ping():
|
||||
return "pong", 200
|
||||
|
||||
|
||||
@manager.route("/new_token", methods=["POST"]) # noqa: F821
|
||||
@login_required
|
||||
def new_token():
|
||||
"""
|
||||
Generate a new API token.
|
||||
---
|
||||
tags:
|
||||
- API Tokens
|
||||
security:
|
||||
- ApiKeyAuth: []
|
||||
parameters:
|
||||
- in: query
|
||||
name: name
|
||||
type: string
|
||||
required: false
|
||||
description: Name of the token.
|
||||
responses:
|
||||
200:
|
||||
description: Token generated successfully.
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
token:
|
||||
type: string
|
||||
description: The generated API token.
|
||||
"""
|
||||
try:
|
||||
tenants = UserTenantService.query(user_id=current_user.id)
|
||||
if not tenants:
|
||||
return get_data_error_result(message="Tenant not found!")
|
||||
|
||||
tenant_id = [tenant for tenant in tenants if tenant.role == 'owner'][0].tenant_id
|
||||
obj = {
|
||||
"tenant_id": tenant_id,
|
||||
"token": generate_confirmation_token(tenant_id),
|
||||
"beta": generate_confirmation_token(generate_confirmation_token(tenant_id)).replace("ragflow-", "")[:32],
|
||||
"create_time": current_timestamp(),
|
||||
"create_date": datetime_format(datetime.now()),
|
||||
"update_time": None,
|
||||
"update_date": None,
|
||||
}
|
||||
|
||||
if not APITokenService.save(**obj):
|
||||
return get_data_error_result(message="Fail to new a dialog!")
|
||||
|
||||
return get_json_result(data=obj)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route("/token_list", methods=["GET"]) # noqa: F821
|
||||
@login_required
|
||||
def token_list():
|
||||
"""
|
||||
List all API tokens for the current user.
|
||||
---
|
||||
tags:
|
||||
- API Tokens
|
||||
security:
|
||||
- ApiKeyAuth: []
|
||||
responses:
|
||||
200:
|
||||
description: List of API tokens.
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
tokens:
|
||||
type: array
|
||||
items:
|
||||
type: object
|
||||
properties:
|
||||
token:
|
||||
type: string
|
||||
description: The API token.
|
||||
name:
|
||||
type: string
|
||||
description: Name of the token.
|
||||
create_time:
|
||||
type: string
|
||||
description: Token creation time.
|
||||
"""
|
||||
try:
|
||||
tenants = UserTenantService.query(user_id=current_user.id)
|
||||
if not tenants:
|
||||
return get_data_error_result(message="Tenant not found!")
|
||||
|
||||
tenant_id = [tenant for tenant in tenants if tenant.role == 'owner'][0].tenant_id
|
||||
objs = APITokenService.query(tenant_id=tenant_id)
|
||||
objs = [o.to_dict() for o in objs]
|
||||
for o in objs:
|
||||
if not o["beta"]:
|
||||
o["beta"] = generate_confirmation_token(generate_confirmation_token(tenants[0].tenant_id)).replace("ragflow-", "")[:32]
|
||||
APITokenService.filter_update([APIToken.tenant_id == tenant_id, APIToken.token == o["token"]], o)
|
||||
return get_json_result(data=objs)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route("/token/<token>", methods=["DELETE"]) # noqa: F821
|
||||
@login_required
|
||||
def rm(token):
|
||||
"""
|
||||
Remove an API token.
|
||||
---
|
||||
tags:
|
||||
- API Tokens
|
||||
security:
|
||||
- ApiKeyAuth: []
|
||||
parameters:
|
||||
- in: path
|
||||
name: token
|
||||
type: string
|
||||
required: true
|
||||
description: The API token to remove.
|
||||
responses:
|
||||
200:
|
||||
description: Token removed successfully.
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
success:
|
||||
type: boolean
|
||||
description: Deletion status.
|
||||
"""
|
||||
APITokenService.filter_delete(
|
||||
[APIToken.tenant_id == current_user.id, APIToken.token == token]
|
||||
)
|
||||
return get_json_result(data=True)
|
||||
|
||||
|
||||
@manager.route('/config', methods=['GET']) # noqa: F821
|
||||
def get_config():
|
||||
"""
|
||||
Get system configuration.
|
||||
---
|
||||
tags:
|
||||
- System
|
||||
responses:
|
||||
200:
|
||||
description: Return system configuration
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
registerEnabled:
type: integer
description: Whether user registration is enabled (0 means disabled, 1 means enabled)
|
||||
"""
|
||||
return get_json_result(data={
|
||||
"registerEnabled": settings.REGISTER_ENABLED
|
||||
})
|
||||
138
api/apps/tenant_app.py
Normal file
@@ -0,0 +1,138 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
from flask import request
|
||||
from flask_login import login_required, current_user
|
||||
|
||||
from api import settings
|
||||
from api.apps import smtp_mail_server
|
||||
from api.db import UserTenantRole, StatusEnum
|
||||
from api.db.db_models import UserTenant
|
||||
from api.db.services.user_service import UserTenantService, UserService
|
||||
|
||||
from api.utils import get_uuid, delta_seconds
|
||||
from api.utils.api_utils import get_json_result, validate_request, server_error_response, get_data_error_result
|
||||
from api.utils.web_utils import send_invite_email
|
||||
|
||||
|
||||
@manager.route("/<tenant_id>/user/list", methods=["GET"]) # noqa: F821
|
||||
@login_required
|
||||
def user_list(tenant_id):
|
||||
if current_user.id != tenant_id:
|
||||
return get_json_result(
|
||||
data=False,
|
||||
message='No authorization.',
|
||||
code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
|
||||
try:
|
||||
users = UserTenantService.get_by_tenant_id(tenant_id)
|
||||
for u in users:
|
||||
u["delta_seconds"] = delta_seconds(str(u["update_date"]))
|
||||
return get_json_result(data=users)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route('/<tenant_id>/user', methods=['POST']) # noqa: F821
|
||||
@login_required
|
||||
@validate_request("email")
|
||||
def create(tenant_id):
|
||||
if current_user.id != tenant_id:
|
||||
return get_json_result(
|
||||
data=False,
|
||||
message='No authorization.',
|
||||
code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
|
||||
req = request.json
|
||||
invite_user_email = req["email"]
|
||||
invite_users = UserService.query(email=invite_user_email)
|
||||
if not invite_users:
|
||||
return get_data_error_result(message="User not found.")
|
||||
|
||||
user_id_to_invite = invite_users[0].id
|
||||
user_tenants = UserTenantService.query(user_id=user_id_to_invite, tenant_id=tenant_id)
|
||||
if user_tenants:
|
||||
user_tenant_role = user_tenants[0].role
|
||||
if user_tenant_role == UserTenantRole.NORMAL:
|
||||
return get_data_error_result(message=f"{invite_user_email} is already in the team.")
|
||||
if user_tenant_role == UserTenantRole.OWNER:
|
||||
return get_data_error_result(message=f"{invite_user_email} is the owner of the team.")
|
||||
return get_data_error_result(message=f"{invite_user_email} is in the team, but the role: {user_tenant_role} is invalid.")
|
||||
|
||||
UserTenantService.save(
|
||||
id=get_uuid(),
|
||||
user_id=user_id_to_invite,
|
||||
tenant_id=tenant_id,
|
||||
invited_by=current_user.id,
|
||||
role=UserTenantRole.INVITE,
|
||||
status=StatusEnum.VALID.value)
|
||||
|
||||
if smtp_mail_server and settings.SMTP_CONF:
|
||||
from threading import Thread
|
||||
|
||||
user_name = ""
|
||||
_, user = UserService.get_by_id(current_user.id)
|
||||
if user:
|
||||
user_name = user.nickname
|
||||
|
||||
Thread(
|
||||
target=send_invite_email,
|
||||
args=(invite_user_email, settings.MAIL_FRONTEND_URL, tenant_id, user_name or current_user.email),
|
||||
daemon=True
|
||||
).start()
|
||||
|
||||
usr = invite_users[0].to_dict()
|
||||
usr = {k: v for k, v in usr.items() if k in ["id", "avatar", "email", "nickname"]}
|
||||
|
||||
return get_json_result(data=usr)
|
||||
|
||||
|
||||
@manager.route('/<tenant_id>/user/<user_id>', methods=['DELETE']) # noqa: F821
|
||||
@login_required
|
||||
def rm(tenant_id, user_id):
|
||||
if current_user.id != tenant_id and current_user.id != user_id:
|
||||
return get_json_result(
|
||||
data=False,
|
||||
message='No authorization.',
|
||||
code=settings.RetCode.AUTHENTICATION_ERROR)
|
||||
|
||||
try:
|
||||
UserTenantService.filter_delete([UserTenant.tenant_id == tenant_id, UserTenant.user_id == user_id])
|
||||
return get_json_result(data=True)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route("/list", methods=["GET"]) # noqa: F821
|
||||
@login_required
|
||||
def tenant_list():
|
||||
try:
|
||||
users = UserTenantService.get_tenants_by_user_id(current_user.id)
|
||||
for u in users:
|
||||
u["delta_seconds"] = delta_seconds(str(u["update_date"]))
|
||||
return get_json_result(data=users)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route("/agree/<tenant_id>", methods=["PUT"]) # noqa: F821
|
||||
@login_required
|
||||
def agree(tenant_id):
|
||||
try:
|
||||
UserTenantService.filter_update([UserTenant.tenant_id == tenant_id, UserTenant.user_id == current_user.id], {"role": UserTenantRole.NORMAL})
|
||||
return get_json_result(data=True)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
827
api/apps/user_app.py
Normal file
@@ -0,0 +1,827 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import json
|
||||
import logging
|
||||
import re
|
||||
import secrets
|
||||
from datetime import datetime
|
||||
|
||||
from flask import redirect, request, session
|
||||
from flask_login import current_user, login_required, login_user, logout_user
|
||||
from werkzeug.security import check_password_hash, generate_password_hash
|
||||
|
||||
from api import settings
|
||||
from api.apps.auth import get_auth_client
|
||||
from api.db import FileType, UserTenantRole
|
||||
from api.db.db_models import TenantLLM
|
||||
from api.db.services.file_service import FileService
|
||||
from api.db.services.llm_service import get_init_tenant_llm
|
||||
from api.db.services.tenant_llm_service import TenantLLMService
|
||||
from api.db.services.user_service import TenantService, UserService, UserTenantService
|
||||
from api.utils import (
|
||||
current_timestamp,
|
||||
datetime_format,
|
||||
download_img,
|
||||
get_format_time,
|
||||
get_uuid,
|
||||
)
|
||||
from api.utils.api_utils import (
|
||||
construct_response,
|
||||
get_data_error_result,
|
||||
get_json_result,
|
||||
server_error_response,
|
||||
validate_request,
|
||||
)
|
||||
from api.utils.crypt import decrypt
|
||||
|
||||
|
||||
@manager.route("/login", methods=["POST", "GET"]) # noqa: F821
|
||||
def login():
|
||||
"""
|
||||
User login endpoint.
|
||||
---
|
||||
tags:
|
||||
- User
|
||||
parameters:
|
||||
- in: body
|
||||
name: body
|
||||
description: Login credentials.
|
||||
required: true
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
email:
|
||||
type: string
|
||||
description: User email.
|
||||
password:
|
||||
type: string
|
||||
description: User password.
|
||||
responses:
|
||||
200:
|
||||
description: Login successful.
|
||||
schema:
|
||||
type: object
|
||||
401:
|
||||
description: Authentication failed.
|
||||
schema:
|
||||
type: object
|
||||
"""
|
||||
if not request.json:
|
||||
return get_json_result(data=False, code=settings.RetCode.AUTHENTICATION_ERROR, message="Unauthorized!")
|
||||
|
||||
email = request.json.get("email", "")
|
||||
users = UserService.query(email=email)
|
||||
if not users:
|
||||
return get_json_result(
|
||||
data=False,
|
||||
code=settings.RetCode.AUTHENTICATION_ERROR,
|
||||
message=f"Email: {email} is not registered!",
|
||||
)
|
||||
|
||||
password = request.json.get("password")
|
||||
try:
|
||||
password = decrypt(password)
|
||||
except BaseException:
|
||||
return get_json_result(data=False, code=settings.RetCode.SERVER_ERROR, message="Fail to crypt password")
|
||||
|
||||
user = UserService.query_user(email, password)
|
||||
|
||||
if user and hasattr(user, 'is_active') and user.is_active == "0":
|
||||
return get_json_result(
|
||||
data=False,
|
||||
code=settings.RetCode.FORBIDDEN,
|
||||
message="This account has been disabled, please contact the administrator!",
|
||||
)
|
||||
elif user:
|
||||
response_data = user.to_json()
|
||||
user.access_token = get_uuid()
|
||||
login_user(user)
|
||||
user.update_time = (current_timestamp(),)
|
||||
user.update_date = (datetime_format(datetime.now()),)
|
||||
user.save()
|
||||
msg = "Welcome back!"
|
||||
return construct_response(data=response_data, auth=user.get_id(), message=msg)
|
||||
else:
|
||||
return get_json_result(
|
||||
data=False,
|
||||
code=settings.RetCode.AUTHENTICATION_ERROR,
|
||||
message="Email and password do not match!",
|
||||
)
|
||||
|
||||
|
||||
@manager.route("/login/channels", methods=["GET"]) # noqa: F821
|
||||
def get_login_channels():
|
||||
"""
|
||||
Get all supported authentication channels.
|
||||
"""
|
||||
try:
|
||||
channels = []
|
||||
for channel, config in settings.OAUTH_CONFIG.items():
|
||||
channels.append(
|
||||
{
|
||||
"channel": channel,
|
||||
"display_name": config.get("display_name", channel.title()),
|
||||
"icon": config.get("icon", "sso"),
|
||||
}
|
||||
)
|
||||
return get_json_result(data=channels)
|
||||
except Exception as e:
|
||||
logging.exception(e)
|
||||
return get_json_result(data=[], message=f"Load channels failure, error: {str(e)}", code=settings.RetCode.EXCEPTION_ERROR)
|
||||
|
||||
|
||||
@manager.route("/login/<channel>", methods=["GET"]) # noqa: F821
|
||||
def oauth_login(channel):
|
||||
channel_config = settings.OAUTH_CONFIG.get(channel)
|
||||
if not channel_config:
|
||||
raise ValueError(f"Invalid channel name: {channel}")
|
||||
auth_cli = get_auth_client(channel_config)
|
||||
|
||||
state = get_uuid()
|
||||
session["oauth_state"] = state
|
||||
auth_url = auth_cli.get_authorization_url(state)
|
||||
return redirect(auth_url)
|
||||
|
||||
|
||||
@manager.route("/oauth/callback/<channel>", methods=["GET"]) # noqa: F821
|
||||
def oauth_callback(channel):
|
||||
"""
|
||||
Handle the OAuth/OIDC callback for various channels dynamically.
|
||||
"""
|
||||
try:
|
||||
channel_config = settings.OAUTH_CONFIG.get(channel)
|
||||
if not channel_config:
|
||||
raise ValueError(f"Invalid channel name: {channel}")
|
||||
auth_cli = get_auth_client(channel_config)
|
||||
|
||||
# Check the state
|
||||
state = request.args.get("state")
|
||||
if not state or state != session.get("oauth_state"):
|
||||
return redirect("/?error=invalid_state")
|
||||
session.pop("oauth_state", None)
|
||||
|
||||
# Obtain the authorization code
|
||||
code = request.args.get("code")
|
||||
if not code:
|
||||
return redirect("/?error=missing_code")
|
||||
|
||||
# Exchange authorization code for access token
|
||||
token_info = auth_cli.exchange_code_for_token(code)
|
||||
access_token = token_info.get("access_token")
|
||||
if not access_token:
|
||||
return redirect("/?error=token_failed")
|
||||
|
||||
id_token = token_info.get("id_token")
|
||||
|
||||
# Fetch user info
|
||||
user_info = auth_cli.fetch_user_info(access_token, id_token=id_token)
|
||||
if not user_info.email:
|
||||
return redirect("/?error=email_missing")
|
||||
|
||||
# Login or register
|
||||
users = UserService.query(email=user_info.email)
|
||||
user_id = get_uuid()
|
||||
|
||||
if not users:
|
||||
try:
|
||||
try:
|
||||
avatar = download_img(user_info.avatar_url)
|
||||
except Exception as e:
|
||||
logging.exception(e)
|
||||
avatar = ""
|
||||
|
||||
users = user_register(
|
||||
user_id,
|
||||
{
|
||||
"access_token": get_uuid(),
|
||||
"email": user_info.email,
|
||||
"avatar": avatar,
|
||||
"nickname": user_info.nickname,
|
||||
"login_channel": channel,
|
||||
"last_login_time": get_format_time(),
|
||||
"is_superuser": False,
|
||||
},
|
||||
)
|
||||
|
||||
if not users:
|
||||
raise Exception(f"Failed to register {user_info.email}")
|
||||
if len(users) > 1:
|
||||
raise Exception(f"Same email: {user_info.email} exists!")
|
||||
|
||||
# Try to log in
|
||||
user = users[0]
|
||||
login_user(user)
|
||||
return redirect(f"/?auth={user.get_id()}")
|
||||
|
||||
except Exception as e:
|
||||
rollback_user_registration(user_id)
|
||||
logging.exception(e)
|
||||
return redirect(f"/?error={str(e)}")
|
||||
|
||||
# User exists, try to log in
|
||||
user = users[0]
|
||||
user.access_token = get_uuid()
|
||||
if user and hasattr(user, 'is_active') and user.is_active == "0":
|
||||
return redirect("/?error=user_inactive")
|
||||
|
||||
login_user(user)
|
||||
user.save()
|
||||
return redirect(f"/?auth={user.get_id()}")
|
||||
except Exception as e:
|
||||
logging.exception(e)
|
||||
return redirect(f"/?error={str(e)}")
|
||||
|
||||
|
||||
@manager.route("/github_callback", methods=["GET"]) # noqa: F821
|
||||
def github_callback():
|
||||
"""
|
||||
**Deprecated**, Use `/oauth/callback/<channel>` instead.
|
||||
|
||||
GitHub OAuth callback endpoint.
|
||||
---
|
||||
tags:
|
||||
- OAuth
|
||||
parameters:
|
||||
- in: query
|
||||
name: code
|
||||
type: string
|
||||
required: true
|
||||
description: Authorization code from GitHub.
|
||||
responses:
|
||||
200:
|
||||
description: Authentication successful.
|
||||
schema:
|
||||
type: object
|
||||
"""
|
||||
import requests
|
||||
|
||||
res = requests.post(
|
||||
settings.GITHUB_OAUTH.get("url"),
|
||||
data={
|
||||
"client_id": settings.GITHUB_OAUTH.get("client_id"),
|
||||
"client_secret": settings.GITHUB_OAUTH.get("secret_key"),
|
||||
"code": request.args.get("code"),
|
||||
},
|
||||
headers={"Accept": "application/json"},
|
||||
)
|
||||
res = res.json()
|
||||
if "error" in res:
|
||||
return redirect("/?error=%s" % res["error_description"])
|
||||
|
||||
if "user:email" not in res["scope"].split(","):
|
||||
return redirect("/?error=user:email not in scope")
|
||||
|
||||
session["access_token"] = res["access_token"]
|
||||
session["access_token_from"] = "github"
|
||||
user_info = user_info_from_github(session["access_token"])
|
||||
email_address = user_info["email"]
|
||||
users = UserService.query(email=email_address)
|
||||
user_id = get_uuid()
|
||||
if not users:
|
||||
# User doesn't exist yet, try to register
|
||||
try:
|
||||
try:
|
||||
avatar = download_img(user_info["avatar_url"])
|
||||
except Exception as e:
|
||||
logging.exception(e)
|
||||
avatar = ""
|
||||
users = user_register(
|
||||
user_id,
|
||||
{
|
||||
"access_token": session["access_token"],
|
||||
"email": email_address,
|
||||
"avatar": avatar,
|
||||
"nickname": user_info["login"],
|
||||
"login_channel": "github",
|
||||
"last_login_time": get_format_time(),
|
||||
"is_superuser": False,
|
||||
},
|
||||
)
|
||||
if not users:
|
||||
raise Exception(f"Fail to register {email_address}.")
|
||||
if len(users) > 1:
|
||||
raise Exception(f"Same email: {email_address} exists!")
|
||||
|
||||
# Try to log in
|
||||
user = users[0]
|
||||
login_user(user)
|
||||
return redirect("/?auth=%s" % user.get_id())
|
||||
except Exception as e:
|
||||
rollback_user_registration(user_id)
|
||||
logging.exception(e)
|
||||
return redirect("/?error=%s" % str(e))
|
||||
|
||||
# User has already registered, try to log in
|
||||
user = users[0]
|
||||
user.access_token = get_uuid()
|
||||
if user and hasattr(user, 'is_active') and user.is_active == "0":
|
||||
return redirect("/?error=user_inactive")
|
||||
login_user(user)
|
||||
user.save()
|
||||
return redirect("/?auth=%s" % user.get_id())
|
||||
|
||||
|
||||
@manager.route("/feishu_callback", methods=["GET"]) # noqa: F821
|
||||
def feishu_callback():
|
||||
"""
|
||||
Feishu OAuth callback endpoint.
|
||||
---
|
||||
tags:
|
||||
- OAuth
|
||||
parameters:
|
||||
- in: query
|
||||
name: code
|
||||
type: string
|
||||
required: true
|
||||
description: Authorization code from Feishu.
|
||||
responses:
|
||||
200:
|
||||
description: Authentication successful.
|
||||
schema:
|
||||
type: object
|
||||
"""
|
||||
import requests
|
||||
|
||||
app_access_token_res = requests.post(
|
||||
settings.FEISHU_OAUTH.get("app_access_token_url"),
|
||||
data=json.dumps(
|
||||
{
|
||||
"app_id": settings.FEISHU_OAUTH.get("app_id"),
|
||||
"app_secret": settings.FEISHU_OAUTH.get("app_secret"),
|
||||
}
|
||||
),
|
||||
headers={"Content-Type": "application/json; charset=utf-8"},
|
||||
)
|
||||
app_access_token_res = app_access_token_res.json()
|
||||
if app_access_token_res["code"] != 0:
|
||||
return redirect("/?error=%s" % app_access_token_res)
|
||||
|
||||
res = requests.post(
|
||||
settings.FEISHU_OAUTH.get("user_access_token_url"),
|
||||
data=json.dumps(
|
||||
{
|
||||
"grant_type": settings.FEISHU_OAUTH.get("grant_type"),
|
||||
"code": request.args.get("code"),
|
||||
}
|
||||
),
|
||||
headers={
|
||||
"Content-Type": "application/json; charset=utf-8",
|
||||
"Authorization": f"Bearer {app_access_token_res['app_access_token']}",
|
||||
},
|
||||
)
|
||||
res = res.json()
|
||||
if res["code"] != 0:
|
||||
return redirect("/?error=%s" % res["message"])
|
||||
|
||||
if "contact:user.email:readonly" not in res["data"]["scope"].split():
|
||||
return redirect("/?error=contact:user.email:readonly not in scope")
|
||||
session["access_token"] = res["data"]["access_token"]
|
||||
session["access_token_from"] = "feishu"
|
||||
user_info = user_info_from_feishu(session["access_token"])
|
||||
email_address = user_info["email"]
|
||||
users = UserService.query(email=email_address)
|
||||
user_id = get_uuid()
|
||||
if not users:
|
||||
# User doesn't exist yet, try to register
|
||||
try:
|
||||
try:
|
||||
avatar = download_img(user_info["avatar_url"])
|
||||
except Exception as e:
|
||||
logging.exception(e)
|
||||
avatar = ""
|
||||
users = user_register(
|
||||
user_id,
|
||||
{
|
||||
"access_token": session["access_token"],
|
||||
"email": email_address,
|
||||
"avatar": avatar,
|
||||
"nickname": user_info["en_name"],
|
||||
"login_channel": "feishu",
|
||||
"last_login_time": get_format_time(),
|
||||
"is_superuser": False,
|
||||
},
|
||||
)
|
||||
if not users:
|
||||
raise Exception(f"Fail to register {email_address}.")
|
||||
if len(users) > 1:
|
||||
raise Exception(f"Same email: {email_address} exists!")
|
||||
|
||||
# Try to log in
|
||||
user = users[0]
|
||||
login_user(user)
|
||||
return redirect("/?auth=%s" % user.get_id())
|
||||
except Exception as e:
|
||||
rollback_user_registration(user_id)
|
||||
logging.exception(e)
|
||||
return redirect("/?error=%s" % str(e))
|
||||
|
||||
# User has already registered, try to log in
|
||||
user = users[0]
|
||||
if user and hasattr(user, 'is_active') and user.is_active == "0":
|
||||
return redirect("/?error=user_inactive")
|
||||
user.access_token = get_uuid()
|
||||
login_user(user)
|
||||
user.save()
|
||||
return redirect("/?auth=%s" % user.get_id())
|
||||
|
||||
|
||||
def user_info_from_feishu(access_token):
|
||||
import requests
|
||||
|
||||
headers = {
|
||||
"Content-Type": "application/json; charset=utf-8",
|
||||
"Authorization": f"Bearer {access_token}",
|
||||
}
|
||||
res = requests.get("https://open.feishu.cn/open-apis/authen/v1/user_info", headers=headers)
|
||||
user_info = res.json()["data"]
|
||||
user_info["email"] = None if user_info.get("email") == "" else user_info["email"]
|
||||
return user_info
|
||||
|
||||
|
||||
def user_info_from_github(access_token):
|
||||
import requests
|
||||
|
||||
headers = {"Accept": "application/json", "Authorization": f"token {access_token}"}
|
||||
res = requests.get(f"https://api.github.com/user?access_token={access_token}", headers=headers)
|
||||
user_info = res.json()
|
||||
email_info = requests.get(
|
||||
f"https://api.github.com/user/emails?access_token={access_token}",
|
||||
headers=headers,
|
||||
).json()
|
||||
user_info["email"] = next((email for email in email_info if email["primary"]), None)["email"]
|
||||
return user_info
|
||||
|
||||
|
||||
@manager.route("/logout", methods=["GET"]) # noqa: F821
|
||||
@login_required
|
||||
def log_out():
|
||||
"""
|
||||
User logout endpoint.
|
||||
---
|
||||
tags:
|
||||
- User
|
||||
security:
|
||||
- ApiKeyAuth: []
|
||||
responses:
|
||||
200:
|
||||
description: Logout successful.
|
||||
schema:
|
||||
type: object
|
||||
"""
|
||||
current_user.access_token = f"INVALID_{secrets.token_hex(16)}"
|
||||
current_user.save()
|
||||
logout_user()
|
||||
return get_json_result(data=True)
|
||||
|
||||
|
||||
@manager.route("/setting", methods=["POST"]) # noqa: F821
|
||||
@login_required
|
||||
def setting_user():
|
||||
"""
|
||||
Update user settings.
|
||||
---
|
||||
tags:
|
||||
- User
|
||||
security:
|
||||
- ApiKeyAuth: []
|
||||
parameters:
|
||||
- in: body
|
||||
name: body
|
||||
description: User settings to update.
|
||||
required: true
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
nickname:
|
||||
type: string
|
||||
description: New nickname.
|
||||
email:
|
||||
type: string
|
||||
description: New email.
|
||||
responses:
|
||||
200:
|
||||
description: Settings updated successfully.
|
||||
schema:
|
||||
type: object
|
||||
"""
|
||||
update_dict = {}
|
||||
request_data = request.json
|
||||
if request_data.get("password"):
|
||||
new_password = request_data.get("new_password")
|
||||
if not check_password_hash(current_user.password, decrypt(request_data["password"])):
|
||||
return get_json_result(
|
||||
data=False,
|
||||
code=settings.RetCode.AUTHENTICATION_ERROR,
|
||||
message="Password error!",
|
||||
)
|
||||
|
||||
if new_password:
|
||||
update_dict["password"] = generate_password_hash(decrypt(new_password))
|
||||
|
||||
for k in request_data.keys():
|
||||
if k in [
|
||||
"password",
|
||||
"new_password",
|
||||
"email",
|
||||
"status",
|
||||
"is_superuser",
|
||||
"login_channel",
|
||||
"is_anonymous",
|
||||
"is_active",
|
||||
"is_authenticated",
|
||||
"last_login_time",
|
||||
]:
|
||||
continue
|
||||
update_dict[k] = request_data[k]
|
||||
|
||||
try:
|
||||
UserService.update_by_id(current_user.id, update_dict)
|
||||
return get_json_result(data=True)
|
||||
except Exception as e:
|
||||
logging.exception(e)
|
||||
return get_json_result(data=False, message="Update failure!", code=settings.RetCode.EXCEPTION_ERROR)
|
||||
|
||||
|
||||
@manager.route("/info", methods=["GET"]) # noqa: F821
|
||||
@login_required
|
||||
def user_profile():
|
||||
"""
|
||||
Get user profile information.
|
||||
---
|
||||
tags:
|
||||
- User
|
||||
security:
|
||||
- ApiKeyAuth: []
|
||||
responses:
|
||||
200:
|
||||
description: User profile retrieved successfully.
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
id:
|
||||
type: string
|
||||
description: User ID.
|
||||
nickname:
|
||||
type: string
|
||||
description: User nickname.
|
||||
email:
|
||||
type: string
|
||||
description: User email.
|
||||
"""
|
||||
return get_json_result(data=current_user.to_dict())
|
||||
|
||||
|
||||
def rollback_user_registration(user_id):
|
||||
try:
|
||||
UserService.delete_by_id(user_id)
|
||||
except Exception:
|
||||
pass
|
||||
try:
|
||||
TenantService.delete_by_id(user_id)
|
||||
except Exception:
|
||||
pass
|
||||
try:
|
||||
u = UserTenantService.query(tenant_id=user_id)
|
||||
if u:
|
||||
UserTenantService.delete_by_id(u[0].id)
|
||||
except Exception:
|
||||
pass
|
||||
try:
|
||||
TenantLLM.delete().where(TenantLLM.tenant_id == user_id).execute()
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
|
||||
def user_register(user_id, user):
|
||||
user["id"] = user_id
|
||||
tenant = {
|
||||
"id": user_id,
|
||||
"name": user["nickname"] + "‘s Kingdom",
|
||||
"llm_id": settings.CHAT_MDL,
|
||||
"embd_id": settings.EMBEDDING_MDL,
|
||||
"asr_id": settings.ASR_MDL,
|
||||
"parser_ids": settings.PARSERS,
|
||||
"img2txt_id": settings.IMAGE2TEXT_MDL,
|
||||
"rerank_id": settings.RERANK_MDL,
|
||||
}
|
||||
usr_tenant = {
|
||||
"tenant_id": user_id,
|
||||
"user_id": user_id,
|
||||
"invited_by": user_id,
|
||||
"role": UserTenantRole.OWNER,
|
||||
}
|
||||
file_id = get_uuid()
|
||||
file = {
|
||||
"id": file_id,
|
||||
"parent_id": file_id,
|
||||
"tenant_id": user_id,
|
||||
"created_by": user_id,
|
||||
"name": "/",
|
||||
"type": FileType.FOLDER.value,
|
||||
"size": 0,
|
||||
"location": "",
|
||||
}
|
||||
|
||||
tenant_llm = get_init_tenant_llm(user_id)
|
||||
|
||||
if not UserService.save(**user):
|
||||
return
|
||||
TenantService.insert(**tenant)
|
||||
UserTenantService.insert(**usr_tenant)
|
||||
TenantLLMService.insert_many(tenant_llm)
|
||||
FileService.insert(file)
|
||||
return UserService.query(email=user["email"])
|
||||
|
||||
|
||||
@manager.route("/register", methods=["POST"]) # noqa: F821
|
||||
@validate_request("nickname", "email", "password")
|
||||
def user_add():
|
||||
"""
|
||||
Register a new user.
|
||||
---
|
||||
tags:
|
||||
- User
|
||||
parameters:
|
||||
- in: body
|
||||
name: body
|
||||
description: Registration details.
|
||||
required: true
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
nickname:
|
||||
type: string
|
||||
description: User nickname.
|
||||
email:
|
||||
type: string
|
||||
description: User email.
|
||||
password:
|
||||
type: string
|
||||
description: User password.
|
||||
responses:
|
||||
200:
|
||||
description: Registration successful.
|
||||
schema:
|
||||
type: object
|
||||
"""
|
||||
|
||||
if not settings.REGISTER_ENABLED:
|
||||
return get_json_result(
|
||||
data=False,
|
||||
message="User registration is disabled!",
|
||||
code=settings.RetCode.OPERATING_ERROR,
|
||||
)
|
||||
|
||||
req = request.json
|
||||
email_address = req["email"]
|
||||
|
||||
# Validate the email address
|
||||
if not re.match(r"^[\w\._-]+@([\w_-]+\.)+[\w-]{2,}$", email_address):
|
||||
return get_json_result(
|
||||
data=False,
|
||||
message=f"Invalid email address: {email_address}!",
|
||||
code=settings.RetCode.OPERATING_ERROR,
|
||||
)
|
||||
|
||||
# Check if the email address is already used
|
||||
if UserService.query(email=email_address):
|
||||
return get_json_result(
|
||||
data=False,
|
||||
message=f"Email: {email_address} has already registered!",
|
||||
code=settings.RetCode.OPERATING_ERROR,
|
||||
)
|
||||
|
||||
# Construct user info data
|
||||
nickname = req["nickname"]
|
||||
user_dict = {
|
||||
"access_token": get_uuid(),
|
||||
"email": email_address,
|
||||
"nickname": nickname,
|
||||
"password": decrypt(req["password"]),
|
||||
"login_channel": "password",
|
||||
"last_login_time": get_format_time(),
|
||||
"is_superuser": False,
|
||||
}
|
||||
|
||||
user_id = get_uuid()
|
||||
try:
|
||||
users = user_register(user_id, user_dict)
|
||||
if not users:
|
||||
raise Exception(f"Fail to register {email_address}.")
|
||||
if len(users) > 1:
|
||||
raise Exception(f"Same email: {email_address} exists!")
|
||||
user = users[0]
|
||||
login_user(user)
|
||||
return construct_response(
|
||||
data=user.to_json(),
|
||||
auth=user.get_id(),
|
||||
message=f"{nickname}, welcome aboard!",
|
||||
)
|
||||
except Exception as e:
|
||||
rollback_user_registration(user_id)
|
||||
logging.exception(e)
|
||||
return get_json_result(
|
||||
data=False,
|
||||
message=f"User registration failure, error: {str(e)}",
|
||||
code=settings.RetCode.EXCEPTION_ERROR,
|
||||
)
|
||||
|
||||
|
||||
@manager.route("/tenant_info", methods=["GET"]) # noqa: F821
|
||||
@login_required
|
||||
def tenant_info():
|
||||
"""
|
||||
Get tenant information.
|
||||
---
|
||||
tags:
|
||||
- Tenant
|
||||
security:
|
||||
- ApiKeyAuth: []
|
||||
responses:
|
||||
200:
|
||||
description: Tenant information retrieved successfully.
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
tenant_id:
|
||||
type: string
|
||||
description: Tenant ID.
|
||||
name:
|
||||
type: string
|
||||
description: Tenant name.
|
||||
llm_id:
|
||||
type: string
|
||||
description: LLM ID.
|
||||
embd_id:
|
||||
type: string
|
||||
description: Embedding model ID.
|
||||
"""
|
||||
try:
|
||||
tenants = TenantService.get_info_by(current_user.id)
|
||||
if not tenants:
|
||||
return get_data_error_result(message="Tenant not found!")
|
||||
return get_json_result(data=tenants[0])
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
|
||||
|
||||
@manager.route("/set_tenant_info", methods=["POST"]) # noqa: F821
|
||||
@login_required
|
||||
@validate_request("tenant_id", "asr_id", "embd_id", "img2txt_id", "llm_id")
|
||||
def set_tenant_info():
|
||||
"""
|
||||
Update tenant information.
|
||||
---
|
||||
tags:
|
||||
- Tenant
|
||||
security:
|
||||
- ApiKeyAuth: []
|
||||
parameters:
|
||||
- in: body
|
||||
name: body
|
||||
description: Tenant information to update.
|
||||
required: true
|
||||
schema:
|
||||
type: object
|
||||
properties:
|
||||
tenant_id:
|
||||
type: string
|
||||
description: Tenant ID.
|
||||
llm_id:
|
||||
type: string
|
||||
description: LLM ID.
|
||||
embd_id:
|
||||
type: string
|
||||
description: Embedding model ID.
|
||||
asr_id:
|
||||
type: string
|
||||
description: ASR model ID.
|
||||
img2txt_id:
|
||||
type: string
|
||||
description: Image to Text model ID.
|
||||
responses:
|
||||
200:
|
||||
description: Tenant information updated successfully.
|
||||
schema:
|
||||
type: object
|
||||
"""
|
||||
req = request.json
|
||||
try:
|
||||
tid = req.pop("tenant_id")
|
||||
TenantService.update_by_id(tid, req)
|
||||
return get_json_result(data=True)
|
||||
except Exception as e:
|
||||
return server_error_response(e)
|
||||
540
api/apps/user_app_fastapi.py
Normal file
@@ -0,0 +1,540 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import json
|
||||
import logging
|
||||
import re
|
||||
import secrets
|
||||
from datetime import datetime
|
||||
from typing import Optional, Dict, Any
|
||||
|
||||
from fastapi import APIRouter, Depends, HTTPException, Request, Response, status
|
||||
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
|
||||
from fastapi.responses import RedirectResponse
|
||||
from pydantic import BaseModel, EmailStr
|
||||
try:
|
||||
from werkzeug.security import check_password_hash, generate_password_hash
|
||||
except ImportError:
|
||||
# Fall back to passlib when werkzeug is unavailable
|
||||
from passlib.context import CryptContext
|
||||
pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
|
||||
|
||||
def check_password_hash(hashed, password):
|
||||
return pwd_context.verify(password, hashed)
|
||||
|
||||
def generate_password_hash(password):
|
||||
return pwd_context.hash(password)
|
||||
|
||||
from api import settings
|
||||
from api.apps.auth import get_auth_client
|
||||
from api.db import FileType, UserTenantRole
|
||||
from api.db.db_models import TenantLLM
|
||||
from api.db.services.file_service import FileService
|
||||
from api.db.services.llm_service import get_init_tenant_llm
|
||||
from api.db.services.tenant_llm_service import TenantLLMService
|
||||
from api.db.services.user_service import TenantService, UserService, UserTenantService
|
||||
from api.utils import (
|
||||
current_timestamp,
|
||||
datetime_format,
|
||||
download_img,
|
||||
get_format_time,
|
||||
get_uuid,
|
||||
)
|
||||
from api.utils.api_utils import (
|
||||
construct_response,
|
||||
get_data_error_result,
|
||||
get_json_result,
|
||||
server_error_response,
|
||||
validate_request,
|
||||
)
|
||||
from api.utils.crypt import decrypt
|
||||
|
||||
# Create the router
|
||||
router = APIRouter()
|
||||
|
||||
# Security scheme
|
||||
security = HTTPBearer()
|
||||
|
||||
# Pydantic request models
|
||||
class LoginRequest(BaseModel):
|
||||
email: EmailStr
|
||||
password: str
|
||||
|
||||
class RegisterRequest(BaseModel):
|
||||
nickname: str
|
||||
email: EmailStr
|
||||
password: str
|
||||
|
||||
class UserSettingRequest(BaseModel):
|
||||
nickname: Optional[str] = None
|
||||
password: Optional[str] = None
|
||||
new_password: Optional[str] = None
|
||||
|
||||
class TenantInfoRequest(BaseModel):
|
||||
tenant_id: str
|
||||
asr_id: str
|
||||
embd_id: str
|
||||
img2txt_id: str
|
||||
llm_id: str
|
||||
|
||||
# Dependency: resolve the current user from the bearer token
|
||||
async def get_current_user(credentials: HTTPAuthorizationCredentials = Depends(security)):
|
||||
"""获取当前用户"""
|
||||
from api.db import StatusEnum
|
||||
try:
|
||||
from itsdangerous.url_safe import URLSafeTimedSerializer as Serializer
|
||||
except ImportError as e:
# The stored access tokens are serialized with URLSafeTimedSerializer, so a JWT
# library is not a drop-in replacement; fail loudly instead of mis-decoding tokens.
raise ImportError("itsdangerous is required to decode access tokens") from e
|
||||
|
||||
jwt = Serializer(secret_key=settings.SECRET_KEY)
|
||||
authorization = credentials.credentials
|
||||
|
||||
if authorization:
|
||||
try:
|
||||
access_token = str(jwt.loads(authorization))
|
||||
|
||||
if not access_token or not access_token.strip():
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_401_UNAUTHORIZED,
|
||||
detail="Authentication attempt with empty access token"
|
||||
)
|
||||
|
||||
# Access tokens should be UUIDs (32 hex characters)
|
||||
if len(access_token.strip()) < 32:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_401_UNAUTHORIZED,
|
||||
detail=f"Authentication attempt with invalid token format: {len(access_token)} chars"
|
||||
)
|
||||
|
||||
user = UserService.query(
|
||||
access_token=access_token, status=StatusEnum.VALID.value
|
||||
)
|
||||
if user:
|
||||
if not user[0].access_token or not user[0].access_token.strip():
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_401_UNAUTHORIZED,
|
||||
detail=f"User {user[0].email} has empty access_token in database"
|
||||
)
|
||||
return user[0]
|
||||
else:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_401_UNAUTHORIZED,
|
||||
detail="Invalid access token"
|
||||
)
|
||||
except Exception as e:
|
||||
logging.warning(f"load_user got exception {e}")
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_401_UNAUTHORIZED,
|
||||
detail="Invalid access token"
|
||||
)
|
||||
else:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_401_UNAUTHORIZED,
|
||||
detail="Authorization header required"
|
||||
)
|
||||
|
||||
@router.post("/login")
|
||||
async def login(request: LoginRequest):
|
||||
"""
|
||||
User login endpoint.
|
||||
"""
|
||||
email = request.email
|
||||
users = UserService.query(email=email)
|
||||
if not users:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_401_UNAUTHORIZED,
|
||||
detail=f"Email: {email} is not registered!"
|
||||
)
|
||||
|
||||
password = request.password
|
||||
try:
|
||||
password = decrypt(password)
|
||||
except BaseException:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail="Fail to crypt password"
|
||||
)
|
||||
|
||||
user = UserService.query_user(email, password)
|
||||
|
||||
if user and hasattr(user, 'is_active') and user.is_active == "0":
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_403_FORBIDDEN,
|
||||
detail="This account has been disabled, please contact the administrator!"
|
||||
)
|
||||
elif user:
|
||||
response_data = user.to_json()
|
||||
user.access_token = get_uuid()
|
||||
user.update_time = (current_timestamp(),)
|
||||
user.update_date = (datetime_format(datetime.now()),)
|
||||
user.save()
|
||||
msg = "Welcome back!"
|
||||
return construct_response(data=response_data, auth=user.get_id(), message=msg)
|
||||
else:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_401_UNAUTHORIZED,
|
||||
detail="Email and password do not match!"
|
||||
)
|
||||
|
||||
@router.get("/login/channels")
|
||||
async def get_login_channels():
|
||||
"""
|
||||
Get all supported authentication channels.
|
||||
"""
|
||||
try:
|
||||
channels = []
|
||||
for channel, config in settings.OAUTH_CONFIG.items():
|
||||
channels.append(
|
||||
{
|
||||
"channel": channel,
|
||||
"display_name": config.get("display_name", channel.title()),
|
||||
"icon": config.get("icon", "sso"),
|
||||
}
|
||||
)
|
||||
return get_json_result(data=channels)
|
||||
except Exception as e:
|
||||
logging.exception(e)
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail=f"Load channels failure, error: {str(e)}"
|
||||
)
|
||||
|
||||
@router.get("/login/{channel}")
|
||||
async def oauth_login(channel: str, request: Request):
|
||||
"""OAuth登录"""
|
||||
channel_config = settings.OAUTH_CONFIG.get(channel)
|
||||
if not channel_config:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_400_BAD_REQUEST,
|
||||
detail=f"Invalid channel name: {channel}"
|
||||
)
|
||||
|
||||
auth_cli = get_auth_client(channel_config)
|
||||
state = get_uuid()
|
||||
|
||||
# In FastAPI we need a session to store the state.
# This is simplified here; a real deployment should use FastAPI/Starlette session management.
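# A possible approach (an assumption, not part of this commit): register Starlette's
# SessionMiddleware on the app, e.g. app.add_middleware(SessionMiddleware, secret_key=settings.SECRET_KEY),
# then persist the value with request.session["oauth_state"] = state so the callback can verify it.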
|
||||
auth_url = auth_cli.get_authorization_url(state)
|
||||
return RedirectResponse(url=auth_url)
|
||||
|
||||
@router.get("/oauth/callback/{channel}")
|
||||
async def oauth_callback(channel: str, request: Request):
|
||||
"""
|
||||
Handle the OAuth/OIDC callback for various channels dynamically.
|
||||
"""
|
||||
try:
|
||||
channel_config = settings.OAUTH_CONFIG.get(channel)
|
||||
if not channel_config:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_400_BAD_REQUEST,
|
||||
detail=f"Invalid channel name: {channel}"
|
||||
)
|
||||
|
||||
auth_cli = get_auth_client(channel_config)
|
||||
|
||||
# Check the state
|
||||
state = request.query_params.get("state")
|
||||
# In a real deployment the state should be read back from the session and compared
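# A minimal sketch of that comparison, assuming SessionMiddleware is installed (see oauth_login above):
#     expected_state = request.session.pop("oauth_state", None)
#     if not state or state != expected_state:
#         return RedirectResponse(url="/?error=invalid_state")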
|
||||
if not state:
|
||||
return RedirectResponse(url="/?error=invalid_state")
|
||||
|
||||
# Obtain the authorization code
|
||||
code = request.query_params.get("code")
|
||||
if not code:
|
||||
return RedirectResponse(url="/?error=missing_code")
|
||||
|
||||
# Exchange the authorization code for an access token
|
||||
token_info = auth_cli.exchange_code_for_token(code)
|
||||
access_token = token_info.get("access_token")
|
||||
if not access_token:
|
||||
return RedirectResponse(url="/?error=token_failed")
|
||||
|
||||
id_token = token_info.get("id_token")
|
||||
|
||||
# Fetch user info
|
||||
user_info = auth_cli.fetch_user_info(access_token, id_token=id_token)
|
||||
if not user_info.email:
|
||||
return RedirectResponse(url="/?error=email_missing")
|
||||
|
||||
# Log in or register
|
||||
users = UserService.query(email=user_info.email)
|
||||
user_id = get_uuid()
|
||||
|
||||
if not users:
|
||||
try:
|
||||
try:
|
||||
avatar = download_img(user_info.avatar_url)
|
||||
except Exception as e:
|
||||
logging.exception(e)
|
||||
avatar = ""
|
||||
|
||||
users = user_register(
|
||||
user_id,
|
||||
{
|
||||
"access_token": get_uuid(),
|
||||
"email": user_info.email,
|
||||
"avatar": avatar,
|
||||
"nickname": user_info.nickname,
|
||||
"login_channel": channel,
|
||||
"last_login_time": get_format_time(),
|
||||
"is_superuser": False,
|
||||
},
|
||||
)
|
||||
|
||||
if not users:
|
||||
raise Exception(f"Failed to register {user_info.email}")
|
||||
if len(users) > 1:
|
||||
raise Exception(f"Same email: {user_info.email} exists!")
|
||||
|
||||
# Try to log in
|
||||
user = users[0]
|
||||
return RedirectResponse(url=f"/?auth={user.get_id()}")
|
||||
|
||||
except Exception as e:
|
||||
rollback_user_registration(user_id)
|
||||
logging.exception(e)
|
||||
return RedirectResponse(url=f"/?error={str(e)}")
|
||||
|
||||
# User exists, try to log in
|
||||
user = users[0]
|
||||
user.access_token = get_uuid()
|
||||
if user and hasattr(user, 'is_active') and user.is_active == "0":
|
||||
return RedirectResponse(url="/?error=user_inactive")
|
||||
|
||||
user.save()
|
||||
return RedirectResponse(url=f"/?auth={user.get_id()}")
|
||||
except Exception as e:
|
||||
logging.exception(e)
|
||||
return RedirectResponse(url=f"/?error={str(e)}")
|
||||
|
||||
@router.get("/logout")
|
||||
async def log_out(current_user = Depends(get_current_user)):
|
||||
"""
|
||||
User logout endpoint.
|
||||
"""
|
||||
current_user.access_token = f"INVALID_{secrets.token_hex(16)}"
|
||||
current_user.save()
|
||||
return get_json_result(data=True)
|
||||
|
||||
@router.post("/setting")
|
||||
async def setting_user(request: UserSettingRequest, current_user = Depends(get_current_user)):
|
||||
"""
|
||||
Update user settings.
|
||||
"""
|
||||
update_dict = {}
|
||||
request_data = request.dict()
|
||||
|
||||
if request_data.get("password"):
|
||||
new_password = request_data.get("new_password")
|
||||
if not check_password_hash(current_user.password, decrypt(request_data["password"])):
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_401_UNAUTHORIZED,
|
||||
detail="Password error!"
|
||||
)
|
||||
|
||||
if new_password:
|
||||
update_dict["password"] = generate_password_hash(decrypt(new_password))
|
||||
|
||||
for k in request_data.keys():
|
||||
if k in [
|
||||
"password",
|
||||
"new_password",
|
||||
"email",
|
||||
"status",
|
||||
"is_superuser",
|
||||
"login_channel",
|
||||
"is_anonymous",
|
||||
"is_active",
|
||||
"is_authenticated",
|
||||
"last_login_time",
|
||||
]:
|
||||
continue
|
||||
update_dict[k] = request_data[k]
|
||||
|
||||
try:
|
||||
UserService.update_by_id(current_user.id, update_dict)
|
||||
return get_json_result(data=True)
|
||||
except Exception as e:
|
||||
logging.exception(e)
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail="Update failure!"
|
||||
)
|
||||
|
||||
@router.get("/info")
|
||||
async def user_profile(current_user = Depends(get_current_user)):
|
||||
"""
|
||||
Get user profile information.
|
||||
"""
|
||||
return get_json_result(data=current_user.to_dict())
|
||||
|
||||
def rollback_user_registration(user_id):
|
||||
"""回滚用户注册"""
|
||||
try:
|
||||
UserService.delete_by_id(user_id)
|
||||
except Exception:
|
||||
pass
|
||||
try:
|
||||
TenantService.delete_by_id(user_id)
|
||||
except Exception:
|
||||
pass
|
||||
try:
|
||||
u = UserTenantService.query(tenant_id=user_id)
|
||||
if u:
|
||||
UserTenantService.delete_by_id(u[0].id)
|
||||
except Exception:
|
||||
pass
|
||||
try:
|
||||
TenantLLM.delete().where(TenantLLM.tenant_id == user_id).execute()
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
def user_register(user_id, user):
|
||||
"""用户注册"""
|
||||
user["id"] = user_id
|
||||
tenant = {
|
||||
"id": user_id,
|
||||
"name": user["nickname"] + "'s Kingdom",
|
||||
"llm_id": settings.CHAT_MDL,
|
||||
"embd_id": settings.EMBEDDING_MDL,
|
||||
"asr_id": settings.ASR_MDL,
|
||||
"parser_ids": settings.PARSERS,
|
||||
"img2txt_id": settings.IMAGE2TEXT_MDL,
|
||||
"rerank_id": settings.RERANK_MDL,
|
||||
}
|
||||
usr_tenant = {
|
||||
"tenant_id": user_id,
|
||||
"user_id": user_id,
|
||||
"invited_by": user_id,
|
||||
"role": UserTenantRole.OWNER,
|
||||
}
|
||||
file_id = get_uuid()
|
||||
file = {
|
||||
"id": file_id,
|
||||
"parent_id": file_id,
|
||||
"tenant_id": user_id,
|
||||
"created_by": user_id,
|
||||
"name": "/",
|
||||
"type": FileType.FOLDER.value,
|
||||
"size": 0,
|
||||
"location": "",
|
||||
}
|
||||
|
||||
tenant_llm = get_init_tenant_llm(user_id)
|
||||
|
||||
if not UserService.save(**user):
|
||||
return
|
||||
TenantService.insert(**tenant)
|
||||
UserTenantService.insert(**usr_tenant)
|
||||
TenantLLMService.insert_many(tenant_llm)
|
||||
FileService.insert(file)
|
||||
return UserService.query(email=user["email"])
|
||||
|
||||
@router.post("/register")
|
||||
async def user_add(request: RegisterRequest):
|
||||
"""
|
||||
Register a new user.
|
||||
"""
|
||||
if not settings.REGISTER_ENABLED:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_403_FORBIDDEN,
|
||||
detail="User registration is disabled!"
|
||||
)
|
||||
|
||||
email_address = request.email
|
||||
|
||||
# Validate the email address
|
||||
if not re.match(r"^[\w\._-]+@([\w_-]+\.)+[\w-]{2,}$", email_address):
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_400_BAD_REQUEST,
|
||||
detail=f"Invalid email address: {email_address}!"
|
||||
)
|
||||
|
||||
# Check if the email address is already in use
|
||||
if UserService.query(email=email_address):
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_400_BAD_REQUEST,
|
||||
detail=f"Email: {email_address} has already registered!"
|
||||
)
|
||||
|
||||
# Construct user info data
|
||||
nickname = request.nickname
|
||||
user_dict = {
|
||||
"access_token": get_uuid(),
|
||||
"email": email_address,
|
||||
"nickname": nickname,
|
||||
"password": decrypt(request.password),
|
||||
"login_channel": "password",
|
||||
"last_login_time": get_format_time(),
|
||||
"is_superuser": False,
|
||||
}
|
||||
|
||||
user_id = get_uuid()
|
||||
try:
|
||||
users = user_register(user_id, user_dict)
|
||||
if not users:
|
||||
raise Exception(f"Fail to register {email_address}.")
|
||||
if len(users) > 1:
|
||||
raise Exception(f"Same email: {email_address} exists!")
|
||||
user = users[0]
|
||||
return construct_response(
|
||||
data=user.to_json(),
|
||||
auth=user.get_id(),
|
||||
message=f"{nickname}, welcome aboard!",
|
||||
)
|
||||
except Exception as e:
|
||||
rollback_user_registration(user_id)
|
||||
logging.exception(e)
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail=f"User registration failure, error: {str(e)}"
|
||||
)
|
||||
|
||||
@router.get("/tenant_info")
|
||||
async def tenant_info(current_user = Depends(get_current_user)):
|
||||
"""
|
||||
Get tenant information.
|
||||
"""
|
||||
try:
|
||||
tenants = TenantService.get_info_by(current_user.id)
|
||||
if not tenants:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_404_NOT_FOUND,
|
||||
detail="Tenant not found!"
|
||||
)
|
||||
return get_json_result(data=tenants[0])
|
||||
except HTTPException:
# Propagate HTTP errors (such as the 404 above) unchanged instead of converting them to 500.
raise
except Exception as e:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail=str(e)
|
||||
)
|
||||
|
||||
@router.post("/set_tenant_info")
|
||||
async def set_tenant_info(request: TenantInfoRequest, current_user = Depends(get_current_user)):
|
||||
"""
|
||||
Update tenant information.
|
||||
"""
|
||||
try:
|
||||
req_dict = request.dict()
|
||||
tid = req_dict.pop("tenant_id")
|
||||
TenantService.update_by_id(tid, req_dict)
|
||||
return get_json_result(data=True)
|
||||
except Exception as e:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail=str(e)
|
||||
)
|
||||
2
api/common/README.md
Normal file
@@ -0,0 +1,2 @@
|
||||
The Python files in this directory are shared between services. They contain common utilities, models, and functions that can be used across various
services to ensure consistency and reduce code duplication.
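As a hedged illustration (not part of this commit), a service could reuse the base64 helper added
below in api/common/base64.py like this, assuming the api package is importable from that service:

from api.common.base64 import encode_to_base64

# Encodes the UTF-8 bytes of the string; prints "aGVsbG8="
print(encode_to_base64("hello"))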
|
||||
21
api/common/base64.py
Normal file
@@ -0,0 +1,21 @@
|
||||
#
|
||||
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import base64
|
||||
|
||||
def encode_to_base64(input_string):
|
||||
base64_encoded = base64.b64encode(input_string.encode('utf-8'))
|
||||
return base64_encoded.decode('utf-8')
|
||||
59
api/common/check_team_permission.py
Normal file
@@ -0,0 +1,59 @@
|
||||
#
|
||||
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
|
||||
from api.db import TenantPermission
|
||||
from api.db.db_models import File, Knowledgebase
|
||||
from api.db.services.file_service import FileService
|
||||
from api.db.services.knowledgebase_service import KnowledgebaseService
|
||||
from api.db.services.user_service import TenantService
|
||||
|
||||
|
||||
def check_kb_team_permission(kb: dict | Knowledgebase, other: str) -> bool:
|
||||
kb = kb.to_dict() if isinstance(kb, Knowledgebase) else kb
|
||||
|
||||
kb_tenant_id = kb["tenant_id"]
|
||||
|
||||
if kb_tenant_id == other:
|
||||
return True
|
||||
|
||||
if kb["permission"] != TenantPermission.TEAM:
|
||||
return False
|
||||
|
||||
joined_tenants = TenantService.get_joined_tenants_by_user_id(other)
|
||||
return any(tenant["tenant_id"] == kb_tenant_id for tenant in joined_tenants)
|
||||
|
||||
|
||||
def check_file_team_permission(file: dict | File, other: str) -> bool:
|
||||
file = file.to_dict() if isinstance(file, File) else file
|
||||
|
||||
file_tenant_id = file["tenant_id"]
|
||||
if file_tenant_id == other:
|
||||
return True
|
||||
|
||||
file_id = file["id"]
|
||||
|
||||
kb_ids = [kb_info["kb_id"] for kb_info in FileService.get_kb_id_by_file_id(file_id)]
|
||||
|
||||
for kb_id in kb_ids:
|
||||
ok, kb = KnowledgebaseService.get_by_id(kb_id)
|
||||
if not ok:
|
||||
continue
|
||||
|
||||
if check_kb_team_permission(kb, other):
|
||||
return True
|
||||
|
||||
return False
|
||||
21
api/common/exceptions.py
Normal file
@@ -0,0 +1,21 @@
|
||||
class AdminException(Exception):
|
||||
def __init__(self, message, code=400):
|
||||
super().__init__(message)
|
||||
self.type = "admin"
|
||||
self.code = code
|
||||
self.message = message
|
||||
|
||||
|
||||
class UserNotFoundError(AdminException):
|
||||
def __init__(self, username):
|
||||
super().__init__(f"User '{username}' not found", 404)
|
||||
|
||||
|
||||
class UserAlreadyExistsError(AdminException):
|
||||
def __init__(self, username):
|
||||
super().__init__(f"User '{username}' already exists", 409)
|
||||
|
||||
|
||||
class CannotDeleteAdminError(AdminException):
|
||||
def __init__(self):
|
||||
super().__init__("Cannot delete admin account", 403)
|
||||
28
api/constants.py
Normal file
@@ -0,0 +1,28 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
NAME_LENGTH_LIMIT = 2**10
|
||||
|
||||
IMG_BASE64_PREFIX = "data:image/png;base64,"
|
||||
|
||||
SERVICE_CONF = "service_conf.yaml"
|
||||
|
||||
API_VERSION = "v1"
|
||||
RAG_FLOW_SERVICE_NAME = "ragflow"
|
||||
REQUEST_WAIT_SEC = 2
|
||||
REQUEST_MAX_WAIT_SEC = 300
|
||||
|
||||
DATASET_NAME_LIMIT = 128
|
||||
FILE_NAME_LEN_LIMIT = 255
|
||||
141
api/db/__init__.py
Normal file
@@ -0,0 +1,141 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
from enum import Enum
|
||||
from enum import IntEnum
|
||||
from strenum import StrEnum
|
||||
|
||||
|
||||
class StatusEnum(Enum):
|
||||
VALID = "1"
|
||||
INVALID = "0"
|
||||
|
||||
|
||||
class ActiveEnum(Enum):
|
||||
ACTIVE = "1"
|
||||
INACTIVE = "0"
|
||||
|
||||
|
||||
class UserTenantRole(StrEnum):
|
||||
OWNER = 'owner'
|
||||
ADMIN = 'admin'
|
||||
NORMAL = 'normal'
|
||||
INVITE = 'invite'
|
||||
|
||||
|
||||
class TenantPermission(StrEnum):
|
||||
ME = 'me'
|
||||
TEAM = 'team'
|
||||
|
||||
|
||||
class SerializedType(IntEnum):
|
||||
PICKLE = 1
|
||||
JSON = 2
|
||||
|
||||
|
||||
class FileType(StrEnum):
|
||||
PDF = 'pdf'
|
||||
DOC = 'doc'
|
||||
VISUAL = 'visual'
|
||||
AURAL = 'aural'
|
||||
VIRTUAL = 'virtual'
|
||||
FOLDER = 'folder'
|
||||
OTHER = "other"
|
||||
|
||||
VALID_FILE_TYPES = {FileType.PDF, FileType.DOC, FileType.VISUAL, FileType.AURAL, FileType.VIRTUAL, FileType.FOLDER, FileType.OTHER}
|
||||
|
||||
class LLMType(StrEnum):
|
||||
CHAT = 'chat'
|
||||
EMBEDDING = 'embedding'
|
||||
SPEECH2TEXT = 'speech2text'
|
||||
IMAGE2TEXT = 'image2text'
|
||||
RERANK = 'rerank'
|
||||
TTS = 'tts'
|
||||
|
||||
|
||||
class ChatStyle(StrEnum):
|
||||
CREATIVE = 'Creative'
|
||||
PRECISE = 'Precise'
|
||||
EVENLY = 'Evenly'
|
||||
CUSTOM = 'Custom'
|
||||
|
||||
|
||||
class TaskStatus(StrEnum):
|
||||
UNSTART = "0"
|
||||
RUNNING = "1"
|
||||
CANCEL = "2"
|
||||
DONE = "3"
|
||||
FAIL = "4"
|
||||
|
||||
|
||||
VALID_TASK_STATUS = {TaskStatus.UNSTART, TaskStatus.RUNNING, TaskStatus.CANCEL, TaskStatus.DONE, TaskStatus.FAIL}
|
||||
|
||||
|
||||
class ParserType(StrEnum):
|
||||
PRESENTATION = "presentation"
|
||||
LAWS = "laws"
|
||||
MANUAL = "manual"
|
||||
PAPER = "paper"
|
||||
RESUME = "resume"
|
||||
BOOK = "book"
|
||||
QA = "qa"
|
||||
TABLE = "table"
|
||||
NAIVE = "naive"
|
||||
PICTURE = "picture"
|
||||
ONE = "one"
|
||||
AUDIO = "audio"
|
||||
EMAIL = "email"
|
||||
KG = "knowledge_graph"
|
||||
TAG = "tag"
|
||||
|
||||
|
||||
class FileSource(StrEnum):
|
||||
LOCAL = ""
|
||||
KNOWLEDGEBASE = "knowledgebase"
|
||||
S3 = "s3"
|
||||
|
||||
|
||||
class CanvasType(StrEnum):
|
||||
ChatBot = "chatbot"
|
||||
DocBot = "docbot"
|
||||
|
||||
|
||||
class CanvasCategory(StrEnum):
|
||||
Agent = "agent_canvas"
|
||||
DataFlow = "dataflow_canvas"
|
||||
|
||||
VALID_CANVAS_CATEGORIES = {CanvasCategory.Agent, CanvasCategory.DataFlow}
|
||||
|
||||
|
||||
class MCPServerType(StrEnum):
|
||||
SSE = "sse"
|
||||
STREAMABLE_HTTP = "streamable-http"
|
||||
|
||||
|
||||
VALID_MCP_SERVER_TYPES = {MCPServerType.SSE, MCPServerType.STREAMABLE_HTTP}
|
||||
|
||||
|
||||
class PipelineTaskType(StrEnum):
|
||||
PARSE = "Parse"
|
||||
DOWNLOAD = "Download"
|
||||
RAPTOR = "RAPTOR"
|
||||
GRAPH_RAG = "GraphRAG"
|
||||
MINDMAP = "Mindmap"
|
||||
|
||||
|
||||
VALID_PIPELINE_TASK_TYPES = {PipelineTaskType.PARSE, PipelineTaskType.DOWNLOAD, PipelineTaskType.RAPTOR, PipelineTaskType.GRAPH_RAG, PipelineTaskType.MINDMAP}
|
||||
|
||||
|
||||
KNOWLEDGEBASE_FOLDER_NAME=".knowledgebase"
|
||||
1146
api/db/db_models.py
Normal file
File diff suppressed because it is too large
128
api/db/db_utils.py
Normal file
@@ -0,0 +1,128 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import operator
|
||||
from functools import reduce
|
||||
|
||||
from playhouse.pool import PooledMySQLDatabase
|
||||
|
||||
from api.utils import current_timestamp, timestamp_to_date
|
||||
|
||||
from api.db.db_models import DB, DataBaseModel
|
||||
|
||||
|
||||
@DB.connection_context()
|
||||
def bulk_insert_into_db(model, data_source, replace_on_conflict=False):
|
||||
DB.create_tables([model])
|
||||
|
||||
for i, data in enumerate(data_source):
|
||||
current_time = current_timestamp() + i
|
||||
current_date = timestamp_to_date(current_time)
|
||||
if 'create_time' not in data:
|
||||
data['create_time'] = current_time
|
||||
data['create_date'] = timestamp_to_date(data['create_time'])
|
||||
data['update_time'] = current_time
|
||||
data['update_date'] = current_date
|
||||
|
||||
preserve = tuple(data_source[0].keys() - {'create_time', 'create_date'})
|
||||
|
||||
batch_size = 1000
|
||||
|
||||
for i in range(0, len(data_source), batch_size):
|
||||
with DB.atomic():
|
||||
query = model.insert_many(data_source[i:i + batch_size])
|
||||
if replace_on_conflict:
|
||||
if isinstance(DB, PooledMySQLDatabase):
|
||||
query = query.on_conflict(preserve=preserve)
|
||||
else:
|
||||
query = query.on_conflict(conflict_target="id", preserve=preserve)
|
||||
query.execute()
|
||||
|
||||
|
||||
def get_dynamic_db_model(base, job_id):
|
||||
return type(base.model(
|
||||
table_index=get_dynamic_tracking_table_index(job_id=job_id)))
|
||||
|
||||
|
||||
def get_dynamic_tracking_table_index(job_id):
|
||||
return job_id[:8]
|
||||
|
||||
|
||||
def fill_db_model_object(model_object, human_model_dict):
|
||||
for k, v in human_model_dict.items():
|
||||
attr_name = 'f_%s' % k
|
||||
if hasattr(model_object.__class__, attr_name):
|
||||
setattr(model_object, attr_name, v)
|
||||
return model_object
|
||||
|
||||
|
||||
# https://docs.peewee-orm.com/en/latest/peewee/query_operators.html
|
||||
supported_operators = {
|
||||
'==': operator.eq,
|
||||
'<': operator.lt,
|
||||
'<=': operator.le,
|
||||
'>': operator.gt,
|
||||
'>=': operator.ge,
|
||||
'!=': operator.ne,
|
||||
'<<': operator.lshift,
|
||||
'>>': operator.rshift,
|
||||
'%': operator.mod,
|
||||
'**': operator.pow,
|
||||
'^': operator.xor,
|
||||
'~': operator.inv,
|
||||
}
|
||||
|
||||
|
||||
def query_dict2expression(
|
||||
model: type[DataBaseModel], query: dict[str, bool | int | str | list | tuple]):
|
||||
expression = []
|
||||
|
||||
for field, value in query.items():
|
||||
if not isinstance(value, (list, tuple)):
|
||||
value = ('==', value)
|
||||
op, *val = value
|
||||
|
||||
field = getattr(model, f'f_{field}')
|
||||
value = supported_operators[op](
|
||||
field, val[0]) if op in supported_operators else getattr(
|
||||
field, op)(
|
||||
*val)
|
||||
expression.append(value)
|
||||
|
||||
return reduce(operator.iand, expression)
|
||||
|
||||
|
||||
def query_db(model: type[DataBaseModel], limit: int = 0, offset: int = 0,
|
||||
query: dict = None, order_by: str | list | tuple | None = None):
|
||||
data = model.select()
|
||||
if query:
|
||||
data = data.where(query_dict2expression(model, query))
|
||||
count = data.count()
|
||||
|
||||
if not order_by:
|
||||
order_by = 'create_time'
|
||||
if not isinstance(order_by, (list, tuple)):
|
||||
order_by = (order_by, 'asc')
|
||||
order_by, order = order_by
|
||||
order_by = getattr(model, f'f_{order_by}')
|
||||
order_by = getattr(order_by, order)()
|
||||
data = data.order_by(order_by)
|
||||
|
||||
if limit > 0:
|
||||
data = data.limit(limit)
|
||||
if offset > 0:
|
||||
data = data.offset(offset)
|
||||
|
||||
return list(data), count
|
||||
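For orientation, query_db ties the helpers above together: it converts a plain dict into a peewee expression via query_dict2expression, applies ordering on an f_-prefixed column, and paginates. A minimal usage sketch follows; the Task model is hypothetical (any DataBaseModel subclass with f_status, f_retries and f_create_time columns would do), so this is illustrative rather than part of the commit.

# Illustrative sketch only -- `Task` is a hypothetical DataBaseModel subclass
# with f_status, f_retries and f_create_time columns.
from api.db.db_utils import query_db

# "status == 'done' AND retries >= 3", newest first, second page of 20 rows
rows, total = query_db(
    Task,
    limit=20,
    offset=20,
    query={"status": "done", "retries": (">=", 3)},
    order_by=("create_time", "desc"),
)
print(total, [r.f_status for r in rows])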
179
api/db/init_data.py
Normal file
@@ -0,0 +1,179 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import logging
|
||||
import json
|
||||
import os
|
||||
import time
|
||||
import uuid
|
||||
from copy import deepcopy
|
||||
|
||||
from api.db import LLMType, UserTenantRole
|
||||
from api.db.db_models import init_database_tables as init_web_db, LLMFactories, LLM, TenantLLM
|
||||
from api.db.services import UserService
|
||||
from api.db.services.canvas_service import CanvasTemplateService
|
||||
from api.db.services.document_service import DocumentService
|
||||
from api.db.services.knowledgebase_service import KnowledgebaseService
|
||||
from api.db.services.tenant_llm_service import LLMFactoriesService, TenantLLMService
|
||||
from api.db.services.llm_service import LLMService, LLMBundle, get_init_tenant_llm
|
||||
from api.db.services.user_service import TenantService, UserTenantService
|
||||
from api import settings
|
||||
from api.utils.file_utils import get_project_base_directory
|
||||
from api.common.base64 import encode_to_base64
|
||||
|
||||
|
||||
def init_superuser():
|
||||
user_info = {
|
||||
"id": uuid.uuid1().hex,
|
||||
"password": encode_to_base64("admin"),
|
||||
"nickname": "admin",
|
||||
"is_superuser": True,
|
||||
"email": "admin@ragflow.io",
|
||||
"creator": "system",
|
||||
"status": "1",
|
||||
}
|
||||
tenant = {
|
||||
"id": user_info["id"],
|
||||
"name": user_info["nickname"] + "‘s Kingdom",
|
||||
"llm_id": settings.CHAT_MDL,
|
||||
"embd_id": settings.EMBEDDING_MDL,
|
||||
"asr_id": settings.ASR_MDL,
|
||||
"parser_ids": settings.PARSERS,
|
||||
"img2txt_id": settings.IMAGE2TEXT_MDL
|
||||
}
|
||||
usr_tenant = {
|
||||
"tenant_id": user_info["id"],
|
||||
"user_id": user_info["id"],
|
||||
"invited_by": user_info["id"],
|
||||
"role": UserTenantRole.OWNER
|
||||
}
|
||||
|
||||
tenant_llm = get_init_tenant_llm(user_info["id"])
|
||||
|
||||
if not UserService.save(**user_info):
|
||||
logging.error("can't init admin.")
|
||||
return
|
||||
TenantService.insert(**tenant)
|
||||
UserTenantService.insert(**usr_tenant)
|
||||
TenantLLMService.insert_many(tenant_llm)
|
||||
logging.info(
|
||||
"Super user initialized. email: admin@ragflow.io, password: admin. Changing the password after login is strongly recommended.")
|
||||
|
||||
chat_mdl = LLMBundle(tenant["id"], LLMType.CHAT, tenant["llm_id"])
|
||||
msg = chat_mdl.chat(system="", history=[
|
||||
{"role": "user", "content": "Hello!"}], gen_conf={})
|
||||
if msg.find("ERROR: ") == 0:
|
||||
logging.error(
|
||||
"'{}' doesn't work. {}".format(
|
||||
tenant["llm_id"],
|
||||
msg))
|
||||
embd_mdl = LLMBundle(tenant["id"], LLMType.EMBEDDING, tenant["embd_id"])
|
||||
v, c = embd_mdl.encode(["Hello!"])
|
||||
if c == 0:
|
||||
logging.error(
|
||||
"'{}' doesn't work!".format(
|
||||
tenant["embd_id"]))
|
||||
|
||||
|
||||
def init_llm_factory():
|
||||
try:
|
||||
LLMService.filter_delete([(LLM.fid == "MiniMax") | (LLM.fid == "Minimax")])
|
||||
LLMService.filter_delete([(LLM.fid == "cohere")])
|
||||
LLMFactoriesService.filter_delete([LLMFactories.name == "cohere"])
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
factory_llm_infos = settings.FACTORY_LLM_INFOS
|
||||
for factory_llm_info in factory_llm_infos:
|
||||
info = deepcopy(factory_llm_info)
|
||||
llm_infos = info.pop("llm")
|
||||
try:
|
||||
LLMFactoriesService.save(**info)
|
||||
except Exception:
|
||||
pass
|
||||
LLMService.filter_delete([LLM.fid == factory_llm_info["name"]])
|
||||
for llm_info in llm_infos:
|
||||
llm_info["fid"] = factory_llm_info["name"]
|
||||
try:
|
||||
LLMService.save(**llm_info)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
LLMFactoriesService.filter_delete([(LLMFactories.name == "Local") | (LLMFactories.name == "novita.ai")])
|
||||
LLMService.filter_delete([LLM.fid == "Local"])
|
||||
LLMService.filter_delete([LLM.llm_name == "qwen-vl-max"])
|
||||
LLMService.filter_delete([LLM.fid == "Moonshot", LLM.llm_name == "flag-embedding"])
|
||||
TenantLLMService.filter_delete([TenantLLM.llm_factory == "Moonshot", TenantLLM.llm_name == "flag-embedding"])
|
||||
LLMFactoriesService.filter_delete([LLMFactoriesService.model.name == "QAnything"])
|
||||
LLMService.filter_delete([LLMService.model.fid == "QAnything"])
|
||||
TenantLLMService.filter_update([TenantLLMService.model.llm_factory == "QAnything"], {"llm_factory": "Youdao"})
|
||||
TenantLLMService.filter_update([TenantLLMService.model.llm_factory == "cohere"], {"llm_factory": "Cohere"})
|
||||
TenantService.filter_update([1 == 1], {
|
||||
"parser_ids": "naive:General,qa:Q&A,resume:Resume,manual:Manual,table:Table,paper:Paper,book:Book,laws:Laws,presentation:Presentation,picture:Picture,one:One,audio:Audio,email:Email,tag:Tag"})
|
||||
## insert openai two embedding models to the current openai user.
|
||||
# print("Start to insert 2 OpenAI embedding models...")
|
||||
tenant_ids = set([row["tenant_id"] for row in TenantLLMService.get_openai_models()])
|
||||
for tid in tenant_ids:
|
||||
for row in TenantLLMService.query(llm_factory="OpenAI", tenant_id=tid):
|
||||
row = row.to_dict()
|
||||
row["model_type"] = LLMType.EMBEDDING.value
|
||||
row["llm_name"] = "text-embedding-3-small"
|
||||
row["used_tokens"] = 0
|
||||
try:
|
||||
TenantLLMService.save(**row)
|
||||
row = deepcopy(row)
|
||||
row["llm_name"] = "text-embedding-3-large"
|
||||
TenantLLMService.save(**row)
|
||||
except Exception:
|
||||
pass
|
||||
break
|
||||
doc_count = DocumentService.get_all_kb_doc_count()
|
||||
for kb_id in KnowledgebaseService.get_all_ids():
|
||||
KnowledgebaseService.update_document_number_in_init(kb_id=kb_id, doc_num=doc_count.get(kb_id, 0))
|
||||
|
||||
|
||||
|
||||
def add_graph_templates():
|
||||
dir = os.path.join(get_project_base_directory(), "agent", "templates")
|
||||
CanvasTemplateService.filter_delete([1 == 1])
|
||||
if not os.path.exists(dir):
|
||||
logging.warning("Missing agent templates!")
|
||||
return
|
||||
|
||||
for fnm in os.listdir(dir):
|
||||
try:
|
||||
cnvs = json.load(open(os.path.join(dir, fnm), "r",encoding="utf-8"))
|
||||
try:
|
||||
CanvasTemplateService.save(**cnvs)
|
||||
except Exception:
|
||||
CanvasTemplateService.update_by_id(cnvs["id"], cnvs)
|
||||
except Exception:
|
||||
logging.exception("Add agent templates error: ")
|
||||
|
||||
|
||||
def init_web_data():
|
||||
start_time = time.time()
|
||||
|
||||
init_llm_factory()
|
||||
# if not UserService.get_all().count():
|
||||
# init_superuser()
|
||||
|
||||
add_graph_templates()
|
||||
logging.info("init web data success:{}".format(time.time() - start_time))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
init_web_db()
|
||||
init_web_data()
|
||||
0
api/db/joint_services/__init__.py
Normal file
327
api/db/joint_services/user_account_service.py
Normal file
@@ -0,0 +1,327 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import logging
|
||||
import uuid
|
||||
|
||||
from api import settings
|
||||
from api.utils.api_utils import group_by
|
||||
from api.db import FileType, UserTenantRole, ActiveEnum
|
||||
from api.db.services.api_service import APITokenService, API4ConversationService
|
||||
from api.db.services.canvas_service import UserCanvasService
|
||||
from api.db.services.conversation_service import ConversationService
|
||||
from api.db.services.dialog_service import DialogService
|
||||
from api.db.services.document_service import DocumentService
|
||||
from api.db.services.file2document_service import File2DocumentService
|
||||
from api.db.services.knowledgebase_service import KnowledgebaseService
|
||||
from api.db.services.langfuse_service import TenantLangfuseService
|
||||
from api.db.services.llm_service import get_init_tenant_llm
|
||||
from api.db.services.file_service import FileService
|
||||
from api.db.services.mcp_server_service import MCPServerService
|
||||
from api.db.services.search_service import SearchService
|
||||
from api.db.services.task_service import TaskService
|
||||
from api.db.services.tenant_llm_service import TenantLLMService
|
||||
from api.db.services.user_canvas_version import UserCanvasVersionService
|
||||
from api.db.services.user_service import TenantService, UserService, UserTenantService
|
||||
from rag.utils.storage_factory import STORAGE_IMPL
|
||||
from rag.nlp import search
|
||||
|
||||
|
||||
def create_new_user(user_info: dict) -> dict:
|
||||
"""
|
||||
Add a new user, and create tenant, tenant llm, file folder for new user.
|
||||
:param user_info: {
|
||||
"email": <example@example.com>,
|
||||
"nickname": <str, "name">,
|
||||
"password": <decrypted password>,
|
||||
"login_channel": <enum, "password">,
|
||||
"is_superuser": <bool, role == "admin">,
|
||||
}
|
||||
:return: {
|
||||
"success": <bool>,
|
||||
"user_info": <dict>, # if true, return user_info
|
||||
}
|
||||
"""
|
||||
# generate user_id and access_token for user
|
||||
user_id = uuid.uuid1().hex
|
||||
user_info['id'] = user_id
|
||||
user_info['access_token'] = uuid.uuid1().hex
|
||||
# construct tenant info
|
||||
tenant = {
|
||||
"id": user_id,
|
||||
"name": user_info["nickname"] + "‘s Kingdom",
|
||||
"llm_id": settings.CHAT_MDL,
|
||||
"embd_id": settings.EMBEDDING_MDL,
|
||||
"asr_id": settings.ASR_MDL,
|
||||
"parser_ids": settings.PARSERS,
|
||||
"img2txt_id": settings.IMAGE2TEXT_MDL,
|
||||
"rerank_id": settings.RERANK_MDL,
|
||||
}
|
||||
usr_tenant = {
|
||||
"tenant_id": user_id,
|
||||
"user_id": user_id,
|
||||
"invited_by": user_id,
|
||||
"role": UserTenantRole.OWNER,
|
||||
}
|
||||
# construct file folder info
|
||||
file_id = uuid.uuid1().hex
|
||||
file = {
|
||||
"id": file_id,
|
||||
"parent_id": file_id,
|
||||
"tenant_id": user_id,
|
||||
"created_by": user_id,
|
||||
"name": "/",
|
||||
"type": FileType.FOLDER.value,
|
||||
"size": 0,
|
||||
"location": "",
|
||||
}
|
||||
try:
|
||||
tenant_llm = get_init_tenant_llm(user_id)
|
||||
|
||||
if not UserService.save(**user_info):
|
||||
return {"success": False}
|
||||
|
||||
TenantService.insert(**tenant)
|
||||
UserTenantService.insert(**usr_tenant)
|
||||
TenantLLMService.insert_many(tenant_llm)
|
||||
FileService.insert(file)
|
||||
|
||||
return {
|
||||
"success": True,
|
||||
"user_info": user_info,
|
||||
}
|
||||
|
||||
except Exception as create_error:
|
||||
logging.exception(create_error)
|
||||
# rollback
|
||||
try:
|
||||
TenantService.delete_by_id(user_id)
|
||||
except Exception as e:
|
||||
logging.exception(e)
|
||||
try:
|
||||
u = UserTenantService.query(tenant_id=user_id)
|
||||
if u:
|
||||
UserTenantService.delete_by_id(u[0].id)
|
||||
except Exception as e:
|
||||
logging.exception(e)
|
||||
try:
|
||||
TenantLLMService.delete_by_tenant_id(user_id)
|
||||
except Exception as e:
|
||||
logging.exception(e)
|
||||
try:
|
||||
FileService.delete_by_id(file["id"])
|
||||
except Exception as e:
|
||||
logging.exception(e)
|
||||
# delete user row finally
|
||||
try:
|
||||
UserService.delete_by_id(user_id)
|
||||
except Exception as e:
|
||||
logging.exception(e)
|
||||
# reraise
|
||||
raise create_error
|
||||
|
||||
|
||||
def delete_user_data(user_id: str) -> dict:
|
||||
# use user_id to delete
|
||||
usr = UserService.filter_by_id(user_id)
|
||||
if not usr:
|
||||
return {"success": False, "message": f"{user_id} can't be found."}
|
||||
# check is inactive and not admin
|
||||
if usr.is_active == ActiveEnum.ACTIVE.value:
|
||||
return {"success": False, "message": f"{user_id} is active and can't be deleted."}
|
||||
if usr.is_superuser:
|
||||
return {"success": False, "message": "Can't delete the super user."}
|
||||
# tenant info
|
||||
tenants = UserTenantService.get_user_tenant_relation_by_user_id(usr.id)
|
||||
owned_tenant = [t for t in tenants if t["role"] == UserTenantRole.OWNER.value]
|
||||
|
||||
done_msg = ''
|
||||
try:
|
||||
# step1. delete owned tenant info
|
||||
if owned_tenant:
|
||||
done_msg += "Start to delete owned tenant.\n"
|
||||
tenant_id = owned_tenant[0]["tenant_id"]
|
||||
kb_ids = KnowledgebaseService.get_kb_ids(usr.id)
|
||||
# step1.1 delete knowledgebase related file and info
|
||||
if kb_ids:
|
||||
# step1.1.1 delete files in storage, remove bucket
|
||||
for kb_id in kb_ids:
|
||||
if STORAGE_IMPL.bucket_exists(kb_id):
|
||||
STORAGE_IMPL.remove_bucket(kb_id)
|
||||
done_msg += f"- Removed {len(kb_ids)} dataset's buckets.\n"
|
||||
# step1.1.2 delete file and document info in db
|
||||
doc_ids = DocumentService.get_all_doc_ids_by_kb_ids(kb_ids)
|
||||
if doc_ids:
|
||||
doc_delete_res = DocumentService.delete_by_ids([i["id"] for i in doc_ids])
|
||||
done_msg += f"- Deleted {doc_delete_res} document records.\n"
|
||||
task_delete_res = TaskService.delete_by_doc_ids([i["id"] for i in doc_ids])
|
||||
done_msg += f"- Deleted {task_delete_res} task records.\n"
|
||||
file_ids = FileService.get_all_file_ids_by_tenant_id(usr.id)
|
||||
if file_ids:
|
||||
file_delete_res = FileService.delete_by_ids([f["id"] for f in file_ids])
|
||||
done_msg += f"- Deleted {file_delete_res} file records.\n"
|
||||
if doc_ids or file_ids:
|
||||
file2doc_delete_res = File2DocumentService.delete_by_document_ids_or_file_ids(
|
||||
[i["id"] for i in doc_ids],
|
||||
[f["id"] for f in file_ids]
|
||||
)
|
||||
done_msg += f"- Deleted {file2doc_delete_res} document-file relation records.\n"
|
||||
# step1.1.3 delete chunk in es
|
||||
r = settings.docStoreConn.delete({"kb_id": kb_ids},
|
||||
search.index_name(tenant_id), kb_ids)
|
||||
done_msg += f"- Deleted {r} chunk records.\n"
|
||||
kb_delete_res = KnowledgebaseService.delete_by_ids(kb_ids)
|
||||
done_msg += f"- Deleted {kb_delete_res} knowledgebase records.\n"
|
||||
# step1.1.4 delete agents
|
||||
agent_delete_res = delete_user_agents(usr.id)
|
||||
done_msg += f"- Deleted {agent_delete_res['agents_deleted_count']} agent, {agent_delete_res['version_deleted_count']} versions records.\n"
|
||||
# step1.1.5 delete dialogs
|
||||
dialog_delete_res = delete_user_dialogs(usr.id)
|
||||
done_msg += f"- Deleted {dialog_delete_res['dialogs_deleted_count']} dialogs, {dialog_delete_res['conversations_deleted_count']} conversations, {dialog_delete_res['api_token_deleted_count']} api tokens, {dialog_delete_res['api4conversation_deleted_count']} api4conversations.\n"
|
||||
# step1.1.6 delete mcp server
|
||||
mcp_delete_res = MCPServerService.delete_by_tenant_id(usr.id)
|
||||
done_msg += f"- Deleted {mcp_delete_res} MCP server.\n"
|
||||
# step1.1.7 delete search
|
||||
search_delete_res = SearchService.delete_by_tenant_id(usr.id)
|
||||
done_msg += f"- Deleted {search_delete_res} search records.\n"
|
||||
# step1.2 delete tenant_llm and tenant_langfuse
|
||||
llm_delete_res = TenantLLMService.delete_by_tenant_id(tenant_id)
|
||||
done_msg += f"- Deleted {llm_delete_res} tenant-LLM records.\n"
|
||||
langfuse_delete_res = TenantLangfuseService.delete_ty_tenant_id(tenant_id)
|
||||
done_msg += f"- Deleted {langfuse_delete_res} langfuse records.\n"
|
||||
# step1.3 delete own tenant
|
||||
tenant_delete_res = TenantService.delete_by_id(tenant_id)
|
||||
done_msg += f"- Deleted {tenant_delete_res} tenant.\n"
|
||||
# step2 delete user-tenant relation
|
||||
if tenants:
|
||||
# step2.1 delete docs and files in joined team
|
||||
joined_tenants = [t for t in tenants if t["role"] == UserTenantRole.NORMAL.value]
|
||||
if joined_tenants:
|
||||
done_msg += "Start to delete data in joined tenants.\n"
|
||||
created_documents = DocumentService.get_all_docs_by_creator_id(usr.id)
|
||||
if created_documents:
|
||||
# step2.1.1 delete files
|
||||
doc_file_info = File2DocumentService.get_by_document_ids([d['id'] for d in created_documents])
|
||||
created_files = FileService.get_by_ids([f['file_id'] for f in doc_file_info])
|
||||
if created_files:
|
||||
# step2.1.1.1 delete file in storage
|
||||
for f in created_files:
|
||||
STORAGE_IMPL.rm(f.parent_id, f.location)
|
||||
done_msg += f"- Deleted {len(created_files)} uploaded file.\n"
|
||||
# step2.1.1.2 delete file record
|
||||
file_delete_res = FileService.delete_by_ids([f.id for f in created_files])
|
||||
done_msg += f"- Deleted {file_delete_res} file records.\n"
|
||||
# step2.1.2 delete document-file relation record
|
||||
file2doc_delete_res = File2DocumentService.delete_by_document_ids_or_file_ids(
|
||||
[d['id'] for d in created_documents],
|
||||
[f.id for f in created_files]
|
||||
)
|
||||
done_msg += f"- Deleted {file2doc_delete_res} document-file relation records.\n"
|
||||
# step2.1.3 delete chunks
|
||||
doc_groups = group_by(created_documents, "tenant_id")
|
||||
kb_grouped_doc = {k: group_by(v, "kb_id") for k, v in doc_groups.items()}
|
||||
# chunks in {'tenant_id': {'kb_id': [{'id': doc_id}]}} structure
|
||||
chunk_delete_res = 0
|
||||
kb_doc_info = {}
|
||||
for _tenant_id, kb_doc in kb_grouped_doc.items():
|
||||
for _kb_id, docs in kb_doc.items():
|
||||
chunk_delete_res += settings.docStoreConn.delete(
|
||||
{"doc_id": [d["id"] for d in docs]},
|
||||
search.index_name(_tenant_id), _kb_id
|
||||
)
|
||||
# record doc info
|
||||
if _kb_id in kb_doc_info.keys():
|
||||
kb_doc_info[_kb_id]['doc_num'] += 1
|
||||
kb_doc_info[_kb_id]['token_num'] += sum([d["token_num"] for d in docs])
|
||||
kb_doc_info[_kb_id]['chunk_num'] += sum([d["chunk_num"] for d in docs])
|
||||
else:
|
||||
kb_doc_info[_kb_id] = {
|
||||
'doc_num': 1,
|
||||
'token_num': sum([d["token_num"] for d in docs]),
|
||||
'chunk_num': sum([d["chunk_num"] for d in docs])
|
||||
}
|
||||
done_msg += f"- Deleted {chunk_delete_res} chunks.\n"
|
||||
# step2.1.4 delete tasks
|
||||
task_delete_res = TaskService.delete_by_doc_ids([d['id'] for d in created_documents])
|
||||
done_msg += f"- Deleted {task_delete_res} tasks.\n"
|
||||
# step2.1.5 delete document record
|
||||
doc_delete_res = DocumentService.delete_by_ids([d['id'] for d in created_documents])
|
||||
done_msg += f"- Deleted {doc_delete_res} documents.\n"
|
||||
# step2.1.6 update knowledge base doc&chunk&token cnt
|
||||
for kb_id, doc_num in kb_doc_info.items():
|
||||
KnowledgebaseService.decrease_document_num_in_delete(kb_id, doc_num)
|
||||
|
||||
# step2.2 delete relation
|
||||
user_tenant_delete_res = UserTenantService.delete_by_ids([t["id"] for t in tenants])
|
||||
done_msg += f"- Deleted {user_tenant_delete_res} user-tenant records.\n"
|
||||
# step3 finally delete user
|
||||
user_delete_res = UserService.delete_by_id(usr.id)
|
||||
done_msg += f"- Deleted {user_delete_res} user.\nDelete done!"
|
||||
|
||||
return {"success": True, "message": f"Successfully deleted user. Details:\n{done_msg}"}
|
||||
|
||||
except Exception as e:
|
||||
logging.exception(e)
|
||||
return {"success": False, "message": f"Error: {str(e)}. Already done:\n{done_msg}"}
|
||||
|
||||
|
||||
def delete_user_agents(user_id: str) -> dict:
|
||||
"""
|
||||
use user_id to delete
|
||||
:return: {
|
||||
"agents_deleted_count": 1,
|
||||
"version_deleted_count": 2
|
||||
}
|
||||
"""
|
||||
agents_deleted_count, agents_version_deleted_count = 0, 0
|
||||
user_agents = UserCanvasService.get_all_agents_by_tenant_ids([user_id], user_id)
|
||||
if user_agents:
|
||||
agents_version = UserCanvasVersionService.get_all_canvas_version_by_canvas_ids([a['id'] for a in user_agents])
|
||||
agents_version_deleted_count = UserCanvasVersionService.delete_by_ids([v['id'] for v in agents_version])
|
||||
agents_deleted_count = UserCanvasService.delete_by_ids([a['id'] for a in user_agents])
|
||||
return {
|
||||
"agents_deleted_count": agents_deleted_count,
|
||||
"version_deleted_count": agents_version_deleted_count
|
||||
}
|
||||
|
||||
|
||||
def delete_user_dialogs(user_id: str) -> dict:
|
||||
"""
|
||||
use user_id to delete
|
||||
:return: {
|
||||
"dialogs_deleted_count": 1,
|
||||
"conversations_deleted_count": 1,
|
||||
"api_token_deleted_count": 2,
|
||||
"api4conversation_deleted_count": 2
|
||||
}
|
||||
"""
|
||||
dialog_deleted_count, conversations_deleted_count, api_token_deleted_count, api4conversation_deleted_count = 0, 0, 0, 0
|
||||
user_dialogs = DialogService.get_all_dialogs_by_tenant_id(user_id)
|
||||
if user_dialogs:
|
||||
# delete conversation
|
||||
conversations = ConversationService.get_all_conversation_by_dialog_ids([ud['id'] for ud in user_dialogs])
|
||||
conversations_deleted_count = ConversationService.delete_by_ids([c['id'] for c in conversations])
|
||||
# delete api token
|
||||
api_token_deleted_count = APITokenService.delete_by_tenant_id(user_id)
|
||||
# delete api for conversation
|
||||
api4conversation_deleted_count = API4ConversationService.delete_by_dialog_ids([ud['id'] for ud in user_dialogs])
|
||||
# delete dialog at last
|
||||
dialog_deleted_count = DialogService.delete_by_ids([ud['id'] for ud in user_dialogs])
|
||||
return {
|
||||
"dialogs_deleted_count": dialog_deleted_count,
|
||||
"conversations_deleted_count": conversations_deleted_count,
|
||||
"api_token_deleted_count": api_token_deleted_count,
|
||||
"api4conversation_deleted_count": api4conversation_deleted_count
|
||||
}
|
||||
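To make the expected call shapes concrete, here is a hedged sketch of driving create_new_user and delete_user_data; the field values are placeholders, and a configured database plus storage backend are assumed.

from api.db.joint_services.user_account_service import create_new_user, delete_user_data

result = create_new_user({
    "email": "alice@example.com",        # placeholder address
    "nickname": "alice",
    "password": "<decrypted password>",  # already decrypted, as the docstring requires
    "login_channel": "password",
    "is_superuser": False,
})
if result["success"]:
    user_id = result["user_info"]["id"]
    # delete_user_data refuses active accounts and superusers,
    # so this only succeeds once the account has been deactivated.
    print(delete_user_data(user_id)["message"])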
28
api/db/reload_config_base.py
Normal file
@@ -0,0 +1,28 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class ReloadConfigBase:
    @classmethod
    def get_all(cls):
        configs = {}
        for k, v in cls.__dict__.items():
            if not callable(getattr(cls, k)) and not k.startswith(
                    "__") and not k.startswith("_"):
                configs[k] = v
        return configs

    @classmethod
    def get(cls, config_name):
        return getattr(cls, config_name) if hasattr(cls, config_name) else None
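A small sketch of the intended subclassing pattern; JobConfig and its fields are made up for illustration.

from api.db.reload_config_base import ReloadConfigBase

class JobConfig(ReloadConfigBase):
    MAX_RETRIES = 3
    TIMEOUT_SEC = 30

print(JobConfig.get_all())           # {'MAX_RETRIES': 3, 'TIMEOUT_SEC': 30}
print(JobConfig.get("TIMEOUT_SEC"))  # 30
print(JobConfig.get("MISSING"))      # None, instead of raising AttributeError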
54
api/db/runtime_config.py
Normal file
@@ -0,0 +1,54 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from api.versions import get_ragflow_version
from .reload_config_base import ReloadConfigBase


class RuntimeConfig(ReloadConfigBase):
    DEBUG = None
    WORK_MODE = None
    HTTP_PORT = None
    JOB_SERVER_HOST = None
    JOB_SERVER_VIP = None
    ENV = dict()
    SERVICE_DB = None
    LOAD_CONFIG_MANAGER = False

    @classmethod
    def init_config(cls, **kwargs):
        for k, v in kwargs.items():
            if hasattr(cls, k):
                setattr(cls, k, v)

    @classmethod
    def init_env(cls):
        cls.ENV.update({"version": get_ragflow_version()})

    @classmethod
    def load_config_manager(cls):
        cls.LOAD_CONFIG_MANAGER = True

    @classmethod
    def get_env(cls, key):
        return cls.ENV.get(key, None)

    @classmethod
    def get_all_env(cls):
        return cls.ENV

    @classmethod
    def set_service_db(cls, service_db):
        cls.SERVICE_DB = service_db
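Typical boot-time usage looks roughly like the sketch below; the host and port values are placeholders.

from api.db.runtime_config import RuntimeConfig

RuntimeConfig.DEBUG = False
RuntimeConfig.init_config(JOB_SERVER_HOST="127.0.0.1", HTTP_PORT=9380)
RuntimeConfig.init_env()  # stores the running RAGFlow version in ENV
print(RuntimeConfig.get_env("version"))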
99
api/db/services/__init__.py
Normal file
@@ -0,0 +1,99 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import re
|
||||
from pathlib import PurePath
|
||||
|
||||
from .user_service import UserService as UserService
|
||||
|
||||
|
||||
def _split_name_counter(filename: str) -> tuple[str, int | None]:
|
||||
"""
|
||||
Splits a filename into main part and counter (if present in parentheses).
|
||||
|
||||
Args:
|
||||
filename: Input filename string to be parsed
|
||||
|
||||
Returns:
|
||||
A tuple containing:
|
||||
- The main filename part (string)
|
||||
- The counter from parentheses (integer) or None if no counter exists
|
||||
"""
|
||||
pattern = re.compile(r"^(.*?)\((\d+)\)$")
|
||||
|
||||
match = pattern.search(filename)
|
||||
if match:
|
||||
main_part = match.group(1).rstrip()
|
||||
bracket_part = match.group(2)
|
||||
return main_part, int(bracket_part)
|
||||
|
||||
return filename, None
|
||||
|
||||
|
||||
def duplicate_name(query_func, **kwargs) -> str:
|
||||
"""
|
||||
Generates a unique filename by appending/incrementing a counter when duplicates exist.
|
||||
|
||||
Continuously checks for name availability using the provided query function,
|
||||
automatically appending (1), (2), etc. until finding an available name or
|
||||
reaching maximum retries.
|
||||
|
||||
Args:
|
||||
query_func: Callable that accepts keyword arguments and returns:
|
||||
- True if name exists (should be modified)
|
||||
- False if name is available
|
||||
**kwargs: Must contain 'name' key with original filename to check
|
||||
|
||||
Returns:
|
||||
str: Available filename, either:
|
||||
- Original name (if available)
|
||||
- Modified name with counter (e.g., "file(1).txt")
|
||||
|
||||
Raises:
|
||||
KeyError: If 'name' key not provided in kwargs
|
||||
RuntimeError: If unable to generate unique name after maximum retries
|
||||
|
||||
Example:
|
||||
>>> def name_exists(name): return name in existing_files
|
||||
>>> duplicate_name(name_exists, name="document.pdf")
|
||||
'document(1).pdf' # If original exists
|
||||
"""
|
||||
MAX_RETRIES = 1000
|
||||
|
||||
if "name" not in kwargs:
|
||||
raise KeyError("Arguments must contain 'name' key")
|
||||
|
||||
original_name = kwargs["name"]
|
||||
current_name = original_name
|
||||
retries = 0
|
||||
|
||||
while retries < MAX_RETRIES:
|
||||
if not query_func(**kwargs):
|
||||
return current_name
|
||||
|
||||
path = PurePath(current_name)
|
||||
stem = path.stem
|
||||
suffix = path.suffix
|
||||
|
||||
main_part, counter = _split_name_counter(stem)
|
||||
counter = counter + 1 if counter else 1
|
||||
|
||||
new_name = f"{main_part}({counter}){suffix}"
|
||||
|
||||
kwargs["name"] = new_name
|
||||
current_name = new_name
|
||||
retries += 1
|
||||
|
||||
raise RuntimeError(f"Failed to generate unique name within {MAX_RETRIES} attempts. Original: {original_name}")
|
||||
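The query_func contract of duplicate_name is easiest to see with a concrete closure; the in-memory existing set below is a stand-in for a real service-level existence check.

from api.db.services import duplicate_name

existing = {"report.pdf", "report(1).pdf"}

def name_taken(**kwargs) -> bool:
    # True means the candidate name is already used and must be modified.
    return kwargs["name"] in existing

print(duplicate_name(name_taken, name="report.pdf"))  # -> 'report(2).pdf'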
112
api/db/services/api_service.py
Normal file
@@ -0,0 +1,112 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
from datetime import datetime
|
||||
|
||||
import peewee
|
||||
|
||||
from api.db.db_models import DB, API4Conversation, APIToken, Dialog
|
||||
from api.db.services.common_service import CommonService
|
||||
from api.utils import current_timestamp, datetime_format
|
||||
|
||||
|
||||
class APITokenService(CommonService):
|
||||
model = APIToken
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def used(cls, token):
|
||||
return cls.model.update({
|
||||
"update_time": current_timestamp(),
|
||||
"update_date": datetime_format(datetime.now()),
|
||||
}).where(
|
||||
cls.model.token == token
|
||||
)
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def delete_by_tenant_id(cls, tenant_id):
|
||||
return cls.model.delete().where(cls.model.tenant_id == tenant_id).execute()
|
||||
|
||||
|
||||
class API4ConversationService(CommonService):
|
||||
model = API4Conversation
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_list(cls, dialog_id, tenant_id,
|
||||
page_number, items_per_page,
|
||||
orderby, desc, id, user_id=None, include_dsl=True, keywords="",
|
||||
from_date=None, to_date=None
|
||||
):
|
||||
if include_dsl:
|
||||
sessions = cls.model.select().where(cls.model.dialog_id == dialog_id)
|
||||
else:
|
||||
fields = [field for field in cls.model._meta.fields.values() if field.name != 'dsl']
|
||||
sessions = cls.model.select(*fields).where(cls.model.dialog_id == dialog_id)
|
||||
if id:
|
||||
sessions = sessions.where(cls.model.id == id)
|
||||
if user_id:
|
||||
sessions = sessions.where(cls.model.user_id == user_id)
|
||||
if keywords:
|
||||
sessions = sessions.where(peewee.fn.LOWER(cls.model.message).contains(keywords.lower()))
|
||||
if from_date:
|
||||
sessions = sessions.where(cls.model.create_date >= from_date)
|
||||
if to_date:
|
||||
sessions = sessions.where(cls.model.create_date <= to_date)
|
||||
if desc:
|
||||
sessions = sessions.order_by(cls.model.getter_by(orderby).desc())
|
||||
else:
|
||||
sessions = sessions.order_by(cls.model.getter_by(orderby).asc())
|
||||
count = sessions.count()
|
||||
sessions = sessions.paginate(page_number, items_per_page)
|
||||
|
||||
return count, list(sessions.dicts())
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def append_message(cls, id, conversation):
|
||||
cls.update_by_id(id, conversation)
|
||||
return cls.model.update(round=cls.model.round + 1).where(cls.model.id == id).execute()
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def stats(cls, tenant_id, from_date, to_date, source=None):
|
||||
if len(to_date) == 10:
|
||||
to_date += " 23:59:59"
|
||||
return cls.model.select(
|
||||
cls.model.create_date.truncate("day").alias("dt"),
|
||||
peewee.fn.COUNT(
|
||||
cls.model.id).alias("pv"),
|
||||
peewee.fn.COUNT(
|
||||
cls.model.user_id.distinct()).alias("uv"),
|
||||
peewee.fn.SUM(
|
||||
cls.model.tokens).alias("tokens"),
|
||||
peewee.fn.SUM(
|
||||
cls.model.duration).alias("duration"),
|
||||
peewee.fn.AVG(
|
||||
cls.model.round).alias("round"),
|
||||
peewee.fn.SUM(
|
||||
cls.model.thumb_up).alias("thumb_up")
|
||||
).join(Dialog, on=((cls.model.dialog_id == Dialog.id) & (Dialog.tenant_id == tenant_id))).where(
|
||||
cls.model.create_date >= from_date,
|
||||
cls.model.create_date <= to_date,
|
||||
cls.model.source == source
|
||||
).group_by(cls.model.create_date.truncate("day")).dicts()
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def delete_by_dialog_ids(cls, dialog_ids):
|
||||
return cls.model.delete().where(cls.model.dialog_id.in_(dialog_ids)).execute()
|
||||
350
api/db/services/canvas_service.py
Normal file
@@ -0,0 +1,350 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import json
|
||||
import logging
|
||||
import time
|
||||
from uuid import uuid4
|
||||
from agent.canvas import Canvas
|
||||
from api.db import CanvasCategory, TenantPermission
|
||||
from api.db.db_models import DB, CanvasTemplate, User, UserCanvas, API4Conversation
|
||||
from api.db.services.api_service import API4ConversationService
|
||||
from api.db.services.common_service import CommonService
|
||||
from api.utils import get_uuid
|
||||
from api.utils.api_utils import get_data_openai
|
||||
import tiktoken
|
||||
from peewee import fn
|
||||
|
||||
|
||||
class CanvasTemplateService(CommonService):
|
||||
model = CanvasTemplate
|
||||
|
||||
class DataFlowTemplateService(CommonService):
|
||||
"""
|
||||
Alias of CanvasTemplateService
|
||||
"""
|
||||
model = CanvasTemplate
|
||||
|
||||
|
||||
class UserCanvasService(CommonService):
|
||||
model = UserCanvas
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_list(cls, tenant_id,
|
||||
page_number, items_per_page, orderby, desc, id, title, canvas_category=CanvasCategory.Agent):
|
||||
agents = cls.model.select()
|
||||
if id:
|
||||
agents = agents.where(cls.model.id == id)
|
||||
if title:
|
||||
agents = agents.where(cls.model.title == title)
|
||||
agents = agents.where(cls.model.user_id == tenant_id)
|
||||
agents = agents.where(cls.model.canvas_category == canvas_category)
|
||||
if desc:
|
||||
agents = agents.order_by(cls.model.getter_by(orderby).desc())
|
||||
else:
|
||||
agents = agents.order_by(cls.model.getter_by(orderby).asc())
|
||||
|
||||
agents = agents.paginate(page_number, items_per_page)
|
||||
|
||||
return list(agents.dicts())
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_all_agents_by_tenant_ids(cls, tenant_ids, user_id):
|
||||
# will get all permitted agents, be cautious
|
||||
fields = [
|
||||
cls.model.id,
|
||||
cls.model.title,
|
||||
cls.model.permission,
|
||||
cls.model.canvas_type,
|
||||
cls.model.canvas_category
|
||||
]
|
||||
# find team agents and owned agents
|
||||
agents = cls.model.select(*fields).where(
|
||||
(cls.model.user_id.in_(tenant_ids) & (cls.model.permission == TenantPermission.TEAM.value)) | (
|
||||
cls.model.user_id == user_id
|
||||
)
|
||||
)
|
||||
# sort by create_time, asc
|
||||
agents = agents.order_by(cls.model.create_time.asc())
|
||||
# maybe cause slow query by deep paginate, optimize later
|
||||
offset, limit = 0, 50
|
||||
res = []
|
||||
while True:
|
||||
ag_batch = agents.offset(offset).limit(limit)
|
||||
_temp = list(ag_batch.dicts())
|
||||
if not _temp:
|
||||
break
|
||||
res.extend(_temp)
|
||||
offset += limit
|
||||
return res
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_by_canvas_id(cls, pid):
|
||||
try:
|
||||
|
||||
fields = [
|
||||
cls.model.id,
|
||||
cls.model.avatar,
|
||||
cls.model.title,
|
||||
cls.model.dsl,
|
||||
cls.model.description,
|
||||
cls.model.permission,
|
||||
cls.model.update_time,
|
||||
cls.model.user_id,
|
||||
cls.model.create_time,
|
||||
cls.model.create_date,
|
||||
cls.model.update_date,
|
||||
cls.model.canvas_category,
|
||||
User.nickname,
|
||||
User.avatar.alias('tenant_avatar'),
|
||||
]
|
||||
agents = cls.model.select(*fields) \
|
||||
.join(User, on=(cls.model.user_id == User.id)) \
|
||||
.where(cls.model.id == pid)
|
||||
# obj = cls.model.query(id=pid)[0]
|
||||
return True, agents.dicts()[0]
|
||||
except Exception as e:
|
||||
logging.exception(e)
|
||||
return False, None
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_by_tenant_ids(cls, joined_tenant_ids, user_id,
|
||||
page_number, items_per_page,
|
||||
orderby, desc, keywords, canvas_category=None
|
||||
):
|
||||
fields = [
|
||||
cls.model.id,
|
||||
cls.model.avatar,
|
||||
cls.model.title,
|
||||
cls.model.dsl,
|
||||
cls.model.description,
|
||||
cls.model.permission,
|
||||
cls.model.user_id.alias("tenant_id"),
|
||||
User.nickname,
|
||||
User.avatar.alias('tenant_avatar'),
|
||||
cls.model.update_time,
|
||||
cls.model.canvas_category,
|
||||
]
|
||||
if keywords:
|
||||
agents = cls.model.select(*fields).join(User, on=(cls.model.user_id == User.id)).where(
|
||||
cls.model.user_id.in_(joined_tenant_ids),
|
||||
fn.LOWER(cls.model.title).contains(keywords.lower())
|
||||
#(((cls.model.user_id.in_(joined_tenant_ids)) & (cls.model.permission == TenantPermission.TEAM.value)) | (cls.model.user_id == user_id)),
|
||||
#(fn.LOWER(cls.model.title).contains(keywords.lower()))
|
||||
)
|
||||
else:
|
||||
agents = cls.model.select(*fields).join(User, on=(cls.model.user_id == User.id)).where(
|
||||
cls.model.user_id.in_(joined_tenant_ids)
|
||||
#(((cls.model.user_id.in_(joined_tenant_ids)) & (cls.model.permission == TenantPermission.TEAM.value)) | (cls.model.user_id == user_id))
|
||||
)
|
||||
if canvas_category:
|
||||
agents = agents.where(cls.model.canvas_category == canvas_category)
|
||||
if desc:
|
||||
agents = agents.order_by(cls.model.getter_by(orderby).desc())
|
||||
else:
|
||||
agents = agents.order_by(cls.model.getter_by(orderby).asc())
|
||||
|
||||
count = agents.count()
|
||||
if page_number and items_per_page:
|
||||
agents = agents.paginate(page_number, items_per_page)
|
||||
return list(agents.dicts()), count
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def accessible(cls, canvas_id, tenant_id):
|
||||
from api.db.services.user_service import UserTenantService
|
||||
e, c = UserCanvasService.get_by_canvas_id(canvas_id)
|
||||
if not e:
|
||||
return False
|
||||
|
||||
tids = [t.tenant_id for t in UserTenantService.query(user_id=tenant_id)]
|
||||
if c["user_id"] != canvas_id and c["user_id"] not in tids:
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
def completion(tenant_id, agent_id, session_id=None, **kwargs):
|
||||
query = kwargs.get("query", "") or kwargs.get("question", "")
|
||||
files = kwargs.get("files", [])
|
||||
inputs = kwargs.get("inputs", {})
|
||||
user_id = kwargs.get("user_id", "")
|
||||
|
||||
if session_id:
|
||||
e, conv = API4ConversationService.get_by_id(session_id)
|
||||
assert e, "Session not found!"
|
||||
if not conv.message:
|
||||
conv.message = []
|
||||
if not isinstance(conv.dsl, str):
|
||||
conv.dsl = json.dumps(conv.dsl, ensure_ascii=False)
|
||||
canvas = Canvas(conv.dsl, tenant_id, agent_id)
|
||||
else:
|
||||
e, cvs = UserCanvasService.get_by_id(agent_id)
|
||||
assert e, "Agent not found."
|
||||
assert cvs.user_id == tenant_id, "You do not own the agent."
|
||||
if not isinstance(cvs.dsl, str):
|
||||
cvs.dsl = json.dumps(cvs.dsl, ensure_ascii=False)
|
||||
session_id=get_uuid()
|
||||
canvas = Canvas(cvs.dsl, tenant_id, agent_id)
|
||||
canvas.reset()
|
||||
conv = {
|
||||
"id": session_id,
|
||||
"dialog_id": cvs.id,
|
||||
"user_id": user_id,
|
||||
"message": [],
|
||||
"source": "agent",
|
||||
"dsl": cvs.dsl,
|
||||
"reference": []
|
||||
}
|
||||
API4ConversationService.save(**conv)
|
||||
conv = API4Conversation(**conv)
|
||||
|
||||
message_id = str(uuid4())
|
||||
conv.message.append({
|
||||
"role": "user",
|
||||
"content": query,
|
||||
"id": message_id
|
||||
})
|
||||
txt = ""
|
||||
for ans in canvas.run(query=query, files=files, user_id=user_id, inputs=inputs):
|
||||
ans["session_id"] = session_id
|
||||
if ans["event"] == "message":
|
||||
txt += ans["data"]["content"]
|
||||
yield "data:" + json.dumps(ans, ensure_ascii=False) + "\n\n"
|
||||
|
||||
conv.message.append({"role": "assistant", "content": txt, "created_at": time.time(), "id": message_id})
|
||||
conv.reference = canvas.get_reference()
|
||||
conv.errors = canvas.error
|
||||
conv.dsl = str(canvas)
|
||||
conv = conv.to_dict()
|
||||
API4ConversationService.append_message(conv["id"], conv)
|
||||
|
||||
|
||||
def completionOpenAI(tenant_id, agent_id, question, session_id=None, stream=True, **kwargs):
|
||||
tiktokenenc = tiktoken.get_encoding("cl100k_base")
|
||||
prompt_tokens = len(tiktokenenc.encode(str(question)))
|
||||
user_id = kwargs.get("user_id", "")
|
||||
|
||||
if stream:
|
||||
completion_tokens = 0
|
||||
try:
|
||||
for ans in completion(
|
||||
tenant_id=tenant_id,
|
||||
agent_id=agent_id,
|
||||
session_id=session_id,
|
||||
query=question,
|
||||
user_id=user_id,
|
||||
**kwargs
|
||||
):
|
||||
if isinstance(ans, str):
|
||||
try:
|
||||
ans = json.loads(ans[5:]) # remove "data:"
|
||||
except Exception as e:
|
||||
logging.exception(f"Agent OpenAI-Compatible completionOpenAI parse answer failed: {e}")
|
||||
continue
|
||||
if ans.get("event") not in ["message", "message_end"]:
|
||||
continue
|
||||
|
||||
content_piece = ""
|
||||
if ans["event"] == "message":
|
||||
content_piece = ans["data"]["content"]
|
||||
|
||||
completion_tokens += len(tiktokenenc.encode(content_piece))
|
||||
|
||||
openai_data = get_data_openai(
|
||||
id=session_id or str(uuid4()),
|
||||
model=agent_id,
|
||||
content=content_piece,
|
||||
prompt_tokens=prompt_tokens,
|
||||
completion_tokens=completion_tokens,
|
||||
stream=True
|
||||
)
|
||||
|
||||
if ans.get("data", {}).get("reference", None):
|
||||
openai_data["choices"][0]["delta"]["reference"] = ans["data"]["reference"]
|
||||
|
||||
yield "data: " + json.dumps(openai_data, ensure_ascii=False) + "\n\n"
|
||||
|
||||
yield "data: [DONE]\n\n"
|
||||
|
||||
except Exception as e:
|
||||
logging.exception(e)
|
||||
yield "data: " + json.dumps(
|
||||
get_data_openai(
|
||||
id=session_id or str(uuid4()),
|
||||
model=agent_id,
|
||||
content=f"**ERROR**: {str(e)}",
|
||||
finish_reason="stop",
|
||||
prompt_tokens=prompt_tokens,
|
||||
completion_tokens=len(tiktokenenc.encode(f"**ERROR**: {str(e)}")),
|
||||
stream=True
|
||||
),
|
||||
ensure_ascii=False
|
||||
) + "\n\n"
|
||||
yield "data: [DONE]\n\n"
|
||||
|
||||
else:
|
||||
try:
|
||||
all_content = ""
|
||||
reference = {}
|
||||
for ans in completion(
|
||||
tenant_id=tenant_id,
|
||||
agent_id=agent_id,
|
||||
session_id=session_id,
|
||||
query=question,
|
||||
user_id=user_id,
|
||||
**kwargs
|
||||
):
|
||||
if isinstance(ans, str):
|
||||
ans = json.loads(ans[5:])
|
||||
if ans.get("event") not in ["message", "message_end"]:
|
||||
continue
|
||||
|
||||
if ans["event"] == "message":
|
||||
all_content += ans["data"]["content"]
|
||||
|
||||
if ans.get("data", {}).get("reference", None):
|
||||
reference.update(ans["data"]["reference"])
|
||||
|
||||
completion_tokens = len(tiktokenenc.encode(all_content))
|
||||
|
||||
openai_data = get_data_openai(
|
||||
id=session_id or str(uuid4()),
|
||||
model=agent_id,
|
||||
prompt_tokens=prompt_tokens,
|
||||
completion_tokens=completion_tokens,
|
||||
content=all_content,
|
||||
finish_reason="stop",
|
||||
param=None
|
||||
)
|
||||
|
||||
if reference:
|
||||
openai_data["choices"][0]["message"]["reference"] = reference
|
||||
|
||||
yield openai_data
|
||||
except Exception as e:
|
||||
logging.exception(e)
|
||||
yield get_data_openai(
|
||||
id=session_id or str(uuid4()),
|
||||
model=agent_id,
|
||||
prompt_tokens=prompt_tokens,
|
||||
completion_tokens=len(tiktokenenc.encode(f"**ERROR**: {str(e)}")),
|
||||
content=f"**ERROR**: {str(e)}",
|
||||
finish_reason="stop",
|
||||
param=None
|
||||
)
|
||||
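Since completionOpenAI(..., stream=True) yields Server-Sent-Events-style strings, a consumer would look roughly like the sketch below; the tenant and agent ids are placeholders, and it assumes get_data_openai emits OpenAI-style chat.completion.chunk payloads with choices[0].delta.content.

import json

from api.db.services.canvas_service import completionOpenAI

for chunk in completionOpenAI("tenant-id", "agent-id", "Hello!", stream=True):
    if chunk.strip() == "data: [DONE]":
        break
    payload = json.loads(chunk[len("data: "):])
    delta = payload["choices"][0]["delta"].get("content", "")
    print(delta, end="", flush=True)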
345
api/db/services/common_service.py
Normal file
@@ -0,0 +1,345 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
from datetime import datetime
|
||||
from tenacity import retry, stop_after_attempt, wait_exponential, retry_if_exception_type
|
||||
import peewee
|
||||
from peewee import InterfaceError, OperationalError
|
||||
|
||||
from api.db.db_models import DB
|
||||
from api.utils import current_timestamp, datetime_format, get_uuid
|
||||
|
||||
def retry_db_operation(func):
|
||||
@retry(
|
||||
stop=stop_after_attempt(3),
|
||||
wait=wait_exponential(multiplier=1, min=1, max=5),
|
||||
retry=retry_if_exception_type((InterfaceError, OperationalError)),
|
||||
before_sleep=lambda retry_state: print(f"RETRY {retry_state.attempt_number} TIMES"),
|
||||
reraise=True,
|
||||
)
|
||||
def wrapper(*args, **kwargs):
|
||||
return func(*args, **kwargs)
|
||||
return wrapper
|
||||
|
||||
class CommonService:
|
||||
"""Base service class that provides common database operations.
|
||||
|
||||
This class serves as a foundation for all service classes in the application,
|
||||
implementing standard CRUD operations and common database query patterns.
|
||||
It uses the Peewee ORM for database interactions and provides a consistent
|
||||
interface for database operations across all derived service classes.
|
||||
|
||||
Attributes:
|
||||
model: The Peewee model class that this service operates on. Must be set by subclasses.
|
||||
"""
|
||||
|
||||
model = None
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def query(cls, cols=None, reverse=None, order_by=None, **kwargs):
|
||||
"""Execute a database query with optional column selection and ordering.
|
||||
|
||||
This method provides a flexible way to query the database with various filters
|
||||
and sorting options. It supports column selection, sort order control, and
|
||||
additional filter conditions.
|
||||
|
||||
Args:
|
||||
cols (list, optional): List of column names to select. If None, selects all columns.
|
||||
reverse (bool, optional): If True, sorts in descending order. If False, sorts in ascending order.
|
||||
order_by (str, optional): Column name to sort results by.
|
||||
**kwargs: Additional filter conditions passed as keyword arguments.
|
||||
|
||||
Returns:
|
||||
peewee.ModelSelect: A query result containing matching records.
|
||||
"""
|
||||
return cls.model.query(cols=cols, reverse=reverse, order_by=order_by, **kwargs)
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_all(cls, cols=None, reverse=None, order_by=None):
|
||||
"""Retrieve all records from the database with optional column selection and ordering.
|
||||
|
||||
This method fetches all records from the model's table with support for
|
||||
column selection and result ordering. If no order_by is specified and reverse
|
||||
is True, it defaults to ordering by create_time.
|
||||
|
||||
Args:
|
||||
cols (list, optional): List of column names to select. If None, selects all columns.
|
||||
reverse (bool, optional): If True, sorts in descending order. If False, sorts in ascending order.
|
||||
order_by (str, optional): Column name to sort results by. Defaults to 'create_time' if reverse is specified.
|
||||
|
||||
Returns:
|
||||
peewee.ModelSelect: A query containing all matching records.
|
||||
"""
|
||||
if cols:
|
||||
query_records = cls.model.select(*cols)
|
||||
else:
|
||||
query_records = cls.model.select()
|
||||
if reverse is not None:
|
||||
if not order_by or not hasattr(cls, order_by):
|
||||
order_by = "create_time"
|
||||
if reverse is True:
|
||||
query_records = query_records.order_by(cls.model.getter_by(order_by).desc())
|
||||
elif reverse is False:
|
||||
query_records = query_records.order_by(cls.model.getter_by(order_by).asc())
|
||||
return query_records
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get(cls, **kwargs):
|
||||
"""Get a single record matching the given criteria.
|
||||
|
||||
This method retrieves a single record from the database that matches
|
||||
the specified filter conditions.
|
||||
|
||||
Args:
|
||||
**kwargs: Filter conditions as keyword arguments.
|
||||
|
||||
Returns:
|
||||
Model instance: Single matching record.
|
||||
|
||||
Raises:
|
||||
peewee.DoesNotExist: If no matching record is found.
|
||||
"""
|
||||
return cls.model.get(**kwargs)
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_or_none(cls, **kwargs):
|
||||
"""Get a single record or None if not found.
|
||||
|
||||
This method attempts to retrieve a single record matching the given criteria,
|
||||
returning None if no match is found instead of raising an exception.
|
||||
|
||||
Args:
|
||||
**kwargs: Filter conditions as keyword arguments.
|
||||
|
||||
Returns:
|
||||
Model instance or None: Matching record if found, None otherwise.
|
||||
"""
|
||||
try:
|
||||
return cls.model.get(**kwargs)
|
||||
except peewee.DoesNotExist:
|
||||
return None
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def save(cls, **kwargs):
|
||||
"""Save a new record to database.
|
||||
|
||||
This method creates a new record in the database with the provided field values,
|
||||
forcing an insert operation rather than an update.
|
||||
|
||||
Args:
|
||||
**kwargs: Record field values as keyword arguments.
|
||||
|
||||
Returns:
|
||||
int: Number of rows inserted, as returned by Peewee's save(force_insert=True).
|
||||
"""
|
||||
sample_obj = cls.model(**kwargs).save(force_insert=True)
|
||||
return sample_obj
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def insert(cls, **kwargs):
|
||||
"""Insert a new record with automatic ID and timestamps.
|
||||
|
||||
This method creates a new record with automatically generated ID and timestamp fields.
|
||||
It handles the creation of create_time, create_date, update_time, and update_date fields.
|
||||
|
||||
Args:
|
||||
**kwargs: Record field values as keyword arguments.
|
||||
|
||||
Returns:
|
||||
int: Number of rows inserted, as returned by Peewee's save(force_insert=True).
|
||||
"""
|
||||
if "id" not in kwargs:
|
||||
kwargs["id"] = get_uuid()
|
||||
kwargs["create_time"] = current_timestamp()
|
||||
kwargs["create_date"] = datetime_format(datetime.now())
|
||||
kwargs["update_time"] = current_timestamp()
|
||||
kwargs["update_date"] = datetime_format(datetime.now())
|
||||
sample_obj = cls.model(**kwargs).save(force_insert=True)
|
||||
return sample_obj
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def insert_many(cls, data_list, batch_size=100):
|
||||
"""Insert multiple records in batches.
|
||||
|
||||
This method efficiently inserts multiple records into the database using batch processing.
|
||||
It automatically sets creation timestamps for all records.
|
||||
|
||||
Args:
|
||||
data_list (list): List of dictionaries containing record data to insert.
|
||||
batch_size (int, optional): Number of records to insert in each batch. Defaults to 100.
|
||||
"""
|
||||
with DB.atomic():
|
||||
for d in data_list:
|
||||
d["create_time"] = current_timestamp()
|
||||
d["create_date"] = datetime_format(datetime.now())
|
||||
for i in range(0, len(data_list), batch_size):
|
||||
cls.model.insert_many(data_list[i : i + batch_size]).execute()
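# Illustrative sketch (not part of the original code): bulk-loading rows through
# a hypothetical ItemService subclass of CommonService. insert_many() stamps
# create_time/create_date itself, but ids must be supplied by the caller, e.g.:
#
#     rows = [{"id": get_uuid(), "name": f"item-{i}"} for i in range(250)]
#     ItemService.insert_many(rows, batch_size=100)  # three batched INSERTs: 100 + 100 + 50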
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def update_many_by_id(cls, data_list):
|
||||
"""Update multiple records by their IDs.
|
||||
|
||||
This method updates multiple records in the database, identified by their IDs.
|
||||
It automatically updates the update_time and update_date fields for each record.
|
||||
|
||||
Args:
|
||||
data_list (list): List of dictionaries containing record data to update.
|
||||
Each dictionary must include an 'id' field.
|
||||
"""
|
||||
with DB.atomic():
|
||||
for data in data_list:
|
||||
data["update_time"] = current_timestamp()
|
||||
data["update_date"] = datetime_format(datetime.now())
|
||||
cls.model.update(data).where(cls.model.id == data["id"]).execute()
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
@retry_db_operation
|
||||
def update_by_id(cls, pid, data):
|
||||
"""Update a single record by ID.

Args:
pid: Record ID.
data: Updated field values.

Returns:
int: Number of records updated.
"""
|
||||
data["update_time"] = current_timestamp()
|
||||
data["update_date"] = datetime_format(datetime.now())
|
||||
num = cls.model.update(data).where(cls.model.id == pid).execute()
|
||||
return num
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_by_id(cls, pid):
|
||||
"""Get a record by ID.

Args:
pid: Record ID.

Returns:
tuple: (True, record) if found, otherwise (False, None).
"""
|
||||
try:
|
||||
obj = cls.model.get_or_none(cls.model.id == pid)
|
||||
if obj:
|
||||
return True, obj
|
||||
except Exception:
|
||||
pass
|
||||
return False, None
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_by_ids(cls, pids, cols=None):
|
||||
"""Get multiple records by their IDs.

Args:
pids: List of record IDs.
cols: List of columns to select.

Returns:
peewee.ModelSelect: Query of matching records.
"""
|
||||
if cols:
|
||||
objs = cls.model.select(*cols)
|
||||
else:
|
||||
objs = cls.model.select()
|
||||
return objs.where(cls.model.id.in_(pids))
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def delete_by_id(cls, pid):
|
||||
"""Delete a record by ID.

Args:
pid: Record ID.

Returns:
int: Number of records deleted.
"""
|
||||
return cls.model.delete().where(cls.model.id == pid).execute()
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def delete_by_ids(cls, pids):
|
||||
"""Delete multiple records by their IDs.

Args:
pids: List of record IDs.

Returns:
int: Number of records deleted.
"""
|
||||
with DB.atomic():
|
||||
res = cls.model.delete().where(cls.model.id.in_(pids)).execute()
|
||||
return res
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def filter_delete(cls, filters):
|
||||
"""Delete records matching the given filters.

Args:
filters: List of filter conditions.

Returns:
int: Number of records deleted.
"""
|
||||
with DB.atomic():
|
||||
num = cls.model.delete().where(*filters).execute()
|
||||
return num
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def filter_update(cls, filters, update_data):
|
||||
"""Update records matching the given filters.

Args:
filters: List of filter conditions.
update_data: Updated field values.

Returns:
int: Number of records updated.
"""
|
||||
with DB.atomic():
|
||||
return cls.model.update(update_data).where(*filters).execute()
|
||||
|
||||
@staticmethod
|
||||
def cut_list(tar_list, n):
|
||||
"""Split a list into chunks of size n.

Args:
tar_list: List to split.
n: Chunk size.

Returns:
list: List of tuples containing the chunks.
"""
|
||||
length = len(tar_list)
|
||||
arr = range(length)
|
||||
result = [tuple(tar_list[x : (x + n)]) for x in arr[::n]]
|
||||
return result
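# Example (illustrative, not part of the original code): cut_list chunks a list
# into tuples of at most n items; the final tuple may be shorter.
#
#     >>> CommonService.cut_list([1, 2, 3, 4, 5], 2)
#     [(1, 2), (3, 4), (5,)]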
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def filter_scope_list(cls, in_key, in_filters_list, filters=None, cols=None):
|
||||
"""Get records matching IN-clause filters with optional column selection.

Args:
in_key: Field name for the IN clause.
in_filters_list: List of values for the IN clause.
filters: Additional filter conditions.
cols: List of columns to select.

Returns:
list: List of matching records.
"""
|
||||
in_filters_tuple_list = cls.cut_list(in_filters_list, 20)
|
||||
if not filters:
|
||||
filters = []
|
||||
res_list = []
|
||||
if cols:
|
||||
for i in in_filters_tuple_list:
|
||||
query_records = cls.model.select(*cols).where(getattr(cls.model, in_key).in_(i), *filters)
|
||||
if query_records:
|
||||
res_list.extend([query_record for query_record in query_records])
|
||||
else:
|
||||
for i in in_filters_tuple_list:
|
||||
query_records = cls.model.select().where(getattr(cls.model, in_key).in_(i), *filters)
|
||||
if query_records:
|
||||
res_list.extend([query_record for query_record in query_records])
|
||||
return res_list
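# Illustrative sketch (not part of the original code): a concrete service is a
# one-line subclass that binds a Peewee model. `Item` below is a hypothetical
# model passed in only for demonstration.
def _common_service_usage_sketch(Item):
    class ItemService(CommonService):
        model = Item

    found, item = ItemService.get_by_id("some-id")        # (True, obj) or (False, None)
    ItemService.update_by_id("some-id", {"name": "new"})   # returns affected row count
    ItemService.filter_delete([Item.name == "obsolete"])   # returns deleted row count
    return found, item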
|
||||
242
api/db/services/conversation_service.py
Normal file
@@ -0,0 +1,242 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import time
|
||||
from uuid import uuid4
|
||||
from api.db import StatusEnum
|
||||
from api.db.db_models import Conversation, DB
|
||||
from api.db.services.api_service import API4ConversationService
|
||||
from api.db.services.common_service import CommonService
|
||||
from api.db.services.dialog_service import DialogService, chat
|
||||
from api.utils import get_uuid
|
||||
import json
|
||||
|
||||
from rag.prompts.generator import chunks_format
|
||||
|
||||
|
||||
class ConversationService(CommonService):
|
||||
model = Conversation
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_list(cls, dialog_id, page_number, items_per_page, orderby, desc, id, name, user_id=None):
|
||||
sessions = cls.model.select().where(cls.model.dialog_id == dialog_id)
|
||||
if id:
|
||||
sessions = sessions.where(cls.model.id == id)
|
||||
if name:
|
||||
sessions = sessions.where(cls.model.name == name)
|
||||
if user_id:
|
||||
sessions = sessions.where(cls.model.user_id == user_id)
|
||||
if desc:
|
||||
sessions = sessions.order_by(cls.model.getter_by(orderby).desc())
|
||||
else:
|
||||
sessions = sessions.order_by(cls.model.getter_by(orderby).asc())
|
||||
|
||||
sessions = sessions.paginate(page_number, items_per_page)
|
||||
|
||||
return list(sessions.dicts())
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_all_conversation_by_dialog_ids(cls, dialog_ids):
|
||||
sessions = cls.model.select().where(cls.model.dialog_id.in_(dialog_ids))
|
||||
sessions = sessions.order_by(cls.model.create_time.asc())
|
||||
offset, limit = 0, 100
|
||||
res = []
|
||||
while True:
|
||||
s_batch = sessions.offset(offset).limit(limit)
|
||||
_temp = list(s_batch.dicts())
|
||||
if not _temp:
|
||||
break
|
||||
res.extend(_temp)
|
||||
offset += limit
|
||||
return res
|
||||
|
||||
def structure_answer(conv, ans, message_id, session_id):
|
||||
reference = ans["reference"]
|
||||
if not isinstance(reference, dict):
|
||||
reference = {}
|
||||
ans["reference"] = {}
|
||||
|
||||
chunk_list = chunks_format(reference)
|
||||
|
||||
reference["chunks"] = chunk_list
|
||||
ans["id"] = message_id
|
||||
ans["session_id"] = session_id
|
||||
|
||||
if not conv:
|
||||
return ans
|
||||
|
||||
if not conv.message:
|
||||
conv.message = []
|
||||
if not conv.message or conv.message[-1].get("role", "") != "assistant":
|
||||
conv.message.append({"role": "assistant", "content": ans["answer"], "created_at": time.time(), "id": message_id})
|
||||
else:
|
||||
conv.message[-1] = {"role": "assistant", "content": ans["answer"], "created_at": time.time(), "id": message_id}
|
||||
if conv.reference:
|
||||
conv.reference[-1] = reference
|
||||
return ans
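# Illustrative sketch (not part of the original code): structure_answer attaches
# the message and session ids and normalizes the reference payload; with no
# Conversation object it simply returns the enriched dict.
def _structure_answer_sketch():
    ans = {"answer": "Hello.", "reference": {"chunks": [], "doc_aggs": []}}
    out = structure_answer(None, ans, message_id="msg-1", session_id="sess-1")
    assert out["id"] == "msg-1" and out["session_id"] == "sess-1"
    return out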
|
||||
|
||||
|
||||
def completion(tenant_id, chat_id, question, name="New session", session_id=None, stream=True, **kwargs):
|
||||
assert name, "`name` cannot be empty."
|
||||
dia = DialogService.query(id=chat_id, tenant_id=tenant_id, status=StatusEnum.VALID.value)
|
||||
assert dia, "You do not own the chat."
|
||||
|
||||
if not session_id:
|
||||
session_id = get_uuid()
|
||||
conv = {
|
||||
"id": session_id,
|
||||
"dialog_id": chat_id,
|
||||
"name": name,
|
||||
"message": [{"role": "assistant", "content": dia[0].prompt_config.get("prologue"), "created_at": time.time()}],
|
||||
"user_id": kwargs.get("user_id", "")
|
||||
}
|
||||
ConversationService.save(**conv)
|
||||
if stream:
|
||||
yield "data:" + json.dumps({"code": 0, "message": "",
|
||||
"data": {
|
||||
"answer": conv["message"][0]["content"],
|
||||
"reference": {},
|
||||
"audio_binary": None,
|
||||
"id": None,
|
||||
"session_id": session_id
|
||||
}},
|
||||
ensure_ascii=False) + "\n\n"
|
||||
yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"
|
||||
return
|
||||
|
||||
conv = ConversationService.query(id=session_id, dialog_id=chat_id)
|
||||
if not conv:
|
||||
raise LookupError("Session does not exist")
|
||||
|
||||
conv = conv[0]
|
||||
msg = []
|
||||
question = {
|
||||
"content": question,
|
||||
"role": "user",
|
||||
"id": str(uuid4())
|
||||
}
|
||||
conv.message.append(question)
|
||||
for m in conv.message:
|
||||
if m["role"] == "system":
|
||||
continue
|
||||
if m["role"] == "assistant" and not msg:
|
||||
continue
|
||||
msg.append(m)
|
||||
message_id = msg[-1].get("id")
|
||||
e, dia = DialogService.get_by_id(conv.dialog_id)
|
||||
|
||||
kb_ids = kwargs.get("kb_ids",[])
|
||||
dia.kb_ids = list(set(dia.kb_ids + kb_ids))
|
||||
if not conv.reference:
|
||||
conv.reference = []
|
||||
conv.message.append({"role": "assistant", "content": "", "id": message_id})
|
||||
conv.reference.append({"chunks": [], "doc_aggs": []})
|
||||
|
||||
if stream:
|
||||
try:
|
||||
for ans in chat(dia, msg, True, **kwargs):
|
||||
ans = structure_answer(conv, ans, message_id, session_id)
|
||||
yield "data:" + json.dumps({"code": 0, "data": ans}, ensure_ascii=False) + "\n\n"
|
||||
ConversationService.update_by_id(conv.id, conv.to_dict())
|
||||
except Exception as e:
|
||||
yield "data:" + json.dumps({"code": 500, "message": str(e),
|
||||
"data": {"answer": "**ERROR**: " + str(e), "reference": []}},
|
||||
ensure_ascii=False) + "\n\n"
|
||||
yield "data:" + json.dumps({"code": 0, "data": True}, ensure_ascii=False) + "\n\n"
|
||||
|
||||
else:
|
||||
answer = None
|
||||
for ans in chat(dia, msg, False, **kwargs):
|
||||
answer = structure_answer(conv, ans, message_id, session_id)
|
||||
ConversationService.update_by_id(conv.id, conv.to_dict())
|
||||
break
|
||||
yield answer
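# Illustrative sketch (not part of the original code): completion() is a generator.
# With stream=True each item is an SSE-style line of the form "data:{...}\n\n";
# the tenant_id/chat_id values are placeholders supplied by the caller.
def _consume_completion_sketch(tenant_id, chat_id):
    for line in completion(tenant_id, chat_id, "What is RAGFlow?", stream=True):
        payload = json.loads(line[len("data:"):])
        if payload.get("data") is True:  # end-of-stream marker
            break
        print(payload["data"].get("answer", ""))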
|
||||
|
||||
|
||||
def iframe_completion(dialog_id, question, session_id=None, stream=True, **kwargs):
|
||||
e, dia = DialogService.get_by_id(dialog_id)
|
||||
assert e, "Dialog not found"
|
||||
if not session_id:
|
||||
session_id = get_uuid()
|
||||
conv = {
|
||||
"id": session_id,
|
||||
"dialog_id": dialog_id,
|
||||
"user_id": kwargs.get("user_id", ""),
|
||||
"message": [{"role": "assistant", "content": dia.prompt_config["prologue"], "created_at": time.time()}]
|
||||
}
|
||||
API4ConversationService.save(**conv)
|
||||
yield "data:" + json.dumps({"code": 0, "message": "",
|
||||
"data": {
|
||||
"answer": conv["message"][0]["content"],
|
||||
"reference": {},
|
||||
"audio_binary": None,
|
||||
"id": None,
|
||||
"session_id": session_id
|
||||
}},
|
||||
ensure_ascii=False) + "\n\n"
|
||||
yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"
|
||||
return
|
||||
else:
|
||||
e, conv = API4ConversationService.get_by_id(session_id)
|
||||
assert e, "Session not found!"
|
||||
|
||||
if not conv.message:
|
||||
conv.message = []
|
||||
messages = conv.message
|
||||
question = {
|
||||
"role": "user",
|
||||
"content": question,
|
||||
"id": str(uuid4())
|
||||
}
|
||||
messages.append(question)
|
||||
|
||||
msg = []
|
||||
for m in messages:
|
||||
if m["role"] == "system":
|
||||
continue
|
||||
if m["role"] == "assistant" and not msg:
|
||||
continue
|
||||
msg.append(m)
|
||||
if not msg[-1].get("id"):
|
||||
msg[-1]["id"] = get_uuid()
|
||||
message_id = msg[-1]["id"]
|
||||
|
||||
if not conv.reference:
|
||||
conv.reference = []
|
||||
conv.reference.append({"chunks": [], "doc_aggs": []})
|
||||
|
||||
if stream:
|
||||
try:
|
||||
for ans in chat(dia, msg, True, **kwargs):
|
||||
ans = structure_answer(conv, ans, message_id, session_id)
|
||||
yield "data:" + json.dumps({"code": 0, "message": "", "data": ans},
|
||||
ensure_ascii=False) + "\n\n"
|
||||
API4ConversationService.append_message(conv.id, conv.to_dict())
|
||||
except Exception as e:
|
||||
yield "data:" + json.dumps({"code": 500, "message": str(e),
|
||||
"data": {"answer": "**ERROR**: " + str(e), "reference": []}},
|
||||
ensure_ascii=False) + "\n\n"
|
||||
yield "data:" + json.dumps({"code": 0, "message": "", "data": True}, ensure_ascii=False) + "\n\n"
|
||||
|
||||
else:
|
||||
answer = None
|
||||
for ans in chat(dia, msg, False, **kwargs):
|
||||
answer = structure_answer(conv, ans, message_id, session_id)
|
||||
API4ConversationService.append_message(conv.id, conv.to_dict())
|
||||
break
|
||||
yield answer
|
||||
868
api/db/services/dialog_service.py
Normal file
@@ -0,0 +1,868 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import binascii
|
||||
import logging
|
||||
import re
|
||||
import time
|
||||
from copy import deepcopy
|
||||
from datetime import datetime
|
||||
from functools import partial
|
||||
from timeit import default_timer as timer
|
||||
import trio
|
||||
from langfuse import Langfuse
|
||||
from peewee import fn
|
||||
from agentic_reasoning import DeepResearcher
|
||||
from api import settings
|
||||
from api.db import LLMType, ParserType, StatusEnum
|
||||
from api.db.db_models import DB, Dialog
|
||||
from api.db.services.common_service import CommonService
|
||||
from api.db.services.document_service import DocumentService
|
||||
from api.db.services.knowledgebase_service import KnowledgebaseService
|
||||
from api.db.services.langfuse_service import TenantLangfuseService
|
||||
from api.db.services.llm_service import LLMBundle
|
||||
from api.db.services.tenant_llm_service import TenantLLMService
|
||||
from api.utils import current_timestamp, datetime_format
|
||||
from graphrag.general.mind_map_extractor import MindMapExtractor
|
||||
from rag.app.resume import forbidden_select_fields4resume
|
||||
from rag.app.tag import label_question
|
||||
from rag.nlp.search import index_name
|
||||
from rag.prompts.generator import chunks_format, citation_prompt, cross_languages, full_question, kb_prompt, keyword_extraction, message_fit_in, \
|
||||
gen_meta_filter, PROMPT_JINJA_ENV, ASK_SUMMARY
|
||||
from rag.utils import num_tokens_from_string, rmSpace
|
||||
from rag.utils.tavily_conn import Tavily
|
||||
|
||||
|
||||
class DialogService(CommonService):
|
||||
model = Dialog
|
||||
|
||||
@classmethod
|
||||
def save(cls, **kwargs):
|
||||
"""Save a new record to database.
|
||||
|
||||
This method creates a new record in the database with the provided field values,
|
||||
forcing an insert operation rather than an update.
|
||||
|
||||
Args:
|
||||
**kwargs: Record field values as keyword arguments.
|
||||
|
||||
Returns:
|
||||
int: Number of rows inserted, as returned by Peewee's save(force_insert=True).
|
||||
"""
|
||||
sample_obj = cls.model(**kwargs).save(force_insert=True)
|
||||
return sample_obj
|
||||
|
||||
@classmethod
|
||||
def update_many_by_id(cls, data_list):
|
||||
"""Update multiple records by their IDs.
|
||||
|
||||
This method updates multiple records in the database, identified by their IDs.
|
||||
It automatically updates the update_time and update_date fields for each record.
|
||||
|
||||
Args:
|
||||
data_list (list): List of dictionaries containing record data to update.
|
||||
Each dictionary must include an 'id' field.
|
||||
"""
|
||||
with DB.atomic():
|
||||
for data in data_list:
|
||||
data["update_time"] = current_timestamp()
|
||||
data["update_date"] = datetime_format(datetime.now())
|
||||
cls.model.update(data).where(cls.model.id == data["id"]).execute()
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_list(cls, tenant_id, page_number, items_per_page, orderby, desc, id, name):
|
||||
chats = cls.model.select()
|
||||
if id:
|
||||
chats = chats.where(cls.model.id == id)
|
||||
if name:
|
||||
chats = chats.where(cls.model.name == name)
|
||||
chats = chats.where((cls.model.tenant_id == tenant_id) & (cls.model.status == StatusEnum.VALID.value))
|
||||
if desc:
|
||||
chats = chats.order_by(cls.model.getter_by(orderby).desc())
|
||||
else:
|
||||
chats = chats.order_by(cls.model.getter_by(orderby).asc())
|
||||
|
||||
chats = chats.paginate(page_number, items_per_page)
|
||||
|
||||
return list(chats.dicts())
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_by_tenant_ids(cls, joined_tenant_ids, user_id, page_number, items_per_page, orderby, desc, keywords, parser_id=None):
|
||||
from api.db.db_models import User
|
||||
|
||||
fields = [
|
||||
cls.model.id,
|
||||
cls.model.tenant_id,
|
||||
cls.model.name,
|
||||
cls.model.description,
|
||||
cls.model.language,
|
||||
cls.model.llm_id,
|
||||
cls.model.llm_setting,
|
||||
cls.model.prompt_type,
|
||||
cls.model.prompt_config,
|
||||
cls.model.similarity_threshold,
|
||||
cls.model.vector_similarity_weight,
|
||||
cls.model.top_n,
|
||||
cls.model.top_k,
|
||||
cls.model.do_refer,
|
||||
cls.model.rerank_id,
|
||||
cls.model.kb_ids,
|
||||
cls.model.icon,
|
||||
cls.model.status,
|
||||
User.nickname,
|
||||
User.avatar.alias("tenant_avatar"),
|
||||
cls.model.update_time,
|
||||
cls.model.create_time,
|
||||
]
|
||||
if keywords:
|
||||
dialogs = (
|
||||
cls.model.select(*fields)
|
||||
.join(User, on=(cls.model.tenant_id == User.id))
|
||||
.where(
|
||||
(cls.model.tenant_id.in_(joined_tenant_ids) | (cls.model.tenant_id == user_id)) & (cls.model.status == StatusEnum.VALID.value),
|
||||
(fn.LOWER(cls.model.name).contains(keywords.lower())),
|
||||
)
|
||||
)
|
||||
else:
|
||||
dialogs = (
|
||||
cls.model.select(*fields)
|
||||
.join(User, on=(cls.model.tenant_id == User.id))
|
||||
.where(
|
||||
(cls.model.tenant_id.in_(joined_tenant_ids) | (cls.model.tenant_id == user_id)) & (cls.model.status == StatusEnum.VALID.value),
|
||||
)
|
||||
)
|
||||
if parser_id:
|
||||
dialogs = dialogs.where(cls.model.parser_id == parser_id)
|
||||
if desc:
|
||||
dialogs = dialogs.order_by(cls.model.getter_by(orderby).desc())
|
||||
else:
|
||||
dialogs = dialogs.order_by(cls.model.getter_by(orderby).asc())
|
||||
|
||||
count = dialogs.count()
|
||||
|
||||
if page_number and items_per_page:
|
||||
dialogs = dialogs.paginate(page_number, items_per_page)
|
||||
|
||||
return list(dialogs.dicts()), count
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_all_dialogs_by_tenant_id(cls, tenant_id):
|
||||
fields = [cls.model.id]
|
||||
dialogs = cls.model.select(*fields).where(cls.model.tenant_id == tenant_id)
|
||||
dialogs = dialogs.order_by(cls.model.create_time.asc())
|
||||
offset, limit = 0, 100
|
||||
res = []
|
||||
while True:
|
||||
d_batch = dialogs.offset(offset).limit(limit)
|
||||
_temp = list(d_batch.dicts())
|
||||
if not _temp:
|
||||
break
|
||||
res.extend(_temp)
|
||||
offset += limit
|
||||
return res
|
||||
|
||||
def chat_solo(dialog, messages, stream=True):
|
||||
if TenantLLMService.llm_id2llm_type(dialog.llm_id) == "image2text":
|
||||
chat_mdl = LLMBundle(dialog.tenant_id, LLMType.IMAGE2TEXT, dialog.llm_id)
|
||||
else:
|
||||
chat_mdl = LLMBundle(dialog.tenant_id, LLMType.CHAT, dialog.llm_id)
|
||||
|
||||
prompt_config = dialog.prompt_config
|
||||
tts_mdl = None
|
||||
if prompt_config.get("tts"):
|
||||
tts_mdl = LLMBundle(dialog.tenant_id, LLMType.TTS)
|
||||
msg = [{"role": m["role"], "content": re.sub(r"##\d+\$\$", "", m["content"])} for m in messages if m["role"] != "system"]
|
||||
if stream:
|
||||
last_ans = ""
|
||||
delta_ans = ""
|
||||
for ans in chat_mdl.chat_streamly(prompt_config.get("system", ""), msg, dialog.llm_setting):
|
||||
answer = ans
|
||||
delta_ans = ans[len(last_ans):]
|
||||
if num_tokens_from_string(delta_ans) < 16:
|
||||
continue
|
||||
last_ans = answer
|
||||
yield {"answer": answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans), "prompt": "", "created_at": time.time()}
|
||||
delta_ans = ""
|
||||
if delta_ans:
|
||||
yield {"answer": answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans), "prompt": "", "created_at": time.time()}
|
||||
else:
|
||||
answer = chat_mdl.chat(prompt_config.get("system", ""), msg, dialog.llm_setting)
|
||||
user_content = msg[-1].get("content", "[content not available]")
|
||||
logging.debug("User: {}|Assistant: {}".format(user_content, answer))
|
||||
yield {"answer": answer, "reference": {}, "audio_binary": tts(tts_mdl, answer), "prompt": "", "created_at": time.time()}
|
||||
|
||||
|
||||
def get_models(dialog):
|
||||
embd_mdl, chat_mdl, rerank_mdl, tts_mdl = None, None, None, None
|
||||
kbs = KnowledgebaseService.get_by_ids(dialog.kb_ids)
|
||||
embedding_list = list(set([kb.embd_id for kb in kbs]))
|
||||
if len(embedding_list) > 1:
|
||||
raise Exception("**ERROR**: Knowledge bases use different embedding models.")
|
||||
|
||||
if embedding_list:
|
||||
embd_mdl = LLMBundle(dialog.tenant_id, LLMType.EMBEDDING, embedding_list[0])
|
||||
if not embd_mdl:
|
||||
raise LookupError("Embedding model(%s) not found" % embedding_list[0])
|
||||
|
||||
if TenantLLMService.llm_id2llm_type(dialog.llm_id) == "image2text":
|
||||
chat_mdl = LLMBundle(dialog.tenant_id, LLMType.IMAGE2TEXT, dialog.llm_id)
|
||||
else:
|
||||
chat_mdl = LLMBundle(dialog.tenant_id, LLMType.CHAT, dialog.llm_id)
|
||||
|
||||
if dialog.rerank_id:
|
||||
rerank_mdl = LLMBundle(dialog.tenant_id, LLMType.RERANK, dialog.rerank_id)
|
||||
|
||||
if dialog.prompt_config.get("tts"):
|
||||
tts_mdl = LLMBundle(dialog.tenant_id, LLMType.TTS)
|
||||
return kbs, embd_mdl, rerank_mdl, chat_mdl, tts_mdl
|
||||
|
||||
|
||||
BAD_CITATION_PATTERNS = [
|
||||
re.compile(r"\(\s*ID\s*[: ]*\s*(\d+)\s*\)"), # (ID: 12)
|
||||
re.compile(r"\[\s*ID\s*[: ]*\s*(\d+)\s*\]"), # [ID: 12]
|
||||
re.compile(r"【\s*ID\s*[: ]*\s*(\d+)\s*】"), # 【ID: 12】
|
||||
re.compile(r"ref\s*(\d+)", flags=re.IGNORECASE), # ref12、REF 12
|
||||
]
|
||||
|
||||
|
||||
def repair_bad_citation_formats(answer: str, kbinfos: dict, idx: set):
|
||||
max_index = len(kbinfos["chunks"])
|
||||
|
||||
def safe_add(i):
|
||||
if 0 <= i < max_index:
|
||||
idx.add(i)
|
||||
return True
|
||||
return False
|
||||
|
||||
def find_and_replace(pattern, group_index=1, repl=lambda i: f"ID:{i}", flags=0):
|
||||
nonlocal answer
|
||||
|
||||
def replacement(match):
|
||||
try:
|
||||
i = int(match.group(group_index))
|
||||
if safe_add(i):
|
||||
return f"[{repl(i)}]"
|
||||
except Exception:
|
||||
pass
|
||||
return match.group(0)
|
||||
|
||||
answer = re.sub(pattern, replacement, answer, flags=flags)
|
||||
|
||||
for pattern in BAD_CITATION_PATTERNS:
|
||||
find_and_replace(pattern)
|
||||
|
||||
return answer, idx
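# Illustrative sketch (not part of the original code): citations written in any of
# the BAD_CITATION_PATTERNS are rewritten to the canonical [ID:n] form, and only
# indices that point at an existing chunk are collected.
def _repair_citation_sketch():
    kbinfos = {"chunks": [{}, {}, {}]}  # three retrieved chunks, so valid ids are 0..2
    answer, idx = repair_bad_citation_formats("See (ID: 1) and ref 7.", kbinfos, set())
    assert answer == "See [ID:1] and ref 7."  # index 7 is out of range, left untouched
    assert idx == {1}
    return answer, idx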
|
||||
|
||||
|
||||
def convert_conditions(metadata_condition):
|
||||
if metadata_condition is None:
|
||||
metadata_condition = {}
|
||||
op_mapping = {
|
||||
"is": "=",
|
||||
"not is": "≠"
|
||||
}
|
||||
return [
|
||||
{
|
||||
"op": op_mapping.get(cond["comparison_operator"], cond["comparison_operator"]),
|
||||
"key": cond["name"],
|
||||
"value": cond["value"]
|
||||
}
|
||||
for cond in metadata_condition.get("conditions", [])
|
||||
]
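# Illustrative sketch (not part of the original code): convert_conditions maps the
# external comparison operators onto the internal ones consumed by meta_filter.
def _convert_conditions_sketch():
    condition = {"conditions": [
        {"name": "author", "comparison_operator": "is", "value": "Alice"},
        {"name": "year", "comparison_operator": "contains", "value": "2024"},
    ]}
    assert convert_conditions(condition) == [
        {"op": "=", "key": "author", "value": "Alice"},
        {"op": "contains", "key": "year", "value": "2024"},
    ]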
|
||||
|
||||
|
||||
def meta_filter(metas: dict, filters: list[dict]):
|
||||
doc_ids = set([])
|
||||
|
||||
def filter_out(v2docs, operator, value):
|
||||
ids = []
|
||||
for input, docids in v2docs.items():
|
||||
try:
|
||||
input = float(input)
|
||||
value = float(value)
|
||||
except Exception:
|
||||
input = str(input)
|
||||
value = str(value)
|
||||
|
||||
for conds in [
|
||||
(operator == "contains", str(value).lower() in str(input).lower()),
|
||||
(operator == "not contains", str(value).lower() not in str(input).lower()),
|
||||
(operator == "start with", str(input).lower().startswith(str(value).lower())),
|
||||
(operator == "end with", str(input).lower().endswith(str(value).lower())),
|
||||
(operator == "empty", not input),
|
||||
(operator == "not empty", input),
|
||||
(operator == "=", input == value),
|
||||
(operator == "≠", input != value),
|
||||
(operator == ">", input > value),
|
||||
(operator == "<", input < value),
|
||||
(operator == "≥", input >= value),
|
||||
(operator == "≤", input <= value),
|
||||
]:
|
||||
try:
|
||||
if all(conds):
|
||||
ids.extend(docids)
|
||||
break
|
||||
except Exception:
|
||||
pass
|
||||
return ids
|
||||
|
||||
for k, v2docs in metas.items():
|
||||
for f in filters:
|
||||
if k != f["key"]:
|
||||
continue
|
||||
ids = filter_out(v2docs, f["op"], f["value"])
|
||||
if not doc_ids:
|
||||
doc_ids = set(ids)
|
||||
else:
|
||||
doc_ids = doc_ids & set(ids)
|
||||
if not doc_ids:
|
||||
return []
|
||||
return list(doc_ids)
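# Illustrative sketch (not part of the original code): metas maps each metadata key
# to {value: [doc_ids]}; the doc-id sets produced by all matching filters are
# intersected, so only documents satisfying every condition survive.
def _meta_filter_sketch():
    metas = {"author": {"Alice": ["doc1", "doc2"], "Bob": ["doc3"]},
             "year": {"2024": ["doc1"], "2023": ["doc2", "doc3"]}}
    filters = [{"key": "author", "op": "=", "value": "Alice"},
               {"key": "year", "op": "=", "value": "2024"}]
    assert set(meta_filter(metas, filters)) == {"doc1"}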
|
||||
|
||||
|
||||
def chat(dialog, messages, stream=True, **kwargs):
|
||||
assert messages[-1]["role"] == "user", "The last content of this conversation is not from user."
|
||||
if not dialog.kb_ids and not dialog.prompt_config.get("tavily_api_key"):
|
||||
for ans in chat_solo(dialog, messages, stream):
|
||||
yield ans
|
||||
return
|
||||
|
||||
chat_start_ts = timer()
|
||||
|
||||
if TenantLLMService.llm_id2llm_type(dialog.llm_id) == "image2text":
|
||||
llm_model_config = TenantLLMService.get_model_config(dialog.tenant_id, LLMType.IMAGE2TEXT, dialog.llm_id)
|
||||
else:
|
||||
llm_model_config = TenantLLMService.get_model_config(dialog.tenant_id, LLMType.CHAT, dialog.llm_id)
|
||||
|
||||
max_tokens = llm_model_config.get("max_tokens", 8192)
|
||||
|
||||
check_llm_ts = timer()
|
||||
|
||||
langfuse_tracer = None
|
||||
trace_context = {}
|
||||
langfuse_keys = TenantLangfuseService.filter_by_tenant(tenant_id=dialog.tenant_id)
|
||||
if langfuse_keys:
|
||||
langfuse = Langfuse(public_key=langfuse_keys.public_key, secret_key=langfuse_keys.secret_key, host=langfuse_keys.host)
|
||||
if langfuse.auth_check():
|
||||
langfuse_tracer = langfuse
|
||||
trace_id = langfuse_tracer.create_trace_id()
|
||||
trace_context = {"trace_id": trace_id}
|
||||
|
||||
check_langfuse_tracer_ts = timer()
|
||||
kbs, embd_mdl, rerank_mdl, chat_mdl, tts_mdl = get_models(dialog)
|
||||
toolcall_session, tools = kwargs.get("toolcall_session"), kwargs.get("tools")
|
||||
if toolcall_session and tools:
|
||||
chat_mdl.bind_tools(toolcall_session, tools)
|
||||
bind_models_ts = timer()
|
||||
|
||||
retriever = settings.retrievaler
|
||||
questions = [m["content"] for m in messages if m["role"] == "user"][-3:]
|
||||
attachments = kwargs["doc_ids"].split(",") if "doc_ids" in kwargs else []
|
||||
if "doc_ids" in messages[-1]:
|
||||
attachments = messages[-1]["doc_ids"]
|
||||
|
||||
prompt_config = dialog.prompt_config
|
||||
field_map = KnowledgebaseService.get_field_map(dialog.kb_ids)
|
||||
# try to use sql if field mapping is good to go
|
||||
if field_map:
|
||||
logging.debug("Use SQL to retrieval:{}".format(questions[-1]))
|
||||
ans = use_sql(questions[-1], field_map, dialog.tenant_id, chat_mdl, prompt_config.get("quote", True), dialog.kb_ids)
|
||||
if ans:
|
||||
yield ans
|
||||
return
|
||||
|
||||
for p in prompt_config["parameters"]:
|
||||
if p["key"] == "knowledge":
|
||||
continue
|
||||
if p["key"] not in kwargs and not p["optional"]:
|
||||
raise KeyError("Miss parameter: " + p["key"])
|
||||
if p["key"] not in kwargs:
|
||||
prompt_config["system"] = prompt_config["system"].replace("{%s}" % p["key"], " ")
|
||||
|
||||
if len(questions) > 1 and prompt_config.get("refine_multiturn"):
|
||||
questions = [full_question(dialog.tenant_id, dialog.llm_id, messages)]
|
||||
else:
|
||||
questions = questions[-1:]
|
||||
|
||||
if prompt_config.get("cross_languages"):
|
||||
questions = [cross_languages(dialog.tenant_id, dialog.llm_id, questions[0], prompt_config["cross_languages"])]
|
||||
|
||||
if dialog.meta_data_filter:
|
||||
metas = DocumentService.get_meta_by_kbs(dialog.kb_ids)
|
||||
if dialog.meta_data_filter.get("method") == "auto":
|
||||
filters = gen_meta_filter(chat_mdl, metas, questions[-1])
|
||||
attachments.extend(meta_filter(metas, filters))
|
||||
if not attachments:
|
||||
attachments = None
|
||||
elif dialog.meta_data_filter.get("method") == "manual":
|
||||
attachments.extend(meta_filter(metas, dialog.meta_data_filter["manual"]))
|
||||
if not attachments:
|
||||
attachments = None
|
||||
|
||||
if prompt_config.get("keyword", False):
|
||||
questions[-1] += keyword_extraction(chat_mdl, questions[-1])
|
||||
|
||||
refine_question_ts = timer()
|
||||
|
||||
thought = ""
|
||||
kbinfos = {"total": 0, "chunks": [], "doc_aggs": []}
|
||||
knowledges = []
|
||||
|
||||
if attachments is not None and "knowledge" in [p["key"] for p in prompt_config["parameters"]]:
|
||||
tenant_ids = list(set([kb.tenant_id for kb in kbs]))
|
||||
knowledges = []
|
||||
if prompt_config.get("reasoning", False):
|
||||
reasoner = DeepResearcher(
|
||||
chat_mdl,
|
||||
prompt_config,
|
||||
partial(
|
||||
retriever.retrieval,
|
||||
embd_mdl=embd_mdl,
|
||||
tenant_ids=tenant_ids,
|
||||
kb_ids=dialog.kb_ids,
|
||||
page=1,
|
||||
page_size=dialog.top_n,
|
||||
similarity_threshold=0.2,
|
||||
vector_similarity_weight=0.3,
|
||||
doc_ids=attachments,
|
||||
),
|
||||
)
|
||||
|
||||
for think in reasoner.thinking(kbinfos, " ".join(questions)):
|
||||
if isinstance(think, str):
|
||||
thought = think
|
||||
knowledges = [t for t in think.split("\n") if t]
|
||||
elif stream:
|
||||
yield think
|
||||
else:
|
||||
if embd_mdl:
|
||||
kbinfos = retriever.retrieval(
|
||||
" ".join(questions),
|
||||
embd_mdl,
|
||||
tenant_ids,
|
||||
dialog.kb_ids,
|
||||
1,
|
||||
dialog.top_n,
|
||||
dialog.similarity_threshold,
|
||||
dialog.vector_similarity_weight,
|
||||
doc_ids=attachments,
|
||||
top=dialog.top_k,
|
||||
aggs=False,
|
||||
rerank_mdl=rerank_mdl,
|
||||
rank_feature=label_question(" ".join(questions), kbs),
|
||||
)
|
||||
if prompt_config.get("tavily_api_key"):
|
||||
tav = Tavily(prompt_config["tavily_api_key"])
|
||||
tav_res = tav.retrieve_chunks(" ".join(questions))
|
||||
kbinfos["chunks"].extend(tav_res["chunks"])
|
||||
kbinfos["doc_aggs"].extend(tav_res["doc_aggs"])
|
||||
if prompt_config.get("use_kg"):
|
||||
ck = settings.kg_retrievaler.retrieval(" ".join(questions), tenant_ids, dialog.kb_ids, embd_mdl,
|
||||
LLMBundle(dialog.tenant_id, LLMType.CHAT))
|
||||
if ck["content_with_weight"]:
|
||||
kbinfos["chunks"].insert(0, ck)
|
||||
|
||||
knowledges = kb_prompt(kbinfos, max_tokens)
|
||||
|
||||
logging.debug("{}->{}".format(" ".join(questions), "\n->".join(knowledges)))
|
||||
|
||||
retrieval_ts = timer()
|
||||
if not knowledges and prompt_config.get("empty_response"):
|
||||
empty_res = prompt_config["empty_response"]
|
||||
yield {"answer": empty_res, "reference": kbinfos, "prompt": "\n\n### Query:\n%s" % " ".join(questions),
|
||||
"audio_binary": tts(tts_mdl, empty_res)}
|
||||
return {"answer": prompt_config["empty_response"], "reference": kbinfos}
|
||||
|
||||
kwargs["knowledge"] = "\n------\n" + "\n\n------\n\n".join(knowledges)
|
||||
gen_conf = dialog.llm_setting
|
||||
|
||||
msg = [{"role": "system", "content": prompt_config["system"].format(**kwargs)}]
|
||||
prompt4citation = ""
|
||||
if knowledges and (prompt_config.get("quote", True) and kwargs.get("quote", True)):
|
||||
prompt4citation = citation_prompt()
|
||||
msg.extend([{"role": m["role"], "content": re.sub(r"##\d+\$\$", "", m["content"])} for m in messages if m["role"] != "system"])
|
||||
used_token_count, msg = message_fit_in(msg, int(max_tokens * 0.95))
|
||||
assert len(msg) >= 2, f"message_fit_in has bug: {msg}"
|
||||
prompt = msg[0]["content"]
|
||||
|
||||
if "max_tokens" in gen_conf:
|
||||
gen_conf["max_tokens"] = min(gen_conf["max_tokens"], max_tokens - used_token_count)
|
||||
|
||||
def decorate_answer(answer):
|
||||
nonlocal embd_mdl, prompt_config, knowledges, kwargs, kbinfos, prompt, retrieval_ts, questions, langfuse_tracer
|
||||
|
||||
refs = []
|
||||
ans = answer.split("</think>")
|
||||
think = ""
|
||||
if len(ans) == 2:
|
||||
think = ans[0] + "</think>"
|
||||
answer = ans[1]
|
||||
|
||||
if knowledges and (prompt_config.get("quote", True) and kwargs.get("quote", True)):
|
||||
idx = set([])
|
||||
if embd_mdl and not re.search(r"\[ID:([0-9]+)\]", answer):
|
||||
answer, idx = retriever.insert_citations(
|
||||
answer,
|
||||
[ck["content_ltks"] for ck in kbinfos["chunks"]],
|
||||
[ck["vector"] for ck in kbinfos["chunks"]],
|
||||
embd_mdl,
|
||||
tkweight=1 - dialog.vector_similarity_weight,
|
||||
vtweight=dialog.vector_similarity_weight,
|
||||
)
|
||||
else:
|
||||
for match in re.finditer(r"\[ID:([0-9]+)\]", answer):
|
||||
i = int(match.group(1))
|
||||
if i < len(kbinfos["chunks"]):
|
||||
idx.add(i)
|
||||
|
||||
answer, idx = repair_bad_citation_formats(answer, kbinfos, idx)
|
||||
|
||||
idx = set([kbinfos["chunks"][int(i)]["doc_id"] for i in idx])
|
||||
recall_docs = [d for d in kbinfos["doc_aggs"] if d["doc_id"] in idx]
|
||||
if not recall_docs:
|
||||
recall_docs = kbinfos["doc_aggs"]
|
||||
kbinfos["doc_aggs"] = recall_docs
|
||||
|
||||
refs = deepcopy(kbinfos)
|
||||
for c in refs["chunks"]:
|
||||
if c.get("vector"):
|
||||
del c["vector"]
|
||||
|
||||
if answer.lower().find("invalid key") >= 0 or answer.lower().find("invalid api") >= 0:
|
||||
answer += " Please set LLM API-Key in 'User Setting -> Model providers -> API-Key'"
|
||||
finish_chat_ts = timer()
|
||||
|
||||
total_time_cost = (finish_chat_ts - chat_start_ts) * 1000
|
||||
check_llm_time_cost = (check_llm_ts - chat_start_ts) * 1000
|
||||
check_langfuse_tracer_cost = (check_langfuse_tracer_ts - check_llm_ts) * 1000
|
||||
bind_embedding_time_cost = (bind_models_ts - check_langfuse_tracer_ts) * 1000
|
||||
refine_question_time_cost = (refine_question_ts - bind_models_ts) * 1000
|
||||
retrieval_time_cost = (retrieval_ts - refine_question_ts) * 1000
|
||||
generate_result_time_cost = (finish_chat_ts - retrieval_ts) * 1000
|
||||
|
||||
tk_num = num_tokens_from_string(think + answer)
|
||||
prompt += "\n\n### Query:\n%s" % " ".join(questions)
|
||||
prompt = (
|
||||
f"{prompt}\n\n"
|
||||
"## Time elapsed:\n"
|
||||
f" - Total: {total_time_cost:.1f}ms\n"
|
||||
f" - Check LLM: {check_llm_time_cost:.1f}ms\n"
|
||||
f" - Check Langfuse tracer: {check_langfuse_tracer_cost:.1f}ms\n"
|
||||
f" - Bind models: {bind_embedding_time_cost:.1f}ms\n"
|
||||
f" - Query refinement(LLM): {refine_question_time_cost:.1f}ms\n"
|
||||
f" - Retrieval: {retrieval_time_cost:.1f}ms\n"
|
||||
f" - Generate answer: {generate_result_time_cost:.1f}ms\n\n"
|
||||
"## Token usage:\n"
|
||||
f" - Generated tokens(approximately): {tk_num}\n"
|
||||
f" - Token speed: {int(tk_num / (generate_result_time_cost / 1000.0))}/s"
|
||||
)
|
||||
|
||||
# Add a condition check to call the end method only if langfuse_tracer exists
|
||||
if langfuse_tracer and "langfuse_generation" in locals():
|
||||
langfuse_output = "\n" + re.sub(r"^.*?(### Query:.*)", r"\1", prompt, flags=re.DOTALL)
|
||||
langfuse_output = {"time_elapsed:": re.sub(r"\n", " \n", langfuse_output), "created_at": time.time()}
|
||||
langfuse_generation.update(output=langfuse_output)
|
||||
langfuse_generation.end()
|
||||
|
||||
return {"answer": think + answer, "reference": refs, "prompt": re.sub(r"\n", " \n", prompt), "created_at": time.time()}
|
||||
|
||||
if langfuse_tracer:
|
||||
langfuse_generation = langfuse_tracer.start_generation(
|
||||
trace_context=trace_context, name="chat", model=llm_model_config["llm_name"],
|
||||
input={"prompt": prompt, "prompt4citation": prompt4citation, "messages": msg}
|
||||
)
|
||||
|
||||
if stream:
|
||||
last_ans = ""
|
||||
answer = ""
|
||||
for ans in chat_mdl.chat_streamly(prompt + prompt4citation, msg[1:], gen_conf):
|
||||
if thought:
|
||||
ans = re.sub(r"^.*</think>", "", ans, flags=re.DOTALL)
|
||||
answer = ans
|
||||
delta_ans = ans[len(last_ans):]
|
||||
if num_tokens_from_string(delta_ans) < 16:
|
||||
continue
|
||||
last_ans = answer
|
||||
yield {"answer": thought + answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans)}
|
||||
delta_ans = answer[len(last_ans):]
|
||||
if delta_ans:
|
||||
yield {"answer": thought + answer, "reference": {}, "audio_binary": tts(tts_mdl, delta_ans)}
|
||||
yield decorate_answer(thought + answer)
|
||||
else:
|
||||
answer = chat_mdl.chat(prompt + prompt4citation, msg[1:], gen_conf)
|
||||
user_content = msg[-1].get("content", "[content not available]")
|
||||
logging.debug("User: {}|Assistant: {}".format(user_content, answer))
|
||||
res = decorate_answer(answer)
|
||||
res["audio_binary"] = tts(tts_mdl, answer)
|
||||
yield res
|
||||
|
||||
|
||||
def use_sql(question, field_map, tenant_id, chat_mdl, quota=True, kb_ids=None):
|
||||
sys_prompt = "You are a Database Administrator. You need to check the fields of the following tables based on the user's list of questions and write the SQL corresponding to the last question."
|
||||
user_prompt = """
|
||||
Table name: {};
|
||||
Table of database fields are as follows:
|
||||
{}
|
||||
|
||||
Questions are as follows:
|
||||
{}
|
||||
Please write the SQL, only SQL, without any other explanations or text.
|
||||
""".format(index_name(tenant_id), "\n".join([f"{k}: {v}" for k, v in field_map.items()]), question)
|
||||
tried_times = 0
|
||||
|
||||
def get_table():
|
||||
nonlocal sys_prompt, user_prompt, question, tried_times
|
||||
sql = chat_mdl.chat(sys_prompt, [{"role": "user", "content": user_prompt}], {"temperature": 0.06})
|
||||
sql = re.sub(r"^.*</think>", "", sql, flags=re.DOTALL)
|
||||
logging.debug(f"{question} ==> {user_prompt} get SQL: {sql}")
|
||||
sql = re.sub(r"[\r\n]+", " ", sql.lower())
|
||||
sql = re.sub(r".*select ", "select ", sql.lower())
|
||||
sql = re.sub(r" +", " ", sql)
|
||||
sql = re.sub(r"([;;]|```).*", "", sql)
|
||||
if sql[: len("select ")] != "select ":
|
||||
return None, None
|
||||
if not re.search(r"((sum|avg|max|min)\(|group by )", sql.lower()):
|
||||
if sql[: len("select *")] != "select *":
|
||||
sql = "select doc_id,docnm_kwd," + sql[6:]
|
||||
else:
|
||||
flds = []
|
||||
for k in field_map.keys():
|
||||
if k in forbidden_select_fields4resume:
|
||||
continue
|
||||
if len(flds) > 11:
|
||||
break
|
||||
flds.append(k)
|
||||
sql = "select doc_id,docnm_kwd," + ",".join(flds) + sql[8:]
|
||||
|
||||
if kb_ids:
|
||||
kb_filter = "(" + " OR ".join([f"kb_id = '{kb_id}'" for kb_id in kb_ids]) + ")"
|
||||
if "where" not in sql.lower():
|
||||
sql += f" WHERE {kb_filter}"
|
||||
else:
|
||||
sql += f" AND {kb_filter}"
|
||||
|
||||
logging.debug(f"{question} get SQL(refined): {sql}")
|
||||
tried_times += 1
|
||||
return settings.retrievaler.sql_retrieval(sql, format="json"), sql
|
||||
|
||||
tbl, sql = get_table()
|
||||
if tbl is None:
|
||||
return None
|
||||
if tbl.get("error") and tried_times <= 2:
|
||||
user_prompt = """
|
||||
Table name: {};
|
||||
Table of database fields are as follows:
|
||||
{}
|
||||
|
||||
Questions are as follows:
|
||||
{}
|
||||
Please write the SQL, only SQL, without any other explanations or text.
|
||||
|
||||
|
||||
The SQL error you provided last time is as follows:
|
||||
{}
|
||||
|
||||
Error issued by database as follows:
|
||||
{}
|
||||
|
||||
Please correct the error and write SQL again, only SQL, without any other explanations or text.
|
||||
""".format(index_name(tenant_id), "\n".join([f"{k}: {v}" for k, v in field_map.items()]), question, sql, tbl["error"])
|
||||
tbl, sql = get_table()
|
||||
logging.debug("TRY it again: {}".format(sql))
|
||||
|
||||
logging.debug("GET table: {}".format(tbl))
|
||||
if tbl.get("error") or len(tbl["rows"]) == 0:
|
||||
return None
|
||||
|
||||
docid_idx = set([ii for ii, c in enumerate(tbl["columns"]) if c["name"] == "doc_id"])
|
||||
doc_name_idx = set([ii for ii, c in enumerate(tbl["columns"]) if c["name"] == "docnm_kwd"])
|
||||
column_idx = [ii for ii in range(len(tbl["columns"])) if ii not in (docid_idx | doc_name_idx)]
|
||||
|
||||
# compose Markdown table
|
||||
columns = (
|
||||
"|" + "|".join(
|
||||
[re.sub(r"(/.*|([^()]+))", "", field_map.get(tbl["columns"][i]["name"], tbl["columns"][i]["name"])) for i in column_idx]) + (
|
||||
"|Source|" if docid_idx and docid_idx else "|")
|
||||
)
|
||||
|
||||
line = "|" + "|".join(["------" for _ in range(len(column_idx))]) + ("|------|" if docid_idx and docid_idx else "")
|
||||
|
||||
rows = ["|" + "|".join([rmSpace(str(r[i])) for i in column_idx]).replace("None", " ") + "|" for r in tbl["rows"]]
|
||||
rows = [r for r in rows if re.sub(r"[ |]+", "", r)]
|
||||
if quota:
|
||||
rows = "\n".join([r + f" ##{ii}$$ |" for ii, r in enumerate(rows)])
|
||||
else:
|
||||
rows = "\n".join([r + f" ##{ii}$$ |" for ii, r in enumerate(rows)])
|
||||
rows = re.sub(r"T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]+Z)?\|", "|", rows)
|
||||
|
||||
if not docid_idx or not doc_name_idx:
|
||||
logging.warning("SQL missing field: " + sql)
|
||||
return {"answer": "\n".join([columns, line, rows]), "reference": {"chunks": [], "doc_aggs": []}, "prompt": sys_prompt}
|
||||
|
||||
docid_idx = list(docid_idx)[0]
|
||||
doc_name_idx = list(doc_name_idx)[0]
|
||||
doc_aggs = {}
|
||||
for r in tbl["rows"]:
|
||||
if r[docid_idx] not in doc_aggs:
|
||||
doc_aggs[r[docid_idx]] = {"doc_name": r[doc_name_idx], "count": 0}
|
||||
doc_aggs[r[docid_idx]]["count"] += 1
|
||||
return {
|
||||
"answer": "\n".join([columns, line, rows]),
|
||||
"reference": {
|
||||
"chunks": [{"doc_id": r[docid_idx], "docnm_kwd": r[doc_name_idx]} for r in tbl["rows"]],
|
||||
"doc_aggs": [{"doc_id": did, "doc_name": d["doc_name"], "count": d["count"]} for did, d in doc_aggs.items()],
|
||||
},
|
||||
"prompt": sys_prompt,
|
||||
}
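# Illustrative sketch (not part of the original code): the same normalization steps
# that get_table() applies to the raw LLM reply, shown on a standalone string.
def _normalize_sql_sketch():
    sql = "Sure! Here you go:\nSELECT name, age FROM resumes; -- done"
    sql = re.sub(r"^.*</think>", "", sql, flags=re.DOTALL)
    sql = re.sub(r"[\r\n]+", " ", sql.lower())
    sql = re.sub(r".*select ", "select ", sql.lower())
    sql = re.sub(r" +", " ", sql)
    sql = re.sub(r"([;;]|```).*", "", sql)
    assert sql == "select name, age from resumes"
    return sql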
|
||||
|
||||
|
||||
def tts(tts_mdl, text):
    if not tts_mdl or not text:
        return
    binary = b""
    for chunk in tts_mdl.tts(text):
        binary += chunk
    return binascii.hexlify(binary).decode("utf-8")
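# Illustrative sketch (not part of the original code): tts() returns the audio bytes
# hex-encoded as a string, so a caller that needs raw bytes can simply reverse it.
def _decode_tts_sketch(hex_audio):
    return binascii.unhexlify(hex_audio) if hex_audio else b""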
|
||||
|
||||
|
||||
def ask(question, kb_ids, tenant_id, chat_llm_name=None, search_config={}):
|
||||
doc_ids = search_config.get("doc_ids", [])
|
||||
rerank_mdl = None
|
||||
kb_ids = search_config.get("kb_ids", kb_ids)
|
||||
chat_llm_name = search_config.get("chat_id", chat_llm_name)
|
||||
rerank_id = search_config.get("rerank_id", "")
|
||||
meta_data_filter = search_config.get("meta_data_filter")
|
||||
|
||||
kbs = KnowledgebaseService.get_by_ids(kb_ids)
|
||||
embedding_list = list(set([kb.embd_id for kb in kbs]))
|
||||
|
||||
is_knowledge_graph = all([kb.parser_id == ParserType.KG for kb in kbs])
|
||||
retriever = settings.retrievaler if not is_knowledge_graph else settings.kg_retrievaler
|
||||
|
||||
embd_mdl = LLMBundle(tenant_id, LLMType.EMBEDDING, embedding_list[0])
|
||||
chat_mdl = LLMBundle(tenant_id, LLMType.CHAT, chat_llm_name)
|
||||
if rerank_id:
|
||||
rerank_mdl = LLMBundle(tenant_id, LLMType.RERANK, rerank_id)
|
||||
max_tokens = chat_mdl.max_length
|
||||
tenant_ids = list(set([kb.tenant_id for kb in kbs]))
|
||||
|
||||
if meta_data_filter:
|
||||
metas = DocumentService.get_meta_by_kbs(kb_ids)
|
||||
if meta_data_filter.get("method") == "auto":
|
||||
filters = gen_meta_filter(chat_mdl, metas, question)
|
||||
doc_ids.extend(meta_filter(metas, filters))
|
||||
if not doc_ids:
|
||||
doc_ids = None
|
||||
elif meta_data_filter.get("method") == "manual":
|
||||
doc_ids.extend(meta_filter(metas, meta_data_filter["manual"]))
|
||||
if not doc_ids:
|
||||
doc_ids = None
|
||||
|
||||
kbinfos = retriever.retrieval(
|
||||
question=question,
|
||||
embd_mdl=embd_mdl,
|
||||
tenant_ids=tenant_ids,
|
||||
kb_ids=kb_ids,
|
||||
page=1,
|
||||
page_size=12,
|
||||
similarity_threshold=search_config.get("similarity_threshold", 0.1),
|
||||
vector_similarity_weight=search_config.get("vector_similarity_weight", 0.3),
|
||||
top=search_config.get("top_k", 1024),
|
||||
doc_ids=doc_ids,
|
||||
aggs=False,
|
||||
rerank_mdl=rerank_mdl,
|
||||
rank_feature=label_question(question, kbs)
|
||||
)
|
||||
|
||||
knowledges = kb_prompt(kbinfos, max_tokens)
|
||||
sys_prompt = PROMPT_JINJA_ENV.from_string(ASK_SUMMARY).render(knowledge="\n".join(knowledges))
|
||||
|
||||
msg = [{"role": "user", "content": question}]
|
||||
|
||||
def decorate_answer(answer):
|
||||
nonlocal knowledges, kbinfos, sys_prompt
|
||||
answer, idx = retriever.insert_citations(answer, [ck["content_ltks"] for ck in kbinfos["chunks"]], [ck["vector"] for ck in kbinfos["chunks"]],
|
||||
embd_mdl, tkweight=0.7, vtweight=0.3)
|
||||
idx = set([kbinfos["chunks"][int(i)]["doc_id"] for i in idx])
|
||||
recall_docs = [d for d in kbinfos["doc_aggs"] if d["doc_id"] in idx]
|
||||
if not recall_docs:
|
||||
recall_docs = kbinfos["doc_aggs"]
|
||||
kbinfos["doc_aggs"] = recall_docs
|
||||
refs = deepcopy(kbinfos)
|
||||
for c in refs["chunks"]:
|
||||
if c.get("vector"):
|
||||
del c["vector"]
|
||||
|
||||
if answer.lower().find("invalid key") >= 0 or answer.lower().find("invalid api") >= 0:
|
||||
answer += " Please set LLM API-Key in 'User Setting -> Model Providers -> API-Key'"
|
||||
refs["chunks"] = chunks_format(refs)
|
||||
return {"answer": answer, "reference": refs}
|
||||
|
||||
answer = ""
|
||||
for ans in chat_mdl.chat_streamly(sys_prompt, msg, {"temperature": 0.1}):
|
||||
answer = ans
|
||||
yield {"answer": answer, "reference": {}}
|
||||
yield decorate_answer(answer)
|
||||
|
||||
|
||||
def gen_mindmap(question, kb_ids, tenant_id, search_config={}):
|
||||
meta_data_filter = search_config.get("meta_data_filter", {})
|
||||
doc_ids = search_config.get("doc_ids", [])
|
||||
rerank_id = search_config.get("rerank_id", "")
|
||||
rerank_mdl = None
|
||||
kbs = KnowledgebaseService.get_by_ids(kb_ids)
|
||||
if not kbs:
|
||||
return {"error": "No KB selected"}
|
||||
embedding_list = list(set([kb.embd_id for kb in kbs]))
|
||||
tenant_ids = list(set([kb.tenant_id for kb in kbs]))
|
||||
|
||||
embd_mdl = LLMBundle(tenant_id, LLMType.EMBEDDING, llm_name=embedding_list[0])
|
||||
chat_mdl = LLMBundle(tenant_id, LLMType.CHAT, llm_name=search_config.get("chat_id", ""))
|
||||
if rerank_id:
|
||||
rerank_mdl = LLMBundle(tenant_id, LLMType.RERANK, rerank_id)
|
||||
|
||||
if meta_data_filter:
|
||||
metas = DocumentService.get_meta_by_kbs(kb_ids)
|
||||
if meta_data_filter.get("method") == "auto":
|
||||
filters = gen_meta_filter(chat_mdl, metas, question)
|
||||
doc_ids.extend(meta_filter(metas, filters))
|
||||
if not doc_ids:
|
||||
doc_ids = None
|
||||
elif meta_data_filter.get("method") == "manual":
|
||||
doc_ids.extend(meta_filter(metas, meta_data_filter["manual"]))
|
||||
if not doc_ids:
|
||||
doc_ids = None
|
||||
|
||||
ranks = settings.retrievaler.retrieval(
|
||||
question=question,
|
||||
embd_mdl=embd_mdl,
|
||||
tenant_ids=tenant_ids,
|
||||
kb_ids=kb_ids,
|
||||
page=1,
|
||||
page_size=12,
|
||||
similarity_threshold=search_config.get("similarity_threshold", 0.2),
|
||||
vector_similarity_weight=search_config.get("vector_similarity_weight", 0.3),
|
||||
top=search_config.get("top_k", 1024),
|
||||
doc_ids=doc_ids,
|
||||
aggs=False,
|
||||
rerank_mdl=rerank_mdl,
|
||||
rank_feature=label_question(question, kbs),
|
||||
)
|
||||
mindmap = MindMapExtractor(chat_mdl)
|
||||
mind_map = trio.run(mindmap, [c["content_with_weight"] for c in ranks["chunks"]])
|
||||
return mind_map.output
|
||||
975
api/db/services/document_service.py
Normal file
@@ -0,0 +1,975 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import json
|
||||
import logging
|
||||
import random
|
||||
import re
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
from copy import deepcopy
|
||||
from datetime import datetime
|
||||
from io import BytesIO
|
||||
|
||||
import trio
|
||||
import xxhash
|
||||
from peewee import fn, Case, JOIN
|
||||
|
||||
from api import settings
|
||||
from api.constants import IMG_BASE64_PREFIX, FILE_NAME_LEN_LIMIT
|
||||
from api.db import FileType, LLMType, ParserType, StatusEnum, TaskStatus, UserTenantRole, CanvasCategory
|
||||
from api.db.db_models import DB, Document, Knowledgebase, Task, Tenant, UserTenant, File2Document, File, UserCanvas, \
|
||||
User
|
||||
from api.db.db_utils import bulk_insert_into_db
|
||||
from api.db.services.common_service import CommonService
|
||||
from api.db.services.knowledgebase_service import KnowledgebaseService
|
||||
from api.utils import current_timestamp, get_format_time, get_uuid
|
||||
from rag.nlp import rag_tokenizer, search
|
||||
from rag.settings import get_svr_queue_name, SVR_CONSUMER_GROUP_NAME
|
||||
from rag.utils.redis_conn import REDIS_CONN
|
||||
from rag.utils.storage_factory import STORAGE_IMPL
|
||||
from rag.utils.doc_store_conn import OrderByExpr
|
||||
|
||||
|
||||
class DocumentService(CommonService):
|
||||
model = Document
|
||||
|
||||
@classmethod
|
||||
def get_cls_model_fields(cls):
|
||||
return [
|
||||
cls.model.id,
|
||||
cls.model.thumbnail,
|
||||
cls.model.kb_id,
|
||||
cls.model.parser_id,
|
||||
cls.model.pipeline_id,
|
||||
cls.model.parser_config,
|
||||
cls.model.source_type,
|
||||
cls.model.type,
|
||||
cls.model.created_by,
|
||||
cls.model.name,
|
||||
cls.model.location,
|
||||
cls.model.size,
|
||||
cls.model.token_num,
|
||||
cls.model.chunk_num,
|
||||
cls.model.progress,
|
||||
cls.model.progress_msg,
|
||||
cls.model.process_begin_at,
|
||||
cls.model.process_duration,
|
||||
cls.model.meta_fields,
|
||||
cls.model.suffix,
|
||||
cls.model.run,
|
||||
cls.model.status,
|
||||
cls.model.create_time,
|
||||
cls.model.create_date,
|
||||
cls.model.update_time,
|
||||
cls.model.update_date,
|
||||
]
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_list(cls, kb_id, page_number, items_per_page,
|
||||
orderby, desc, keywords, id, name):
|
||||
fields = cls.get_cls_model_fields()
|
||||
docs = cls.model.select(*[*fields, UserCanvas.title]).join(File2Document, on = (File2Document.document_id == cls.model.id))\
|
||||
.join(File, on = (File.id == File2Document.file_id))\
|
||||
.join(UserCanvas, on = ((cls.model.pipeline_id == UserCanvas.id) & (UserCanvas.canvas_category == CanvasCategory.DataFlow.value)), join_type=JOIN.LEFT_OUTER)\
|
||||
.where(cls.model.kb_id == kb_id)
|
||||
if id:
|
||||
docs = docs.where(
|
||||
cls.model.id == id)
|
||||
if name:
|
||||
docs = docs.where(
|
||||
cls.model.name == name
|
||||
)
|
||||
if keywords:
|
||||
docs = docs.where(
|
||||
fn.LOWER(cls.model.name).contains(keywords.lower())
|
||||
)
|
||||
if desc:
|
||||
docs = docs.order_by(cls.model.getter_by(orderby).desc())
|
||||
else:
|
||||
docs = docs.order_by(cls.model.getter_by(orderby).asc())
|
||||
|
||||
count = docs.count()
|
||||
docs = docs.paginate(page_number, items_per_page)
|
||||
return list(docs.dicts()), count
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def check_doc_health(cls, tenant_id: str, filename):
|
||||
import os
|
||||
MAX_FILE_NUM_PER_USER = int(os.environ.get("MAX_FILE_NUM_PER_USER", 0))
|
||||
if MAX_FILE_NUM_PER_USER > 0 and DocumentService.get_doc_count(tenant_id) >= MAX_FILE_NUM_PER_USER:
|
||||
raise RuntimeError("Exceed the maximum file number of a free user!")
|
||||
if len(filename.encode("utf-8")) > FILE_NAME_LEN_LIMIT:
|
||||
raise RuntimeError("Exceed the maximum length of file name!")
|
||||
return True
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_by_kb_id(cls, kb_id, page_number, items_per_page,
|
||||
orderby, desc, keywords, run_status, types, suffix):
|
||||
fields = cls.get_cls_model_fields()
|
||||
if keywords:
|
||||
docs = cls.model.select(*[*fields, UserCanvas.title.alias("pipeline_name"), User.nickname])\
|
||||
.join(File2Document, on=(File2Document.document_id == cls.model.id))\
|
||||
.join(File, on=(File.id == File2Document.file_id))\
|
||||
.join(UserCanvas, on=(cls.model.pipeline_id == UserCanvas.id), join_type=JOIN.LEFT_OUTER)\
|
||||
.join(User, on=(cls.model.created_by == User.id), join_type=JOIN.LEFT_OUTER)\
|
||||
.where(
|
||||
(cls.model.kb_id == kb_id),
|
||||
(fn.LOWER(cls.model.name).contains(keywords.lower()))
|
||||
)
|
||||
else:
|
||||
docs = cls.model.select(*[*fields, UserCanvas.title.alias("pipeline_name"), User.nickname])\
|
||||
.join(File2Document, on=(File2Document.document_id == cls.model.id))\
|
||||
.join(UserCanvas, on=(cls.model.pipeline_id == UserCanvas.id), join_type=JOIN.LEFT_OUTER)\
|
||||
.join(File, on=(File.id == File2Document.file_id))\
|
||||
.join(User, on=(cls.model.created_by == User.id), join_type=JOIN.LEFT_OUTER)\
|
||||
.where(cls.model.kb_id == kb_id)
|
||||
|
||||
if run_status:
|
||||
docs = docs.where(cls.model.run.in_(run_status))
|
||||
if types:
|
||||
docs = docs.where(cls.model.type.in_(types))
|
||||
if suffix:
|
||||
docs = docs.where(cls.model.suffix.in_(suffix))
|
||||
|
||||
count = docs.count()
|
||||
if desc:
|
||||
docs = docs.order_by(cls.model.getter_by(orderby).desc())
|
||||
else:
|
||||
docs = docs.order_by(cls.model.getter_by(orderby).asc())
|
||||
|
||||
|
||||
if page_number and items_per_page:
|
||||
docs = docs.paginate(page_number, items_per_page)
|
||||
|
||||
return list(docs.dicts()), count
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_filter_by_kb_id(cls, kb_id, keywords, run_status, types, suffix):
|
||||
"""
|
||||
returns:
|
||||
{
|
||||
"suffix": {
|
||||
"ppt": 1,
|
||||
"doxc": 2
|
||||
},
|
||||
"run_status": {
|
||||
"1": 2,
|
||||
"2": 2
|
||||
}
|
||||
}, total
|
||||
where "1" => RUNNING, "2" => CANCEL
|
||||
"""
|
||||
fields = cls.get_cls_model_fields()
|
||||
if keywords:
|
||||
query = cls.model.select(*fields).join(File2Document, on=(File2Document.document_id == cls.model.id)).join(File, on=(File.id == File2Document.file_id)).where(
|
||||
(cls.model.kb_id == kb_id),
|
||||
(fn.LOWER(cls.model.name).contains(keywords.lower()))
|
||||
)
|
||||
else:
|
||||
query = cls.model.select(*fields).join(File2Document, on=(File2Document.document_id == cls.model.id)).join(File, on=(File.id == File2Document.file_id)).where(cls.model.kb_id == kb_id)
|
||||
|
||||
|
||||
if run_status:
|
||||
query = query.where(cls.model.run.in_(run_status))
|
||||
if types:
|
||||
query = query.where(cls.model.type.in_(types))
|
||||
if suffix:
|
||||
query = query.where(cls.model.suffix.in_(suffix))
|
||||
|
||||
rows = query.select(cls.model.run, cls.model.suffix)
|
||||
total = rows.count()
|
||||
|
||||
suffix_counter = {}
|
||||
run_status_counter = {}
|
||||
|
||||
for row in rows:
|
||||
suffix_counter[row.suffix] = suffix_counter.get(row.suffix, 0) + 1
|
||||
run_status_counter[str(row.run)] = run_status_counter.get(str(row.run), 0) + 1
|
||||
|
||||
return {
|
||||
"suffix": suffix_counter,
|
||||
"run_status": run_status_counter
|
||||
}, total
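# Illustrative only (kb id and counts are placeholders): the facet counts returned above
# can be fed straight into a document-list filter panel.
#
#     filters, total = DocumentService.get_filter_by_kb_id("kb_42", "", [], [], [])
#     # filters == {"suffix": {"pdf": 3, "docx": 1}, "run_status": {"1": 2, "3": 2}}, total == 4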
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def count_by_kb_id(cls, kb_id, keywords, run_status, types):
|
||||
if keywords:
|
||||
docs = cls.model.select().where(
|
||||
(cls.model.kb_id == kb_id),
|
||||
(fn.LOWER(cls.model.name).contains(keywords.lower()))
|
||||
)
|
||||
else:
|
||||
docs = cls.model.select().where(cls.model.kb_id == kb_id)
|
||||
|
||||
if run_status:
|
||||
docs = docs.where(cls.model.run.in_(run_status))
|
||||
if types:
|
||||
docs = docs.where(cls.model.type.in_(types))
|
||||
|
||||
count = docs.count()
|
||||
|
||||
return count
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_total_size_by_kb_id(cls, kb_id, keywords="", run_status=[], types=[]):
|
||||
query = cls.model.select(fn.COALESCE(fn.SUM(cls.model.size), 0)).where(
|
||||
cls.model.kb_id == kb_id
|
||||
)
|
||||
|
||||
if keywords:
|
||||
query = query.where(fn.LOWER(cls.model.name).contains(keywords.lower()))
|
||||
if run_status:
|
||||
query = query.where(cls.model.run.in_(run_status))
|
||||
if types:
|
||||
query = query.where(cls.model.type.in_(types))
|
||||
|
||||
return int(query.scalar()) or 0
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_all_doc_ids_by_kb_ids(cls, kb_ids):
|
||||
fields = [cls.model.id]
|
||||
docs = cls.model.select(*fields).where(cls.model.kb_id.in_(kb_ids))
|
||||
docs = docs.order_by(cls.model.create_time.asc())
|
||||
# deep pagination like this may cause slow queries; optimize later
|
||||
offset, limit = 0, 100
|
||||
res = []
|
||||
while True:
|
||||
doc_batch = docs.offset(offset).limit(limit)
|
||||
_temp = list(doc_batch.dicts())
|
||||
if not _temp:
|
||||
break
|
||||
res.extend(_temp)
|
||||
offset += limit
|
||||
return res
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_all_docs_by_creator_id(cls, creator_id):
|
||||
fields = [
|
||||
cls.model.id, cls.model.kb_id, cls.model.token_num, cls.model.chunk_num, Knowledgebase.tenant_id
|
||||
]
|
||||
docs = cls.model.select(*fields).join(Knowledgebase, on=(Knowledgebase.id == cls.model.kb_id)).where(
|
||||
cls.model.created_by == creator_id
|
||||
)
|
||||
docs = docs.order_by(cls.model.create_time.asc())
|
||||
# deep pagination like this may cause slow queries; optimize later
|
||||
offset, limit = 0, 100
|
||||
res = []
|
||||
while True:
|
||||
doc_batch = docs.offset(offset).limit(limit)
|
||||
_temp = list(doc_batch.dicts())
|
||||
if not _temp:
|
||||
break
|
||||
res.extend(_temp)
|
||||
offset += limit
|
||||
return res
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def insert(cls, doc):
|
||||
if not cls.save(**doc):
|
||||
raise RuntimeError("Database error (Document)!")
|
||||
if not KnowledgebaseService.atomic_increase_doc_num_by_id(doc["kb_id"]):
|
||||
raise RuntimeError("Database error (Knowledgebase)!")
|
||||
return Document(**doc)
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def remove_document(cls, doc, tenant_id):
|
||||
from api.db.services.task_service import TaskService
|
||||
cls.clear_chunk_num(doc.id)
|
||||
try:
|
||||
TaskService.filter_delete([Task.doc_id == doc.id])
|
||||
page = 0
|
||||
page_size = 1000
|
||||
all_chunk_ids = []
|
||||
while True:
|
||||
chunks = settings.docStoreConn.search(["img_id"], [], {"doc_id": doc.id}, [], OrderByExpr(),
|
||||
page * page_size, page_size, search.index_name(tenant_id),
|
||||
[doc.kb_id])
|
||||
chunk_ids = settings.docStoreConn.getChunkIds(chunks)
|
||||
if not chunk_ids:
|
||||
break
|
||||
all_chunk_ids.extend(chunk_ids)
|
||||
page += 1
|
||||
for cid in all_chunk_ids:
|
||||
if STORAGE_IMPL.obj_exist(doc.kb_id, cid):
|
||||
STORAGE_IMPL.rm(doc.kb_id, cid)
|
||||
if doc.thumbnail and not doc.thumbnail.startswith(IMG_BASE64_PREFIX):
|
||||
if STORAGE_IMPL.obj_exist(doc.kb_id, doc.thumbnail):
|
||||
STORAGE_IMPL.rm(doc.kb_id, doc.thumbnail)
|
||||
settings.docStoreConn.delete({"doc_id": doc.id}, search.index_name(tenant_id), doc.kb_id)
|
||||
|
||||
graph_source = settings.docStoreConn.getFields(
|
||||
settings.docStoreConn.search(["source_id"], [], {"kb_id": doc.kb_id, "knowledge_graph_kwd": ["graph"]}, [], OrderByExpr(), 0, 1, search.index_name(tenant_id), [doc.kb_id]), ["source_id"]
|
||||
)
|
||||
if len(graph_source) > 0 and doc.id in list(graph_source.values())[0]["source_id"]:
|
||||
settings.docStoreConn.update({"kb_id": doc.kb_id, "knowledge_graph_kwd": ["entity", "relation", "graph", "subgraph", "community_report"], "source_id": doc.id},
|
||||
{"remove": {"source_id": doc.id}},
|
||||
search.index_name(tenant_id), doc.kb_id)
|
||||
settings.docStoreConn.update({"kb_id": doc.kb_id, "knowledge_graph_kwd": ["graph"]},
|
||||
{"removed_kwd": "Y"},
|
||||
search.index_name(tenant_id), doc.kb_id)
|
||||
settings.docStoreConn.delete({"kb_id": doc.kb_id, "knowledge_graph_kwd": ["entity", "relation", "graph", "subgraph", "community_report"], "must_not": {"exists": "source_id"}},
|
||||
search.index_name(tenant_id), doc.kb_id)
|
||||
except Exception:
|
||||
pass
|
||||
return cls.delete_by_id(doc.id)
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_newly_uploaded(cls):
|
||||
fields = [
|
||||
cls.model.id,
|
||||
cls.model.kb_id,
|
||||
cls.model.parser_id,
|
||||
cls.model.parser_config,
|
||||
cls.model.name,
|
||||
cls.model.type,
|
||||
cls.model.location,
|
||||
cls.model.size,
|
||||
Knowledgebase.tenant_id,
|
||||
Tenant.embd_id,
|
||||
Tenant.img2txt_id,
|
||||
Tenant.asr_id,
|
||||
cls.model.update_time]
|
||||
docs = cls.model.select(*fields) \
|
||||
.join(Knowledgebase, on=(cls.model.kb_id == Knowledgebase.id)) \
|
||||
.join(Tenant, on=(Knowledgebase.tenant_id == Tenant.id)) \
|
||||
.where(
|
||||
cls.model.status == StatusEnum.VALID.value,
|
||||
~(cls.model.type == FileType.VIRTUAL.value),
|
||||
cls.model.progress == 0,
|
||||
cls.model.update_time >= current_timestamp() - 1000 * 600,
|
||||
cls.model.run == TaskStatus.RUNNING.value) \
|
||||
.order_by(cls.model.update_time.asc())
|
||||
return list(docs.dicts())
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_unfinished_docs(cls):
|
||||
fields = [cls.model.id, cls.model.process_begin_at, cls.model.parser_config, cls.model.progress_msg,
|
||||
cls.model.run, cls.model.parser_id]
|
||||
docs = cls.model.select(*fields) \
|
||||
.where(
|
||||
cls.model.status == StatusEnum.VALID.value,
|
||||
~(cls.model.type == FileType.VIRTUAL.value),
|
||||
cls.model.progress < 1,
|
||||
cls.model.progress > 0)
|
||||
return list(docs.dicts())
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def increment_chunk_num(cls, doc_id, kb_id, token_num, chunk_num, duration):
|
||||
num = cls.model.update(token_num=cls.model.token_num + token_num,
|
||||
chunk_num=cls.model.chunk_num + chunk_num,
|
||||
process_duration=cls.model.process_duration + duration).where(
|
||||
cls.model.id == doc_id).execute()
|
||||
if num == 0:
|
||||
logging.warning("Document not found which is supposed to be there")
|
||||
num = Knowledgebase.update(
    token_num=Knowledgebase.token_num + token_num,
    chunk_num=Knowledgebase.chunk_num + chunk_num
).where(Knowledgebase.id == kb_id).execute()
|
||||
return num
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def decrement_chunk_num(cls, doc_id, kb_id, token_num, chunk_num, duration):
|
||||
num = cls.model.update(token_num=cls.model.token_num - token_num,
|
||||
chunk_num=cls.model.chunk_num - chunk_num,
|
||||
process_duration=cls.model.process_duration + duration).where(
|
||||
cls.model.id == doc_id).execute()
|
||||
if num == 0:
|
||||
raise LookupError(
|
||||
"Document not found which is supposed to be there")
|
||||
num = Knowledgebase.update(
    token_num=Knowledgebase.token_num - token_num,
    chunk_num=Knowledgebase.chunk_num - chunk_num
).where(Knowledgebase.id == kb_id).execute()
|
||||
return num
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def clear_chunk_num(cls, doc_id):
|
||||
doc = cls.model.get_by_id(doc_id)
|
||||
assert doc, "Can't fine document in database."
|
||||
|
||||
num = Knowledgebase.update(
    token_num=Knowledgebase.token_num - doc.token_num,
    chunk_num=Knowledgebase.chunk_num - doc.chunk_num,
    doc_num=Knowledgebase.doc_num - 1
).where(Knowledgebase.id == doc.kb_id).execute()
|
||||
return num
|
||||
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def clear_chunk_num_when_rerun(cls, doc_id):
|
||||
doc = cls.model.get_by_id(doc_id)
|
||||
assert doc, "Can't fine document in database."
|
||||
|
||||
num = (
|
||||
Knowledgebase.update(
|
||||
token_num=Knowledgebase.token_num - doc.token_num,
|
||||
chunk_num=Knowledgebase.chunk_num - doc.chunk_num,
|
||||
)
|
||||
.where(Knowledgebase.id == doc.kb_id)
|
||||
.execute()
|
||||
)
|
||||
return num
|
||||
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_tenant_id(cls, doc_id):
|
||||
docs = cls.model.select(
|
||||
Knowledgebase.tenant_id).join(
|
||||
Knowledgebase, on=(
|
||||
Knowledgebase.id == cls.model.kb_id)).where(
|
||||
cls.model.id == doc_id, Knowledgebase.status == StatusEnum.VALID.value)
|
||||
docs = docs.dicts()
|
||||
if not docs:
|
||||
return
|
||||
return docs[0]["tenant_id"]
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_knowledgebase_id(cls, doc_id):
|
||||
docs = cls.model.select(cls.model.kb_id).where(cls.model.id == doc_id)
|
||||
docs = docs.dicts()
|
||||
if not docs:
|
||||
return
|
||||
return docs[0]["kb_id"]
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_tenant_id_by_name(cls, name):
|
||||
docs = cls.model.select(
|
||||
Knowledgebase.tenant_id).join(
|
||||
Knowledgebase, on=(
|
||||
Knowledgebase.id == cls.model.kb_id)).where(
|
||||
cls.model.name == name, Knowledgebase.status == StatusEnum.VALID.value)
|
||||
docs = docs.dicts()
|
||||
if not docs:
|
||||
return
|
||||
return docs[0]["tenant_id"]
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def accessible(cls, doc_id, user_id):
|
||||
docs = cls.model.select(
|
||||
cls.model.id).join(
|
||||
Knowledgebase, on=(
|
||||
Knowledgebase.id == cls.model.kb_id)
|
||||
).join(UserTenant, on=(UserTenant.tenant_id == Knowledgebase.tenant_id)
|
||||
).where(cls.model.id == doc_id, UserTenant.user_id == user_id).paginate(0, 1)
|
||||
docs = docs.dicts()
|
||||
if not docs:
|
||||
return False
|
||||
return True
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def accessible4deletion(cls, doc_id, user_id):
|
||||
docs = cls.model.select(cls.model.id
|
||||
).join(
|
||||
Knowledgebase, on=(
|
||||
Knowledgebase.id == cls.model.kb_id)
|
||||
).join(
|
||||
UserTenant, on=(
|
||||
(UserTenant.tenant_id == Knowledgebase.created_by) & (UserTenant.user_id == user_id))
|
||||
).where(
|
||||
cls.model.id == doc_id,
|
||||
UserTenant.status == StatusEnum.VALID.value,
|
||||
((UserTenant.role == UserTenantRole.NORMAL) | (UserTenant.role == UserTenantRole.OWNER))
|
||||
).paginate(0, 1)
|
||||
docs = docs.dicts()
|
||||
if not docs:
|
||||
return False
|
||||
return True
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_embd_id(cls, doc_id):
|
||||
docs = cls.model.select(
|
||||
Knowledgebase.embd_id).join(
|
||||
Knowledgebase, on=(
|
||||
Knowledgebase.id == cls.model.kb_id)).where(
|
||||
cls.model.id == doc_id, Knowledgebase.status == StatusEnum.VALID.value)
|
||||
docs = docs.dicts()
|
||||
if not docs:
|
||||
return
|
||||
return docs[0]["embd_id"]
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_chunking_config(cls, doc_id):
|
||||
configs = (
|
||||
cls.model.select(
|
||||
cls.model.id,
|
||||
cls.model.kb_id,
|
||||
cls.model.parser_id,
|
||||
cls.model.parser_config,
|
||||
Knowledgebase.language,
|
||||
Knowledgebase.embd_id,
|
||||
Tenant.id.alias("tenant_id"),
|
||||
Tenant.img2txt_id,
|
||||
Tenant.asr_id,
|
||||
Tenant.llm_id,
|
||||
)
|
||||
.join(Knowledgebase, on=(cls.model.kb_id == Knowledgebase.id))
|
||||
.join(Tenant, on=(Knowledgebase.tenant_id == Tenant.id))
|
||||
.where(cls.model.id == doc_id)
|
||||
)
|
||||
configs = configs.dicts()
|
||||
if not configs:
|
||||
return None
|
||||
return configs[0]
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_doc_id_by_doc_name(cls, doc_name):
|
||||
fields = [cls.model.id]
|
||||
doc_id = cls.model.select(*fields) \
|
||||
.where(cls.model.name == doc_name)
|
||||
doc_id = doc_id.dicts()
|
||||
if not doc_id:
|
||||
return
|
||||
return doc_id[0]["id"]
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_doc_ids_by_doc_names(cls, doc_names):
|
||||
if not doc_names:
|
||||
return []
|
||||
|
||||
query = cls.model.select(cls.model.id).where(cls.model.name.in_(doc_names))
|
||||
return list(query.scalars().iterator())
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_thumbnails(cls, docids):
|
||||
fields = [cls.model.id, cls.model.kb_id, cls.model.thumbnail]
|
||||
return list(cls.model.select(
|
||||
*fields).where(cls.model.id.in_(docids)).dicts())
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def update_parser_config(cls, id, config):
|
||||
if not config:
|
||||
return
|
||||
e, d = cls.get_by_id(id)
|
||||
if not e:
|
||||
raise LookupError(f"Document({id}) not found.")
|
||||
|
||||
def dfs_update(old, new):
|
||||
for k, v in new.items():
|
||||
if k not in old:
|
||||
old[k] = v
|
||||
continue
|
||||
if isinstance(v, dict):
|
||||
assert isinstance(old[k], dict)
|
||||
dfs_update(old[k], v)
|
||||
else:
|
||||
old[k] = v
|
||||
|
||||
dfs_update(d.parser_config, config)
|
||||
if not config.get("raptor") and d.parser_config.get("raptor"):
|
||||
del d.parser_config["raptor"]
|
||||
cls.update_by_id(id, {"parser_config": d.parser_config})
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_doc_count(cls, tenant_id):
|
||||
docs = cls.model.select(cls.model.id).join(Knowledgebase,
|
||||
on=(Knowledgebase.id == cls.model.kb_id)).where(
|
||||
Knowledgebase.tenant_id == tenant_id)
|
||||
return len(docs)
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def begin2parse(cls, docid):
|
||||
cls.update_by_id(
|
||||
docid, {"progress": random.random() * 1 / 100.,
|
||||
"progress_msg": "Task is queued...",
|
||||
"process_begin_at": get_format_time()
|
||||
})
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def update_meta_fields(cls, doc_id, meta_fields):
|
||||
return cls.update_by_id(doc_id, {"meta_fields": meta_fields})
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_meta_by_kbs(cls, kb_ids):
|
||||
fields = [
|
||||
cls.model.id,
|
||||
cls.model.meta_fields,
|
||||
]
|
||||
meta = {}
|
||||
for r in cls.model.select(*fields).where(cls.model.kb_id.in_(kb_ids)):
|
||||
doc_id = r.id
|
||||
for k,v in r.meta_fields.items():
|
||||
if k not in meta:
|
||||
meta[k] = {}
|
||||
v = str(v)
|
||||
if v not in meta[k]:
|
||||
meta[k][v] = []
|
||||
meta[k][v].append(doc_id)
|
||||
return meta
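# Shape of the mapping returned above (keys and ids are illustrative):
# metadata field -> stringified value -> list of document ids carrying that value.
#
#     meta = DocumentService.get_meta_by_kbs(["kb_1"])
#     # -> {"author": {"Alice": ["doc_a"], "Bob": ["doc_b"]}, "year": {"2024": ["doc_a", "doc_b"]}}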
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def update_progress(cls):
|
||||
docs = cls.get_unfinished_docs()
|
||||
|
||||
cls._sync_progress(docs)
|
||||
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def update_progress_immediately(cls, docs:list[dict]):
|
||||
if not docs:
|
||||
return
|
||||
|
||||
cls._sync_progress(docs)
|
||||
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def _sync_progress(cls, docs:list[dict]):
|
||||
for d in docs:
|
||||
try:
|
||||
tsks = Task.query(doc_id=d["id"], order_by=Task.create_time)
|
||||
if not tsks:
|
||||
continue
|
||||
msg = []
|
||||
prg = 0
|
||||
finished = True
|
||||
bad = 0
|
||||
e, doc = DocumentService.get_by_id(d["id"])
|
||||
status = doc.run  # typically TaskStatus.RUNNING.value while tasks are still in flight
|
||||
priority = 0
|
||||
for t in tsks:
|
||||
if 0 <= t.progress < 1:
|
||||
finished = False
|
||||
if t.progress == -1:
|
||||
bad += 1
|
||||
prg += t.progress if t.progress >= 0 else 0
|
||||
if t.progress_msg.strip():
|
||||
msg.append(t.progress_msg)
|
||||
priority = max(priority, t.priority)
|
||||
prg /= len(tsks)
|
||||
if finished and bad:
|
||||
prg = -1
|
||||
status = TaskStatus.FAIL.value
|
||||
elif finished:
|
||||
prg = 1
|
||||
status = TaskStatus.DONE.value
|
||||
|
||||
msg = "\n".join(sorted(msg))
|
||||
info = {
    "process_duration": datetime.timestamp(datetime.now()) - d["process_begin_at"].timestamp(),
    "run": status}
|
||||
if prg != 0:
|
||||
info["progress"] = prg
|
||||
if msg:
|
||||
info["progress_msg"] = msg
|
||||
if msg.endswith("created task graphrag") or msg.endswith("created task raptor") or msg.endswith("created task mindmap"):
|
||||
info["progress_msg"] += "\n%d tasks are ahead in the queue..."%get_queue_length(priority)
|
||||
else:
|
||||
info["progress_msg"] = "%d tasks are ahead in the queue..."%get_queue_length(priority)
|
||||
cls.update_by_id(d["id"], info)
|
||||
except Exception as e:
|
||||
if str(e).find("'0'") < 0:
|
||||
logging.exception("fetch task exception")
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_kb_doc_count(cls, kb_id):
|
||||
return cls.model.select().where(cls.model.kb_id == kb_id).count()
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_all_kb_doc_count(cls):
|
||||
result = {}
|
||||
rows = cls.model.select(cls.model.kb_id, fn.COUNT(cls.model.id).alias('count')).group_by(cls.model.kb_id)
|
||||
for row in rows:
|
||||
result[row.kb_id] = row.count
|
||||
return result
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def do_cancel(cls, doc_id):
|
||||
try:
|
||||
_, doc = DocumentService.get_by_id(doc_id)
|
||||
return doc.run == TaskStatus.CANCEL.value or doc.progress < 0
|
||||
except Exception:
|
||||
pass
|
||||
return False
|
||||
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def knowledgebase_basic_info(cls, kb_id: str) -> dict[str, int]:
|
||||
# cancelled: run == "2" but progress can vary
|
||||
cancelled = (
|
||||
cls.model.select(fn.COUNT(1))
|
||||
.where((cls.model.kb_id == kb_id) & (cls.model.run == TaskStatus.CANCEL))
|
||||
.scalar()
|
||||
)
|
||||
|
||||
row = (
|
||||
cls.model.select(
|
||||
# finished: progress == 1
|
||||
fn.COALESCE(fn.SUM(Case(None, [(cls.model.progress == 1, 1)], 0)), 0).alias("finished"),
|
||||
|
||||
# failed: progress == -1
|
||||
fn.COALESCE(fn.SUM(Case(None, [(cls.model.progress == -1, 1)], 0)), 0).alias("failed"),
|
||||
|
||||
# processing: 0 <= progress < 1
|
||||
fn.COALESCE(
|
||||
fn.SUM(
|
||||
Case(
|
||||
None,
|
||||
[
|
||||
(((cls.model.progress == 0) | ((cls.model.progress > 0) & (cls.model.progress < 1))), 1),
|
||||
],
|
||||
0,
|
||||
)
|
||||
),
|
||||
0,
|
||||
).alias("processing"),
|
||||
)
|
||||
.where(
|
||||
(cls.model.kb_id == kb_id)
|
||||
& ((cls.model.run.is_null(True)) | (cls.model.run != TaskStatus.CANCEL))
|
||||
)
|
||||
.dicts()
|
||||
.get()
|
||||
)
|
||||
|
||||
return {
|
||||
"processing": int(row["processing"]),
|
||||
"finished": int(row["finished"]),
|
||||
"failed": int(row["failed"]),
|
||||
"cancelled": int(cancelled),
|
||||
}
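# Example of the summary returned above (counts are illustrative):
#
#     DocumentService.knowledgebase_basic_info("kb_42")
#     # -> {"processing": 2, "finished": 10, "failed": 1, "cancelled": 0}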
|
||||
|
||||
def queue_raptor_o_graphrag_tasks(doc, ty, priority, fake_doc_id="", doc_ids=[]):
|
||||
"""
|
||||
You can provide a fake_doc_id to bypass the restriction of tasks at the knowledgebase level.
|
||||
Optionally, specify a list of doc_ids to determine which documents participate in the task.
|
||||
"""
|
||||
chunking_config = DocumentService.get_chunking_config(doc["id"])
|
||||
hasher = xxhash.xxh64()
|
||||
for field in sorted(chunking_config.keys()):
|
||||
hasher.update(str(chunking_config[field]).encode("utf-8"))
|
||||
|
||||
def new_task():
|
||||
nonlocal doc
|
||||
return {
|
||||
"id": get_uuid(),
|
||||
"doc_id": fake_doc_id if fake_doc_id else doc["id"],
|
||||
"from_page": 100000000,
|
||||
"to_page": 100000000,
|
||||
"task_type": ty,
|
||||
"progress_msg": datetime.now().strftime("%H:%M:%S") + " created task " + ty,
|
||||
"begin_at": datetime.now(),
|
||||
}
|
||||
|
||||
task = new_task()
|
||||
for field in ["doc_id", "from_page", "to_page"]:
|
||||
hasher.update(str(task.get(field, "")).encode("utf-8"))
|
||||
hasher.update(ty.encode("utf-8"))
|
||||
task["digest"] = hasher.hexdigest()
|
||||
bulk_insert_into_db(Task, [task], True)
|
||||
|
||||
if ty in ["graphrag", "raptor", "mindmap"]:
|
||||
task["doc_ids"] = doc_ids
|
||||
DocumentService.begin2parse(doc["id"])
|
||||
assert REDIS_CONN.queue_product(get_svr_queue_name(priority), message=task), "Can't access Redis. Please check the Redis status."
|
||||
return task["id"]
|
||||
|
||||
|
||||
def get_queue_length(priority):
|
||||
group_info = REDIS_CONN.queue_info(get_svr_queue_name(priority), SVR_CONSUMER_GROUP_NAME)
|
||||
if not group_info:
|
||||
return 0
|
||||
return int(group_info.get("lag", 0) or 0)
|
||||
|
||||
|
||||
async def doc_upload_and_parse(conversation_id, file_objs, user_id):
|
||||
from api.db.services.api_service import API4ConversationService
|
||||
from api.db.services.conversation_service import ConversationService
|
||||
from api.db.services.dialog_service import DialogService
|
||||
from api.db.services.file_service import FileService
|
||||
from api.db.services.llm_service import LLMBundle
|
||||
from api.db.services.user_service import TenantService
|
||||
from rag.app import audio, email, naive, picture, presentation
|
||||
|
||||
e, conv = ConversationService.get_by_id(conversation_id)
|
||||
if not e:
|
||||
e, conv = API4ConversationService.get_by_id(conversation_id)
|
||||
assert e, "Conversation not found!"
|
||||
|
||||
e, dia = DialogService.get_by_id(conv.dialog_id)
|
||||
if not dia.kb_ids:
|
||||
raise LookupError("No knowledge base associated with this conversation. "
|
||||
"Please add a knowledge base before uploading documents")
|
||||
kb_id = dia.kb_ids[0]
|
||||
e, kb = KnowledgebaseService.get_by_id(kb_id)
|
||||
if not e:
|
||||
raise LookupError("Can't find this knowledgebase!")
|
||||
|
||||
embd_mdl = LLMBundle(kb.tenant_id, LLMType.EMBEDDING, llm_name=kb.embd_id, lang=kb.language)
|
||||
|
||||
err, files = await FileService.upload_document(kb, file_objs, user_id)
|
||||
assert not err, "\n".join(err)
|
||||
|
||||
def dummy(prog=None, msg=""):
|
||||
pass
|
||||
|
||||
FACTORY = {
|
||||
ParserType.PRESENTATION.value: presentation,
|
||||
ParserType.PICTURE.value: picture,
|
||||
ParserType.AUDIO.value: audio,
|
||||
ParserType.EMAIL.value: email
|
||||
}
|
||||
parser_config = {"chunk_token_num": 4096, "delimiter": "\n!?;。;!?", "layout_recognize": "Plain Text"}
|
||||
exe = ThreadPoolExecutor(max_workers=12)
|
||||
threads = []
|
||||
doc_nm = {}
|
||||
for d, blob in files:
|
||||
doc_nm[d["id"]] = d["name"]
|
||||
for d, blob in files:
|
||||
kwargs = {
|
||||
"callback": dummy,
|
||||
"parser_config": parser_config,
|
||||
"from_page": 0,
|
||||
"to_page": 100000,
|
||||
"tenant_id": kb.tenant_id,
|
||||
"lang": kb.language
|
||||
}
|
||||
threads.append(exe.submit(FACTORY.get(d["parser_id"], naive).chunk, d["name"], blob, **kwargs))
|
||||
|
||||
for (docinfo, _), th in zip(files, threads):
|
||||
docs = []
|
||||
doc = {
|
||||
"doc_id": docinfo["id"],
|
||||
"kb_id": [kb.id]
|
||||
}
|
||||
for ck in th.result():
|
||||
d = deepcopy(doc)
|
||||
d.update(ck)
|
||||
d["id"] = xxhash.xxh64((ck["content_with_weight"] + str(d["doc_id"])).encode("utf-8")).hexdigest()
|
||||
d["create_time"] = str(datetime.now()).replace("T", " ")[:19]
|
||||
d["create_timestamp_flt"] = datetime.now().timestamp()
|
||||
if not d.get("image"):
|
||||
docs.append(d)
|
||||
continue
|
||||
|
||||
output_buffer = BytesIO()
|
||||
if isinstance(d["image"], bytes):
|
||||
output_buffer = BytesIO(d["image"])
|
||||
else:
|
||||
d["image"].save(output_buffer, format='JPEG')
|
||||
|
||||
STORAGE_IMPL.put(kb.id, d["id"], output_buffer.getvalue())
|
||||
d["img_id"] = "{}-{}".format(kb.id, d["id"])
|
||||
d.pop("image", None)
|
||||
docs.append(d)
|
||||
|
||||
parser_ids = {d["id"]: d["parser_id"] for d, _ in files}
|
||||
docids = [d["id"] for d, _ in files]
|
||||
chunk_counts = {id: 0 for id in docids}
|
||||
token_counts = {id: 0 for id in docids}
|
||||
es_bulk_size = 64
|
||||
|
||||
def embedding(doc_id, cnts, batch_size=16):
|
||||
nonlocal embd_mdl, chunk_counts, token_counts
|
||||
vects = []
|
||||
for i in range(0, len(cnts), batch_size):
|
||||
vts, c = embd_mdl.encode(cnts[i: i + batch_size])
|
||||
vects.extend(vts.tolist())
|
||||
chunk_counts[doc_id] += len(cnts[i:i + batch_size])
|
||||
token_counts[doc_id] += c
|
||||
return vects
|
||||
|
||||
idxnm = search.index_name(kb.tenant_id)
|
||||
try_create_idx = True
|
||||
|
||||
_, tenant = TenantService.get_by_id(kb.tenant_id)
|
||||
llm_bdl = LLMBundle(kb.tenant_id, LLMType.CHAT, tenant.llm_id)
|
||||
for doc_id in docids:
|
||||
cks = [c for c in docs if c["doc_id"] == doc_id]
|
||||
|
||||
if parser_ids[doc_id] != ParserType.PICTURE.value:
|
||||
from graphrag.general.mind_map_extractor import MindMapExtractor
|
||||
mindmap = MindMapExtractor(llm_bdl)
|
||||
try:
|
||||
mind_map = trio.run(mindmap, [c["content_with_weight"] for c in docs if c["doc_id"] == doc_id])
|
||||
mind_map = json.dumps(mind_map.output, ensure_ascii=False, indent=2)
|
||||
if len(mind_map) < 32:
|
||||
raise Exception("Few content: " + mind_map)
|
||||
cks.append({
|
||||
"id": get_uuid(),
|
||||
"doc_id": doc_id,
|
||||
"kb_id": [kb.id],
|
||||
"docnm_kwd": doc_nm[doc_id],
|
||||
"title_tks": rag_tokenizer.tokenize(re.sub(r"\.[a-zA-Z]+$", "", doc_nm[doc_id])),
|
||||
"content_ltks": rag_tokenizer.tokenize("summary summarize 总结 概况 file 文件 概括"),
|
||||
"content_with_weight": mind_map,
|
||||
"knowledge_graph_kwd": "mind_map"
|
||||
})
|
||||
except Exception as e:
|
||||
logging.exception("Mind map generation error")
|
||||
|
||||
vects = embedding(doc_id, [c["content_with_weight"] for c in cks])
|
||||
assert len(cks) == len(vects)
|
||||
for i, d in enumerate(cks):
|
||||
v = vects[i]
|
||||
d["q_%d_vec" % len(v)] = v
|
||||
for b in range(0, len(cks), es_bulk_size):
|
||||
if try_create_idx:
|
||||
if not settings.docStoreConn.indexExist(idxnm, kb_id):
|
||||
settings.docStoreConn.createIdx(idxnm, kb_id, len(vects[0]))
|
||||
try_create_idx = False
|
||||
settings.docStoreConn.insert(cks[b:b + es_bulk_size], idxnm, kb_id)
|
||||
|
||||
DocumentService.increment_chunk_num(
|
||||
doc_id, kb.id, token_counts[doc_id], chunk_counts[doc_id], 0)
|
||||
|
||||
return [d["id"] for d, _ in files]
|
||||
|
||||
96
api/db/services/file2document_service.py
Normal file
96
api/db/services/file2document_service.py
Normal file
@@ -0,0 +1,96 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
from datetime import datetime
|
||||
|
||||
from api.db import FileSource
|
||||
from api.db.db_models import DB
|
||||
from api.db.db_models import File, File2Document
|
||||
from api.db.services.common_service import CommonService
|
||||
from api.db.services.document_service import DocumentService
|
||||
from api.utils import current_timestamp, datetime_format
|
||||
|
||||
|
||||
class File2DocumentService(CommonService):
|
||||
model = File2Document
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_by_file_id(cls, file_id):
|
||||
objs = cls.model.select().where(cls.model.file_id == file_id)
|
||||
return objs
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_by_document_id(cls, document_id):
|
||||
objs = cls.model.select().where(cls.model.document_id == document_id)
|
||||
return objs
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_by_document_ids(cls, document_ids):
|
||||
objs = cls.model.select().where(cls.model.document_id.in_(document_ids))
|
||||
return list(objs.dicts())
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def insert(cls, obj):
|
||||
if not cls.save(**obj):
|
||||
raise RuntimeError("Database error (File)!")
|
||||
return File2Document(**obj)
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def delete_by_file_id(cls, file_id):
|
||||
return cls.model.delete().where(cls.model.file_id == file_id).execute()
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def delete_by_document_ids_or_file_ids(cls, document_ids, file_ids):
|
||||
if not document_ids:
|
||||
return cls.model.delete().where(cls.model.file_id.in_(file_ids)).execute()
|
||||
elif not file_ids:
|
||||
return cls.model.delete().where(cls.model.document_id.in_(document_ids)).execute()
|
||||
return cls.model.delete().where(cls.model.document_id.in_(document_ids) | cls.model.file_id.in_(file_ids)).execute()
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def delete_by_document_id(cls, doc_id):
|
||||
return cls.model.delete().where(cls.model.document_id == doc_id).execute()
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def update_by_file_id(cls, file_id, obj):
|
||||
obj["update_time"] = current_timestamp()
|
||||
obj["update_date"] = datetime_format(datetime.now())
|
||||
cls.model.update(obj).where(cls.model.id == file_id).execute()
|
||||
return File2Document(**obj)
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_storage_address(cls, doc_id=None, file_id=None):
|
||||
if doc_id:
|
||||
f2d = cls.get_by_document_id(doc_id)
|
||||
else:
|
||||
f2d = cls.get_by_file_id(file_id)
|
||||
if f2d:
|
||||
file = File.get_by_id(f2d[0].file_id)
|
||||
if not file.source_type or file.source_type == FileSource.LOCAL:
|
||||
return file.parent_id, file.location
|
||||
doc_id = f2d[0].document_id
|
||||
|
||||
assert doc_id, "please specify doc_id"
|
||||
e, doc = DocumentService.get_by_id(doc_id)
|
||||
return doc.kb_id, doc.location
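# Illustrative only: resolve the storage bucket and key for a document before fetching it
# (STORAGE_IMPL comes from rag.utils.storage_factory; the doc variable is a placeholder).
#
#     bucket, location = File2DocumentService.get_storage_address(doc_id=doc.id)
#     blob = STORAGE_IMPL.get(bucket, location)
#
# For locally uploaded files this returns the owning file's parent folder id and location;
# otherwise it falls back to the document's kb_id and location, as implemented above.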
|
||||
547
api/db/services/file_service.py
Normal file
547
api/db/services/file_service.py
Normal file
@@ -0,0 +1,547 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import logging
|
||||
import re
|
||||
import traceback
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
from pathlib import Path
|
||||
|
||||
from flask_login import current_user
|
||||
from peewee import fn
|
||||
|
||||
from api.db import KNOWLEDGEBASE_FOLDER_NAME, FileSource, FileType, ParserType
|
||||
from api.db.db_models import DB, Document, File, File2Document, Knowledgebase
|
||||
from api.db.services import duplicate_name
|
||||
from api.db.services.common_service import CommonService
|
||||
from api.db.services.document_service import DocumentService
|
||||
from api.db.services.file2document_service import File2DocumentService
|
||||
from api.utils import get_uuid
|
||||
from api.utils.file_utils import filename_type, read_potential_broken_pdf, thumbnail_img
|
||||
from rag.llm.cv_model import GptV4
|
||||
from rag.utils.storage_factory import STORAGE_IMPL
|
||||
|
||||
|
||||
class FileService(CommonService):
|
||||
# Service class for managing file operations and storage
|
||||
model = File
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_by_pf_id(cls, tenant_id, pf_id, page_number, items_per_page, orderby, desc, keywords):
|
||||
# Get files by parent folder ID with pagination and filtering
|
||||
# Args:
|
||||
# tenant_id: ID of the tenant
|
||||
# pf_id: Parent folder ID
|
||||
# page_number: Page number for pagination
|
||||
# items_per_page: Number of items per page
|
||||
# orderby: Field to order by
|
||||
# desc: Boolean indicating descending order
|
||||
# keywords: Search keywords
|
||||
# Returns:
|
||||
# Tuple of (file_list, total_count)
|
||||
if keywords:
|
||||
files = cls.model.select().where((cls.model.tenant_id == tenant_id), (cls.model.parent_id == pf_id), (fn.LOWER(cls.model.name).contains(keywords.lower())), ~(cls.model.id == pf_id))
|
||||
else:
|
||||
files = cls.model.select().where((cls.model.tenant_id == tenant_id), (cls.model.parent_id == pf_id), ~(cls.model.id == pf_id))
|
||||
count = files.count()
|
||||
if desc:
|
||||
files = files.order_by(cls.model.getter_by(orderby).desc())
|
||||
else:
|
||||
files = files.order_by(cls.model.getter_by(orderby).asc())
|
||||
|
||||
files = files.paginate(page_number, items_per_page)
|
||||
|
||||
res_files = list(files.dicts())
|
||||
for file in res_files:
|
||||
if file["type"] == FileType.FOLDER.value:
|
||||
file["size"] = cls.get_folder_size(file["id"])
|
||||
file["kbs_info"] = []
|
||||
children = list(
|
||||
cls.model.select()
|
||||
.where(
|
||||
(cls.model.tenant_id == tenant_id),
|
||||
(cls.model.parent_id == file["id"]),
|
||||
~(cls.model.id == file["id"]),
|
||||
)
|
||||
.dicts()
|
||||
)
|
||||
file["has_child_folder"] = any(value["type"] == FileType.FOLDER.value for value in children)
|
||||
continue
|
||||
kbs_info = cls.get_kb_id_by_file_id(file["id"])
|
||||
file["kbs_info"] = kbs_info
|
||||
|
||||
return res_files, count
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_kb_id_by_file_id(cls, file_id):
|
||||
# Get knowledge base IDs associated with a file
|
||||
# Args:
|
||||
# file_id: File ID
|
||||
# Returns:
|
||||
# List of dictionaries containing knowledge base IDs and names
|
||||
kbs = (
|
||||
cls.model.select(*[Knowledgebase.id, Knowledgebase.name])
|
||||
.join(File2Document, on=(File2Document.file_id == file_id))
|
||||
.join(Document, on=(File2Document.document_id == Document.id))
|
||||
.join(Knowledgebase, on=(Knowledgebase.id == Document.kb_id))
|
||||
.where(cls.model.id == file_id)
|
||||
)
|
||||
if not kbs:
|
||||
return []
|
||||
kbs_info_list = []
|
||||
for kb in list(kbs.dicts()):
|
||||
kbs_info_list.append({"kb_id": kb["id"], "kb_name": kb["name"]})
|
||||
return kbs_info_list
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_by_pf_id_name(cls, id, name):
|
||||
# Get file by parent folder ID and name
|
||||
# Args:
|
||||
# id: Parent folder ID
|
||||
# name: File name
|
||||
# Returns:
|
||||
# File object or None if not found
|
||||
file = cls.model.select().where((cls.model.parent_id == id) & (cls.model.name == name))
|
||||
if file.count():
|
||||
e, file = cls.get_by_id(file[0].id)
|
||||
if not e:
|
||||
raise RuntimeError("Database error (File retrieval)!")
|
||||
return file
|
||||
return None
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_id_list_by_id(cls, id, name, count, res):
|
||||
# Recursively get list of file IDs by traversing folder structure
|
||||
# Args:
|
||||
# id: Starting folder ID
|
||||
# name: List of folder names to traverse
|
||||
# count: Current depth in traversal
|
||||
# res: List to store results
|
||||
# Returns:
|
||||
# List of file IDs
|
||||
if count < len(name):
|
||||
file = cls.get_by_pf_id_name(id, name[count])
|
||||
if file:
|
||||
res.append(file.id)
|
||||
return cls.get_id_list_by_id(file.id, name, count + 1, res)
|
||||
else:
|
||||
return res
|
||||
else:
|
||||
return res
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_all_innermost_file_ids(cls, folder_id, result_ids):
|
||||
# Get IDs of all files in the deepest level of folders
|
||||
# Args:
|
||||
# folder_id: Starting folder ID
|
||||
# result_ids: List to store results
|
||||
# Returns:
|
||||
# List of file IDs
|
||||
subfolders = cls.model.select().where(cls.model.parent_id == folder_id)
|
||||
if subfolders.exists():
|
||||
for subfolder in subfolders:
|
||||
cls.get_all_innermost_file_ids(subfolder.id, result_ids)
|
||||
else:
|
||||
result_ids.append(folder_id)
|
||||
return result_ids
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_all_file_ids_by_tenant_id(cls, tenant_id):
|
||||
fields = [cls.model.id]
|
||||
files = cls.model.select(*fields).where(cls.model.tenant_id == tenant_id)
|
||||
files = files.order_by(cls.model.create_time.asc())
|
||||
offset, limit = 0, 100
|
||||
res = []
|
||||
while True:
|
||||
file_batch = files.offset(offset).limit(limit)
|
||||
_temp = list(file_batch.dicts())
|
||||
if not _temp:
|
||||
break
|
||||
res.extend(_temp)
|
||||
offset += limit
|
||||
return res
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def create_folder(cls, file, parent_id, name, count):
|
||||
# Recursively create folder structure
|
||||
# Args:
|
||||
# file: Current file object
|
||||
# parent_id: Parent folder ID
|
||||
# name: List of folder names to create
|
||||
# count: Current depth in creation
|
||||
# Returns:
|
||||
# Created file object
|
||||
if count > len(name) - 2:
|
||||
return file
|
||||
else:
|
||||
file = cls.insert(
|
||||
{"id": get_uuid(), "parent_id": parent_id, "tenant_id": current_user.id, "created_by": current_user.id, "name": name[count], "location": "", "size": 0, "type": FileType.FOLDER.value}
|
||||
)
|
||||
return cls.create_folder(file, file.id, name, count + 1)
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def is_parent_folder_exist(cls, parent_id):
|
||||
# Check if parent folder exists
|
||||
# Args:
|
||||
# parent_id: Parent folder ID
|
||||
# Returns:
|
||||
# Boolean indicating if folder exists
|
||||
parent_files = cls.model.select().where(cls.model.id == parent_id)
|
||||
if parent_files.count():
|
||||
return True
|
||||
cls.delete_folder_by_pf_id(parent_id)
|
||||
return False
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_root_folder(cls, tenant_id):
|
||||
# Get or create root folder for tenant
|
||||
# Args:
|
||||
# tenant_id: Tenant ID
|
||||
# Returns:
|
||||
# Root folder dictionary
|
||||
for file in cls.model.select().where((cls.model.tenant_id == tenant_id), (cls.model.parent_id == cls.model.id)):
|
||||
return file.to_dict()
|
||||
|
||||
file_id = get_uuid()
|
||||
file = {
|
||||
"id": file_id,
|
||||
"parent_id": file_id,
|
||||
"tenant_id": tenant_id,
|
||||
"created_by": tenant_id,
|
||||
"name": "/",
|
||||
"type": FileType.FOLDER.value,
|
||||
"size": 0,
|
||||
"location": "",
|
||||
}
|
||||
cls.save(**file)
|
||||
return file
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_kb_folder(cls, tenant_id):
|
||||
# Get knowledge base folder for tenant
|
||||
# Args:
|
||||
# tenant_id: Tenant ID
|
||||
# Returns:
|
||||
# Knowledge base folder dictionary
|
||||
root_folder = cls.get_root_folder(tenant_id)
|
||||
root_id = root_folder["id"]
|
||||
kb_folder = cls.model.select().where((cls.model.tenant_id == tenant_id), (cls.model.parent_id == root_id), (cls.model.name == KNOWLEDGEBASE_FOLDER_NAME)).first()
|
||||
if not kb_folder:
|
||||
kb_folder = cls.new_a_file_from_kb(tenant_id, KNOWLEDGEBASE_FOLDER_NAME, root_id)
|
||||
return kb_folder
|
||||
return kb_folder.to_dict()
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def new_a_file_from_kb(cls, tenant_id, name, parent_id, ty=FileType.FOLDER.value, size=0, location=""):
|
||||
# Create a new file from knowledge base
|
||||
# Args:
|
||||
# tenant_id: Tenant ID
|
||||
# name: File name
|
||||
# parent_id: Parent folder ID
|
||||
# ty: File type
|
||||
# size: File size
|
||||
# location: File location
|
||||
# Returns:
|
||||
# Created file dictionary
|
||||
for file in cls.query(tenant_id=tenant_id, parent_id=parent_id, name=name):
|
||||
return file.to_dict()
|
||||
file = {
|
||||
"id": get_uuid(),
|
||||
"parent_id": parent_id,
|
||||
"tenant_id": tenant_id,
|
||||
"created_by": tenant_id,
|
||||
"name": name,
|
||||
"type": ty,
|
||||
"size": size,
|
||||
"location": location,
|
||||
"source_type": FileSource.KNOWLEDGEBASE,
|
||||
}
|
||||
cls.save(**file)
|
||||
return file
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def init_knowledgebase_docs(cls, root_id, tenant_id):
|
||||
# Initialize knowledge base documents
|
||||
# Args:
|
||||
# root_id: Root folder ID
|
||||
# tenant_id: Tenant ID
|
||||
for _ in cls.model.select().where((cls.model.name == KNOWLEDGEBASE_FOLDER_NAME) & (cls.model.parent_id == root_id)):
|
||||
return
|
||||
folder = cls.new_a_file_from_kb(tenant_id, KNOWLEDGEBASE_FOLDER_NAME, root_id)
|
||||
|
||||
for kb in Knowledgebase.select(*[Knowledgebase.id, Knowledgebase.name]).where(Knowledgebase.tenant_id == tenant_id):
|
||||
kb_folder = cls.new_a_file_from_kb(tenant_id, kb.name, folder["id"])
|
||||
for doc in DocumentService.query(kb_id=kb.id):
|
||||
FileService.add_file_from_kb(doc.to_dict(), kb_folder["id"], tenant_id)
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_parent_folder(cls, file_id):
|
||||
# Get parent folder of a file
|
||||
# Args:
|
||||
# file_id: File ID
|
||||
# Returns:
|
||||
# Parent folder object
|
||||
file = cls.model.select().where(cls.model.id == file_id)
|
||||
if file.count():
|
||||
e, file = cls.get_by_id(file[0].parent_id)
|
||||
if not e:
|
||||
raise RuntimeError("Database error (File retrieval)!")
|
||||
else:
|
||||
raise RuntimeError("Database error (File doesn't exist)!")
|
||||
return file
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_all_parent_folders(cls, start_id):
|
||||
# Get all parent folders in path
|
||||
# Args:
|
||||
# start_id: Starting file ID
|
||||
# Returns:
|
||||
# List of parent folder objects
|
||||
parent_folders = []
|
||||
current_id = start_id
|
||||
while current_id:
|
||||
e, file = cls.get_by_id(current_id)
|
||||
if file.parent_id != file.id and e:
|
||||
parent_folders.append(file)
|
||||
current_id = file.parent_id
|
||||
else:
|
||||
parent_folders.append(file)
|
||||
break
|
||||
return parent_folders
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def insert(cls, file):
|
||||
# Insert a new file record
|
||||
# Args:
|
||||
# file: File data dictionary
|
||||
# Returns:
|
||||
# Created file object
|
||||
if not cls.save(**file):
|
||||
raise RuntimeError("Database error (File)!")
|
||||
return File(**file)
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def delete(cls, file):
|
||||
# Delete a file record by its object; thin wrapper around delete_by_id
|
||||
return cls.delete_by_id(file.id)
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def delete_by_pf_id(cls, folder_id):
|
||||
return cls.model.delete().where(cls.model.parent_id == folder_id).execute()
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def delete_folder_by_pf_id(cls, user_id, folder_id):
|
||||
try:
|
||||
files = cls.model.select().where((cls.model.tenant_id == user_id) & (cls.model.parent_id == folder_id))
|
||||
for file in files:
|
||||
cls.delete_folder_by_pf_id(user_id, file.id)
|
||||
return cls.model.delete().where((cls.model.tenant_id == user_id) & (cls.model.id == folder_id)).execute()
|
||||
except Exception:
|
||||
logging.exception("delete_folder_by_pf_id")
|
||||
raise RuntimeError("Database error (File retrieval)!")
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_file_count(cls, tenant_id):
|
||||
files = cls.model.select(cls.model.id).where(cls.model.tenant_id == tenant_id)
|
||||
return len(files)
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_folder_size(cls, folder_id):
|
||||
size = 0
|
||||
|
||||
def dfs(parent_id):
|
||||
nonlocal size
|
||||
for f in cls.model.select(*[cls.model.id, cls.model.size, cls.model.type]).where(cls.model.parent_id == parent_id, cls.model.id != parent_id):
|
||||
size += f.size
|
||||
if f.type == FileType.FOLDER.value:
|
||||
dfs(f.id)
|
||||
|
||||
dfs(folder_id)
|
||||
return size
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def add_file_from_kb(cls, doc, kb_folder_id, tenant_id):
|
||||
for _ in File2DocumentService.get_by_document_id(doc["id"]):
|
||||
return
|
||||
file = {
|
||||
"id": get_uuid(),
|
||||
"parent_id": kb_folder_id,
|
||||
"tenant_id": tenant_id,
|
||||
"created_by": tenant_id,
|
||||
"name": doc["name"],
|
||||
"type": doc["type"],
|
||||
"size": doc["size"],
|
||||
"location": doc["location"],
|
||||
"source_type": FileSource.KNOWLEDGEBASE,
|
||||
}
|
||||
cls.save(**file)
|
||||
File2DocumentService.save(**{"id": get_uuid(), "file_id": file["id"], "document_id": doc["id"]})
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def move_file(cls, file_ids, folder_id):
|
||||
try:
|
||||
cls.filter_update((cls.model.id << file_ids,), {"parent_id": folder_id})
|
||||
except Exception:
|
||||
logging.exception("move_file")
|
||||
raise RuntimeError("Database error (File move)!")
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
async def upload_document(self, kb, file_objs, user_id):
|
||||
root_folder = self.get_root_folder(user_id)
|
||||
pf_id = root_folder["id"]
|
||||
self.init_knowledgebase_docs(pf_id, user_id)
|
||||
kb_root_folder = self.get_kb_folder(user_id)
|
||||
kb_folder = self.new_a_file_from_kb(kb.tenant_id, kb.name, kb_root_folder["id"])
|
||||
|
||||
err, files = [], []
|
||||
for file in file_objs:
|
||||
try:
|
||||
DocumentService.check_doc_health(kb.tenant_id, file.filename)
|
||||
filename = duplicate_name(DocumentService.query, name=file.filename, kb_id=kb.id)
|
||||
filetype = filename_type(filename)
|
||||
if filetype == FileType.OTHER.value:
|
||||
raise RuntimeError("This type of file has not been supported yet!")
|
||||
|
||||
location = filename
|
||||
while STORAGE_IMPL.obj_exist(kb.id, location):
|
||||
location += "_"
|
||||
|
||||
blob = await file.read()
|
||||
if filetype == FileType.PDF.value:
|
||||
blob = read_potential_broken_pdf(blob)
|
||||
STORAGE_IMPL.put(kb.id, location, blob)
|
||||
|
||||
doc_id = get_uuid()
|
||||
|
||||
img = thumbnail_img(filename, blob)
|
||||
thumbnail_location = ""
|
||||
if img is not None:
|
||||
thumbnail_location = f"thumbnail_{doc_id}.png"
|
||||
STORAGE_IMPL.put(kb.id, thumbnail_location, img)
|
||||
|
||||
doc = {
|
||||
"id": doc_id,
|
||||
"kb_id": kb.id,
|
||||
"parser_id": self.get_parser(filetype, filename, kb.parser_id),
|
||||
"pipeline_id": kb.pipeline_id,
|
||||
"parser_config": kb.parser_config,
|
||||
"created_by": user_id,
|
||||
"type": filetype,
|
||||
"name": filename,
|
||||
"suffix": Path(filename).suffix.lstrip("."),
|
||||
"location": location,
|
||||
"size": len(blob),
|
||||
"thumbnail": thumbnail_location,
|
||||
}
|
||||
DocumentService.insert(doc)
|
||||
|
||||
FileService.add_file_from_kb(doc, kb_folder["id"], kb.tenant_id)
|
||||
files.append((doc, blob))
|
||||
except Exception as e:
|
||||
traceback.print_exc()
|
||||
err.append(file.filename + ": " + str(e))
|
||||
|
||||
return err, files
|
||||
|
||||
@staticmethod
|
||||
async def parse_docs(file_objs, user_id):
|
||||
exe = ThreadPoolExecutor(max_workers=12)
|
||||
threads = []
|
||||
for file in file_objs:
|
||||
# Check if file has async read method (UploadFile)
|
||||
if callable(getattr(file, 'read', None)):
|
||||
try:
|
||||
# Try to get the coroutine to check if it's async
|
||||
read_result = file.read()
|
||||
if hasattr(read_result, '__await__'):
|
||||
# It's an async method, await it
|
||||
blob = await read_result
|
||||
else:
|
||||
# It's a sync method
|
||||
blob = read_result
|
||||
except Exception:
|
||||
# Fallback to sync read
|
||||
blob = file.read()
|
||||
else:
|
||||
blob = file.read()
|
||||
|
||||
threads.append(exe.submit(FileService.parse, file.filename, blob, False))
|
||||
|
||||
res = []
|
||||
for th in threads:
|
||||
res.append(th.result())
|
||||
|
||||
return "\n\n".join(res)
|
||||
|
||||
@staticmethod
|
||||
def parse(filename, blob, img_base64=True, tenant_id=None):
|
||||
from rag.app import audio, email, naive, picture, presentation
|
||||
|
||||
def dummy(prog=None, msg=""):
|
||||
pass
|
||||
|
||||
FACTORY = {ParserType.PRESENTATION.value: presentation, ParserType.PICTURE.value: picture, ParserType.AUDIO.value: audio, ParserType.EMAIL.value: email}
|
||||
parser_config = {"chunk_token_num": 16096, "delimiter": "\n!?;。;!?", "layout_recognize": "Plain Text"}
|
||||
kwargs = {"lang": "English", "callback": dummy, "parser_config": parser_config, "from_page": 0, "to_page": 100000, "tenant_id": current_user.id if current_user else tenant_id}
|
||||
file_type = filename_type(filename)
|
||||
if img_base64 and file_type == FileType.VISUAL.value:
|
||||
return GptV4.image2base64(blob)
|
||||
cks = FACTORY.get(FileService.get_parser(filename_type(filename), filename, ""), naive).chunk(filename, blob, **kwargs)
|
||||
return "\n".join([ck["content_with_weight"] for ck in cks])
|
||||
|
||||
@staticmethod
|
||||
def get_parser(doc_type, filename, default):
|
||||
if doc_type == FileType.VISUAL:
|
||||
return ParserType.PICTURE.value
|
||||
if doc_type == FileType.AURAL:
|
||||
return ParserType.AUDIO.value
|
||||
if re.search(r"\.(ppt|pptx|pages)$", filename):
|
||||
return ParserType.PRESENTATION.value
|
||||
if re.search(r"\.(msg|eml)$", filename):
|
||||
return ParserType.EMAIL.value
|
||||
return default
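# Example routing decisions made by get_parser() above (illustrative; assumes
# filename_type() classifies the extensions as shown):
#
#     FileService.get_parser(filename_type("slides.pptx"), "slides.pptx", kb.parser_id)  # -> ParserType.PRESENTATION.value
#     FileService.get_parser(filename_type("photo.png"), "photo.png", kb.parser_id)      # -> ParserType.PICTURE.value
#     FileService.get_parser(filename_type("report.pdf"), "report.pdf", kb.parser_id)    # -> kb.parser_id (fallback)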
|
||||
|
||||
@staticmethod
|
||||
def get_blob(user_id, location):
|
||||
bname = f"{user_id}-downloads"
|
||||
return STORAGE_IMPL.get(bname, location)
|
||||
|
||||
@staticmethod
|
||||
def put_blob(user_id, location, blob):
|
||||
bname = f"{user_id}-downloads"
|
||||
return STORAGE_IMPL.put(bname, location, blob)
|
||||
496
api/db/services/knowledgebase_service.py
Normal file
496
api/db/services/knowledgebase_service.py
Normal file
@@ -0,0 +1,496 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
from datetime import datetime
|
||||
|
||||
from peewee import fn, JOIN
|
||||
|
||||
from api.db import StatusEnum, TenantPermission
|
||||
from api.db.db_models import DB, Document, Knowledgebase, User, UserTenant, UserCanvas
|
||||
from api.db.services.common_service import CommonService
|
||||
from api.utils import current_timestamp, datetime_format
|
||||
|
||||
|
||||
class KnowledgebaseService(CommonService):
|
||||
"""Service class for managing knowledge base operations.
|
||||
|
||||
This class extends CommonService to provide specialized functionality for knowledge base
|
||||
management, including document parsing status tracking, access control, and configuration
|
||||
management. It handles operations such as listing, creating, updating, and deleting
|
||||
knowledge bases, as well as managing their associated documents and permissions.
|
||||
|
||||
The class implements a comprehensive set of methods for:
|
||||
- Document parsing status verification
|
||||
- Knowledge base access control
|
||||
- Parser configuration management
|
||||
- Tenant-based knowledge base organization
|
||||
|
||||
Attributes:
|
||||
model: The Knowledgebase model class for database operations.
|
||||
"""
|
||||
model = Knowledgebase
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def accessible4deletion(cls, kb_id, user_id):
|
||||
"""Check if a knowledge base can be deleted by a specific user.
|
||||
|
||||
This method verifies whether a user has permission to delete a knowledge base
|
||||
by checking if they are the creator of that knowledge base.
|
||||
|
||||
Args:
|
||||
kb_id (str): The unique identifier of the knowledge base to check.
|
||||
user_id (str): The unique identifier of the user attempting the deletion.
|
||||
|
||||
Returns:
|
||||
bool: True if the user has permission to delete the knowledge base,
|
||||
False if the user doesn't have permission or the knowledge base doesn't exist.
|
||||
|
||||
Example:
|
||||
>>> KnowledgebaseService.accessible4deletion("kb123", "user456")
|
||||
True
|
||||
|
||||
Note:
|
||||
- This method only checks creator permissions
|
||||
- A return value of False can mean either:
|
||||
1. The knowledge base doesn't exist
|
||||
2. The user is not the creator of the knowledge base
|
||||
"""
|
||||
# Check if a knowledge base can be deleted by a user
|
||||
docs = cls.model.select(
|
||||
cls.model.id).where(cls.model.id == kb_id, cls.model.created_by == user_id).paginate(0, 1)
|
||||
docs = docs.dicts()
|
||||
if not docs:
|
||||
return False
|
||||
return True
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def is_parsed_done(cls, kb_id):
|
||||
# Check if all documents in the knowledge base have completed parsing
|
||||
#
|
||||
# Args:
|
||||
# kb_id: Knowledge base ID
|
||||
#
|
||||
# Returns:
|
||||
# If all documents are parsed successfully, returns (True, None)
|
||||
# If any document is not fully parsed, returns (False, error_message)
|
||||
from api.db import TaskStatus
|
||||
from api.db.services.document_service import DocumentService
|
||||
|
||||
# Get knowledge base information
|
||||
kbs = cls.query(id=kb_id)
|
||||
if not kbs:
|
||||
return False, "Knowledge base not found"
|
||||
kb = kbs[0]
|
||||
|
||||
# Get all documents in the knowledge base
|
||||
docs, _ = DocumentService.get_by_kb_id(kb_id, 1, 1000, "create_time", True, "", [], [])
|
||||
|
||||
# Check parsing status of each document
|
||||
for doc in docs:
|
||||
# If document is being parsed, don't allow chat creation
|
||||
if doc['run'] in [TaskStatus.RUNNING.value, TaskStatus.CANCEL.value, TaskStatus.FAIL.value]:
|
||||
return False, f"Document '{doc['name']}' in dataset '{kb.name}' is still being parsed. Please wait until all documents are parsed before starting a chat."
|
||||
# If document is not yet parsed and has no chunks, don't allow chat creation
|
||||
if doc['run'] == TaskStatus.UNSTART.value and doc['chunk_num'] == 0:
|
||||
return False, f"Document '{doc['name']}' in dataset '{kb.name}' has not been parsed yet. Please parse all documents before starting a chat."
|
||||
|
||||
return True, None
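A minimal usage sketch, assuming a caller that wants to refuse chat creation while parsing is incomplete (the wrapper function name is hypothetical, not part of this commit):

from api.db.services.knowledgebase_service import KnowledgebaseService

def ensure_kb_ready_for_chat(kb_id: str) -> None:  # hypothetical helper
    done, err = KnowledgebaseService.is_parsed_done(kb_id)
    if not done:
        # err carries the service's own message: missing KB, or a document
        # that is still running, cancelled, failed, or not yet parsed.
        raise ValueError(err)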
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def list_documents_by_ids(cls, kb_ids):
|
||||
# Get document IDs associated with given knowledge base IDs
|
||||
# Args:
|
||||
# kb_ids: List of knowledge base IDs
|
||||
# Returns:
|
||||
# List of document IDs
|
||||
doc_ids = cls.model.select(Document.id.alias("document_id")).join(Document, on=(cls.model.id == Document.kb_id)).where(
|
||||
cls.model.id.in_(kb_ids)
|
||||
)
|
||||
doc_ids = list(doc_ids.dicts())
|
||||
doc_ids = [doc["document_id"] for doc in doc_ids]
|
||||
return doc_ids
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_by_tenant_ids(cls, joined_tenant_ids, user_id,
|
||||
page_number, items_per_page,
|
||||
orderby, desc, keywords,
|
||||
parser_id=None
|
||||
):
|
||||
# Get knowledge bases by tenant IDs with pagination and filtering
|
||||
# Args:
|
||||
# joined_tenant_ids: List of tenant IDs
|
||||
# user_id: Current user ID
|
||||
# page_number: Page number for pagination
|
||||
# items_per_page: Number of items per page
|
||||
# orderby: Field to order by
|
||||
# desc: Boolean indicating descending order
|
||||
# keywords: Search keywords
|
||||
# parser_id: Optional parser ID filter
|
||||
# Returns:
|
||||
# Tuple of (knowledge_base_list, total_count)
|
||||
fields = [
|
||||
cls.model.id,
|
||||
cls.model.avatar,
|
||||
cls.model.name,
|
||||
cls.model.language,
|
||||
cls.model.description,
|
||||
cls.model.tenant_id,
|
||||
cls.model.permission,
|
||||
cls.model.doc_num,
|
||||
cls.model.token_num,
|
||||
cls.model.chunk_num,
|
||||
cls.model.parser_id,
|
||||
cls.model.embd_id,
|
||||
User.nickname,
|
||||
User.avatar.alias('tenant_avatar'),
|
||||
cls.model.update_time
|
||||
]
|
||||
if keywords:
|
||||
kbs = cls.model.select(*fields).join(User, on=(cls.model.tenant_id == User.id)).where(
|
||||
((cls.model.tenant_id.in_(joined_tenant_ids) & (cls.model.permission ==
|
||||
TenantPermission.TEAM.value)) | (
|
||||
cls.model.tenant_id == user_id))
|
||||
& (cls.model.status == StatusEnum.VALID.value),
|
||||
(fn.LOWER(cls.model.name).contains(keywords.lower()))
|
||||
)
|
||||
else:
|
||||
kbs = cls.model.select(*fields).join(User, on=(cls.model.tenant_id == User.id)).where(
|
||||
((cls.model.tenant_id.in_(joined_tenant_ids) & (cls.model.permission ==
|
||||
TenantPermission.TEAM.value)) | (
|
||||
cls.model.tenant_id == user_id))
|
||||
& (cls.model.status == StatusEnum.VALID.value)
|
||||
)
|
||||
if parser_id:
|
||||
kbs = kbs.where(cls.model.parser_id == parser_id)
|
||||
if desc:
|
||||
kbs = kbs.order_by(cls.model.getter_by(orderby).desc())
|
||||
else:
|
||||
kbs = kbs.order_by(cls.model.getter_by(orderby).asc())
|
||||
|
||||
count = kbs.count()
|
||||
|
||||
if page_number and items_per_page:
|
||||
kbs = kbs.paginate(page_number, items_per_page)
|
||||
|
||||
return list(kbs.dicts()), count
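A usage sketch of the tenant-scoped listing above; all IDs and the keyword are made up:

from api.db.services.knowledgebase_service import KnowledgebaseService

kbs, total = KnowledgebaseService.get_by_tenant_ids(
    joined_tenant_ids=["tenant_a", "tenant_b"],  # tenants the user has joined
    user_id="user_123",
    page_number=1,
    items_per_page=20,
    orderby="update_time",
    desc=True,
    keywords="finance",   # case-insensitive match on the KB name
    parser_id=None,       # or e.g. "naive" to filter by parser
)
print(total, [kb["name"] for kb in kbs])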
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_all_kb_by_tenant_ids(cls, tenant_ids, user_id):
|
||||
# Returns every knowledge base the user is permitted to see; use with caution.
|
||||
fields = [
|
||||
cls.model.name,
|
||||
cls.model.language,
|
||||
cls.model.permission,
|
||||
cls.model.doc_num,
|
||||
cls.model.token_num,
|
||||
cls.model.chunk_num,
|
||||
cls.model.status,
|
||||
cls.model.create_date,
|
||||
cls.model.update_date
|
||||
]
|
||||
# find team kb and owned kb
|
||||
kbs = cls.model.select(*fields).where(
|
||||
(cls.model.tenant_id.in_(tenant_ids) & (cls.model.permission == TenantPermission.TEAM.value)) | (
|
||||
cls.model.tenant_id == user_id
|
||||
)
|
||||
)
|
||||
# sort by create_time asc
|
||||
kbs = kbs.order_by(cls.model.create_time.asc())
|
||||
# Deep pagination may cause slow queries; optimize later.
|
||||
offset, limit = 0, 50
|
||||
res = []
|
||||
while True:
|
||||
kb_batch = kbs.offset(offset).limit(limit)
|
||||
_temp = list(kb_batch.dicts())
|
||||
if not _temp:
|
||||
break
|
||||
res.extend(_temp)
|
||||
offset += limit
|
||||
return res
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_kb_ids(cls, tenant_id):
|
||||
# Get all knowledge base IDs for a tenant
|
||||
# Args:
|
||||
# tenant_id: Tenant ID
|
||||
# Returns:
|
||||
# List of knowledge base IDs
|
||||
fields = [
|
||||
cls.model.id,
|
||||
]
|
||||
kbs = cls.model.select(*fields).where(cls.model.tenant_id == tenant_id)
|
||||
kb_ids = [kb.id for kb in kbs]
|
||||
return kb_ids
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_detail(cls, kb_id):
|
||||
# Get detailed information about a knowledge base
|
||||
# Args:
|
||||
# kb_id: Knowledge base ID
|
||||
# Returns:
|
||||
# Dictionary containing knowledge base details
|
||||
fields = [
|
||||
cls.model.id,
|
||||
cls.model.embd_id,
|
||||
cls.model.avatar,
|
||||
cls.model.name,
|
||||
cls.model.language,
|
||||
cls.model.description,
|
||||
cls.model.permission,
|
||||
cls.model.doc_num,
|
||||
cls.model.token_num,
|
||||
cls.model.chunk_num,
|
||||
cls.model.parser_id,
|
||||
cls.model.pipeline_id,
|
||||
UserCanvas.title.alias("pipeline_name"),
|
||||
UserCanvas.avatar.alias("pipeline_avatar"),
|
||||
cls.model.parser_config,
|
||||
cls.model.pagerank,
|
||||
cls.model.graphrag_task_id,
|
||||
cls.model.graphrag_task_finish_at,
|
||||
cls.model.raptor_task_id,
|
||||
cls.model.raptor_task_finish_at,
|
||||
cls.model.mindmap_task_id,
|
||||
cls.model.mindmap_task_finish_at,
|
||||
cls.model.create_time,
|
||||
cls.model.update_time
|
||||
]
|
||||
kbs = cls.model.select(*fields)\
|
||||
.join(UserCanvas, on=(cls.model.pipeline_id == UserCanvas.id), join_type=JOIN.LEFT_OUTER)\
|
||||
.where(
|
||||
(cls.model.id == kb_id),
|
||||
(cls.model.status == StatusEnum.VALID.value)
|
||||
).dicts()
|
||||
if not kbs:
|
||||
return
|
||||
return kbs[0]
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def update_parser_config(cls, id, config):
|
||||
# Update parser configuration for a knowledge base
|
||||
# Args:
|
||||
# id: Knowledge base ID
|
||||
# config: New parser configuration
|
||||
e, m = cls.get_by_id(id)
|
||||
if not e:
|
||||
raise LookupError(f"knowledgebase({id}) not found.")
|
||||
|
||||
def dfs_update(old, new):
|
||||
# Deep update of nested configuration
|
||||
for k, v in new.items():
|
||||
if k not in old:
|
||||
old[k] = v
|
||||
continue
|
||||
if isinstance(v, dict):
|
||||
assert isinstance(old[k], dict)
|
||||
dfs_update(old[k], v)
|
||||
elif isinstance(v, list):
|
||||
assert isinstance(old[k], list)
|
||||
old[k] = list(set(old[k] + v))
|
||||
else:
|
||||
old[k] = v
|
||||
|
||||
dfs_update(m.parser_config, config)
|
||||
cls.update_by_id(id, {"parser_config": m.parser_config})
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def delete_field_map(cls, id):
|
||||
e, m = cls.get_by_id(id)
|
||||
if not e:
|
||||
raise LookupError(f"knowledgebase({id}) not found.")
|
||||
|
||||
m.parser_config.pop("field_map", None)
|
||||
cls.update_by_id(id, {"parser_config": m.parser_config})
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_field_map(cls, ids):
|
||||
# Get field mappings for knowledge bases
|
||||
# Args:
|
||||
# ids: List of knowledge base IDs
|
||||
# Returns:
|
||||
# Dictionary of field mappings
|
||||
conf = {}
|
||||
for k in cls.get_by_ids(ids):
|
||||
if k.parser_config and "field_map" in k.parser_config:
|
||||
conf.update(k.parser_config["field_map"])
|
||||
return conf
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_by_name(cls, kb_name, tenant_id):
|
||||
# Get knowledge base by name and tenant ID
|
||||
# Args:
|
||||
# kb_name: Knowledge base name
|
||||
# tenant_id: Tenant ID
|
||||
# Returns:
|
||||
# Tuple of (exists, knowledge_base)
|
||||
kb = cls.model.select().where(
|
||||
(cls.model.name == kb_name)
|
||||
& (cls.model.tenant_id == tenant_id)
|
||||
& (cls.model.status == StatusEnum.VALID.value)
|
||||
)
|
||||
if kb:
|
||||
return True, kb[0]
|
||||
return False, None
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_all_ids(cls):
|
||||
# Get all knowledge base IDs
|
||||
# Returns:
|
||||
# List of all knowledge base IDs
|
||||
return [m["id"] for m in cls.model.select(cls.model.id).dicts()]
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_list(cls, joined_tenant_ids, user_id,
|
||||
page_number, items_per_page, orderby, desc, id, name):
|
||||
# Get list of knowledge bases with filtering and pagination
|
||||
# Args:
|
||||
# joined_tenant_ids: List of tenant IDs
|
||||
# user_id: Current user ID
|
||||
# page_number: Page number for pagination
|
||||
# items_per_page: Number of items per page
|
||||
# orderby: Field to order by
|
||||
# desc: Boolean indicating descending order
|
||||
# id: Optional ID filter
|
||||
# name: Optional name filter
|
||||
# Returns:
|
||||
# List of knowledge bases
|
||||
kbs = cls.model.select()
|
||||
if id:
|
||||
kbs = kbs.where(cls.model.id == id)
|
||||
if name:
|
||||
kbs = kbs.where(cls.model.name == name)
|
||||
kbs = kbs.where(
|
||||
((cls.model.tenant_id.in_(joined_tenant_ids) & (cls.model.permission ==
|
||||
TenantPermission.TEAM.value)) | (
|
||||
cls.model.tenant_id == user_id))
|
||||
& (cls.model.status == StatusEnum.VALID.value)
|
||||
)
|
||||
if desc:
|
||||
kbs = kbs.order_by(cls.model.getter_by(orderby).desc())
|
||||
else:
|
||||
kbs = kbs.order_by(cls.model.getter_by(orderby).asc())
|
||||
|
||||
kbs = kbs.paginate(page_number, items_per_page)
|
||||
|
||||
return list(kbs.dicts())
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def accessible(cls, kb_id, user_id):
|
||||
# Check if a knowledge base is accessible by a user
|
||||
# Args:
|
||||
# kb_id: Knowledge base ID
|
||||
# user_id: User ID
|
||||
# Returns:
|
||||
# Boolean indicating accessibility
|
||||
docs = cls.model.select(
|
||||
cls.model.id).join(UserTenant, on=(UserTenant.tenant_id == Knowledgebase.tenant_id)
|
||||
).where(cls.model.id == kb_id, UserTenant.user_id == user_id).paginate(0, 1)
|
||||
docs = docs.dicts()
|
||||
if not docs:
|
||||
return False
|
||||
return True
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_kb_by_id(cls, kb_id, user_id):
|
||||
# Get knowledge base by ID and user ID
|
||||
# Args:
|
||||
# kb_id: Knowledge base ID
|
||||
# user_id: User ID
|
||||
# Returns:
|
||||
# List containing knowledge base information
|
||||
kbs = cls.model.select().join(UserTenant, on=(UserTenant.tenant_id == Knowledgebase.tenant_id)
|
||||
).where(cls.model.id == kb_id, UserTenant.user_id == user_id).paginate(0, 1)
|
||||
kbs = kbs.dicts()
|
||||
return list(kbs)
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_kb_by_name(cls, kb_name, user_id):
|
||||
# Get knowledge base by name and user ID
|
||||
# Args:
|
||||
# kb_name: Knowledge base name
|
||||
# user_id: User ID
|
||||
# Returns:
|
||||
# List containing knowledge base information
|
||||
kbs = cls.model.select().join(UserTenant, on=(UserTenant.tenant_id == Knowledgebase.tenant_id)
|
||||
).where(cls.model.name == kb_name, UserTenant.user_id == user_id).paginate(0, 1)
|
||||
kbs = kbs.dicts()
|
||||
return list(kbs)
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def atomic_increase_doc_num_by_id(cls, kb_id):
|
||||
data = {}
|
||||
data["update_time"] = current_timestamp()
|
||||
data["update_date"] = datetime_format(datetime.now())
|
||||
data["doc_num"] = cls.model.doc_num + 1
|
||||
num = cls.model.update(data).where(cls.model.id == kb_id).execute()
|
||||
return num
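The increment is expressed as the field expression cls.model.doc_num + 1, so it is resolved server-side in a single UPDATE rather than as a read-modify-write in Python. A hedged sketch of the effect ("kb_123" is a made-up id, and the SQL shown is only approximate):

from api.db.services.knowledgebase_service import KnowledgebaseService

# peewee renders the field expression into the UPDATE itself, roughly:
#   UPDATE knowledgebase SET doc_num = doc_num + 1, update_time = ..., update_date = ...
#   WHERE id = 'kb_123';
# so two workers incrementing concurrently cannot lose an update the way a
# Python-side "read doc_num, add 1, write back" sequence could.
KnowledgebaseService.atomic_increase_doc_num_by_id("kb_123")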
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def update_document_number_in_init(cls, kb_id, doc_num):
|
||||
"""
|
||||
Only use this function during system initialization.
|
||||
"""
|
||||
ok, kb = cls.get_by_id(kb_id)
|
||||
if not ok:
|
||||
return
|
||||
kb.doc_num = doc_num
|
||||
|
||||
dirty_fields = kb.dirty_fields
|
||||
if cls.model._meta.combined.get("update_time") in dirty_fields:
|
||||
dirty_fields.remove(cls.model._meta.combined["update_time"])
|
||||
|
||||
if cls.model._meta.combined.get("update_date") in dirty_fields:
|
||||
dirty_fields.remove(cls.model._meta.combined["update_date"])
|
||||
|
||||
try:
|
||||
kb.save(only=dirty_fields)
|
||||
except ValueError as e:
|
||||
if str(e) == "no data to save!":
|
||||
pass # that's OK
|
||||
else:
|
||||
raise e
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def decrease_document_num_in_delete(cls, kb_id, doc_num_info: dict):
|
||||
kb_row = cls.model.get_by_id(kb_id)
|
||||
if not kb_row:
|
||||
raise RuntimeError(f"kb_id {kb_id} does not exist")
|
||||
update_dict = {
|
||||
'doc_num': kb_row.doc_num - doc_num_info['doc_num'],
|
||||
'chunk_num': kb_row.chunk_num - doc_num_info['chunk_num'],
|
||||
'token_num': kb_row.token_num - doc_num_info['token_num'],
|
||||
'update_time': current_timestamp(),
|
||||
'update_date': datetime_format(datetime.now())
|
||||
}
|
||||
return cls.model.update(update_dict).where(cls.model.id == kb_id).execute()
|
||||
76
api/db/services/langfuse_service.py
Normal file
@@ -0,0 +1,76 @@
|
||||
#
|
||||
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
from datetime import datetime
|
||||
|
||||
import peewee
|
||||
|
||||
from api.db.db_models import DB, TenantLangfuse
|
||||
from api.db.services.common_service import CommonService
|
||||
from api.utils import current_timestamp, datetime_format
|
||||
|
||||
|
||||
class TenantLangfuseService(CommonService):
|
||||
"""
|
||||
All methods that modify the status should be enclosed within a DB.atomic() context to ensure atomicity
|
||||
and maintain data integrity in case of errors during execution.
|
||||
"""
|
||||
|
||||
model = TenantLangfuse
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def filter_by_tenant(cls, tenant_id):
|
||||
fields = [cls.model.tenant_id, cls.model.host, cls.model.secret_key, cls.model.public_key]
|
||||
try:
|
||||
keys = cls.model.select(*fields).where(cls.model.tenant_id == tenant_id).first()
|
||||
return keys
|
||||
except peewee.DoesNotExist:
|
||||
return None
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def filter_by_tenant_with_info(cls, tenant_id):
|
||||
fields = [cls.model.tenant_id, cls.model.host, cls.model.secret_key, cls.model.public_key]
|
||||
try:
|
||||
keys = cls.model.select(*fields).where(cls.model.tenant_id == tenant_id).dicts().first()
|
||||
return keys
|
||||
except peewee.DoesNotExist:
|
||||
return None
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def delete_ty_tenant_id(cls, tenant_id):
|
||||
return cls.model.delete().where(cls.model.tenant_id == tenant_id).execute()
|
||||
|
||||
@classmethod
|
||||
def update_by_tenant(cls, tenant_id, langfuse_keys):
|
||||
langfuse_keys["update_time"] = current_timestamp()
|
||||
langfuse_keys["update_date"] = datetime_format(datetime.now())
|
||||
return cls.model.update(**langfuse_keys).where(cls.model.tenant_id == tenant_id).execute()
|
||||
|
||||
@classmethod
|
||||
def save(cls, **kwargs):
|
||||
kwargs["create_time"] = current_timestamp()
|
||||
kwargs["create_date"] = datetime_format(datetime.now())
|
||||
kwargs["update_time"] = current_timestamp()
|
||||
kwargs["update_date"] = datetime_format(datetime.now())
|
||||
obj = cls.model.create(**kwargs)
|
||||
return obj
|
||||
|
||||
@classmethod
|
||||
def delete_model(cls, langfuse_model):
|
||||
langfuse_model.delete_instance()
|
||||
279
api/db/services/llm_service.py
Normal file
@@ -0,0 +1,279 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import inspect
|
||||
import logging
|
||||
import re
|
||||
from functools import partial
|
||||
from typing import Generator
|
||||
from api.db.db_models import LLM
|
||||
from api.db.services.common_service import CommonService
|
||||
from api.db.services.tenant_llm_service import LLM4Tenant, TenantLLMService
|
||||
|
||||
|
||||
class LLMService(CommonService):
|
||||
model = LLM
|
||||
|
||||
|
||||
def get_init_tenant_llm(user_id):
|
||||
from api import settings
|
||||
tenant_llm = []
|
||||
|
||||
seen = set()
|
||||
factory_configs = []
|
||||
for factory_config in [
|
||||
settings.CHAT_CFG,
|
||||
settings.EMBEDDING_CFG,
|
||||
settings.ASR_CFG,
|
||||
settings.IMAGE2TEXT_CFG,
|
||||
settings.RERANK_CFG,
|
||||
]:
|
||||
factory_name = factory_config["factory"]
|
||||
if factory_name not in seen:
|
||||
seen.add(factory_name)
|
||||
factory_configs.append(factory_config)
|
||||
|
||||
for factory_config in factory_configs:
|
||||
for llm in LLMService.query(fid=factory_config["factory"]):
|
||||
tenant_llm.append(
|
||||
{
|
||||
"tenant_id": user_id,
|
||||
"llm_factory": factory_config["factory"],
|
||||
"llm_name": llm.llm_name,
|
||||
"model_type": llm.model_type,
|
||||
"api_key": factory_config["api_key"],
|
||||
"api_base": factory_config["base_url"],
|
||||
"max_tokens": llm.max_tokens if llm.max_tokens else 8192,
|
||||
}
|
||||
)
|
||||
|
||||
if settings.LIGHTEN != 1:
|
||||
for buildin_embedding_model in settings.BUILTIN_EMBEDDING_MODELS:
|
||||
mdlnm, fid = TenantLLMService.split_model_name_and_factory(buildin_embedding_model)
|
||||
tenant_llm.append(
|
||||
{
|
||||
"tenant_id": user_id,
|
||||
"llm_factory": fid,
|
||||
"llm_name": mdlnm,
|
||||
"model_type": "embedding",
|
||||
"api_key": "",
|
||||
"api_base": "",
|
||||
"max_tokens": 1024 if buildin_embedding_model == "BAAI/bge-large-zh-v1.5@BAAI" else 512,
|
||||
}
|
||||
)
|
||||
|
||||
unique = {}
|
||||
for item in tenant_llm:
|
||||
key = (item["tenant_id"], item["llm_factory"], item["llm_name"])
|
||||
if key not in unique:
|
||||
unique[key] = item
|
||||
return list(unique.values())
|
||||
|
||||
|
||||
class LLMBundle(LLM4Tenant):
|
||||
def __init__(self, tenant_id, llm_type, llm_name=None, lang="Chinese", **kwargs):
|
||||
super().__init__(tenant_id, llm_type, llm_name, lang, **kwargs)
|
||||
|
||||
def bind_tools(self, toolcall_session, tools):
|
||||
if not self.is_tools:
|
||||
logging.warning(f"Model {self.llm_name} does not support tool call, but you have assigned one or more tools to it!")
|
||||
return
|
||||
self.mdl.bind_tools(toolcall_session, tools)
|
||||
|
||||
def encode(self, texts: list):
|
||||
if self.langfuse:
|
||||
generation = self.langfuse.start_generation(trace_context=self.trace_context, name="encode", model=self.llm_name, input={"texts": texts})
|
||||
|
||||
embeddings, used_tokens = self.mdl.encode(texts)
|
||||
llm_name = getattr(self, "llm_name", None)
|
||||
if not TenantLLMService.increase_usage(self.tenant_id, self.llm_type, used_tokens, llm_name):
|
||||
logging.error("LLMBundle.encode can't update token usage for {}/EMBEDDING used_tokens: {}".format(self.tenant_id, used_tokens))
|
||||
|
||||
if self.langfuse:
|
||||
generation.update(usage_details={"total_tokens": used_tokens})
|
||||
generation.end()
|
||||
|
||||
return embeddings, used_tokens
|
||||
|
||||
def encode_queries(self, query: str):
|
||||
if self.langfuse:
|
||||
generation = self.langfuse.start_generation(trace_context=self.trace_context, name="encode_queries", model=self.llm_name, input={"query": query})
|
||||
|
||||
emd, used_tokens = self.mdl.encode_queries(query)
|
||||
llm_name = getattr(self, "llm_name", None)
|
||||
if not TenantLLMService.increase_usage(self.tenant_id, self.llm_type, used_tokens, llm_name):
|
||||
logging.error("LLMBundle.encode_queries can't update token usage for {}/EMBEDDING used_tokens: {}".format(self.tenant_id, used_tokens))
|
||||
|
||||
if self.langfuse:
|
||||
generation.update(usage_details={"total_tokens": used_tokens})
|
||||
generation.end()
|
||||
|
||||
return emd, used_tokens
|
||||
|
||||
def similarity(self, query: str, texts: list):
|
||||
if self.langfuse:
|
||||
generation = self.langfuse.start_generation(trace_context=self.trace_context, name="similarity", model=self.llm_name, input={"query": query, "texts": texts})
|
||||
|
||||
sim, used_tokens = self.mdl.similarity(query, texts)
|
||||
if not TenantLLMService.increase_usage(self.tenant_id, self.llm_type, used_tokens):
|
||||
logging.error("LLMBundle.similarity can't update token usage for {}/RERANK used_tokens: {}".format(self.tenant_id, used_tokens))
|
||||
|
||||
if self.langfuse:
|
||||
generation.update(usage_details={"total_tokens": used_tokens})
|
||||
generation.end()
|
||||
|
||||
return sim, used_tokens
|
||||
|
||||
def describe(self, image, max_tokens=300):
|
||||
if self.langfuse:
|
||||
generation = self.langfuse.start_generation(trace_context=self.trace_context, name="describe", metadata={"model": self.llm_name})
|
||||
|
||||
txt, used_tokens = self.mdl.describe(image)
|
||||
if not TenantLLMService.increase_usage(self.tenant_id, self.llm_type, used_tokens):
|
||||
logging.error("LLMBundle.describe can't update token usage for {}/IMAGE2TEXT used_tokens: {}".format(self.tenant_id, used_tokens))
|
||||
|
||||
if self.langfuse:
|
||||
generation.update(output={"output": txt}, usage_details={"total_tokens": used_tokens})
|
||||
generation.end()
|
||||
|
||||
return txt
|
||||
|
||||
def describe_with_prompt(self, image, prompt):
|
||||
if self.langfuse:
|
||||
generation = self.langfuse.start_generation(trace_context=self.trace_context, name="describe_with_prompt", metadata={"model": self.llm_name, "prompt": prompt})
|
||||
|
||||
txt, used_tokens = self.mdl.describe_with_prompt(image, prompt)
|
||||
if not TenantLLMService.increase_usage(self.tenant_id, self.llm_type, used_tokens):
|
||||
logging.error("LLMBundle.describe can't update token usage for {}/IMAGE2TEXT used_tokens: {}".format(self.tenant_id, used_tokens))
|
||||
|
||||
if self.langfuse:
|
||||
generation.update(output={"output": txt}, usage_details={"total_tokens": used_tokens})
|
||||
generation.end()
|
||||
|
||||
return txt
|
||||
|
||||
def transcription(self, audio):
|
||||
if self.langfuse:
|
||||
generation = self.langfuse.start_generation(trace_context=self.trace_context, name="transcription", metadata={"model": self.llm_name})
|
||||
|
||||
txt, used_tokens = self.mdl.transcription(audio)
|
||||
if not TenantLLMService.increase_usage(self.tenant_id, self.llm_type, used_tokens):
|
||||
logging.error("LLMBundle.transcription can't update token usage for {}/SEQUENCE2TXT used_tokens: {}".format(self.tenant_id, used_tokens))
|
||||
|
||||
if self.langfuse:
|
||||
generation.update(output={"output": txt}, usage_details={"total_tokens": used_tokens})
|
||||
generation.end()
|
||||
|
||||
return txt
|
||||
|
||||
def tts(self, text: str) -> Generator[bytes, None, None]:
|
||||
if self.langfuse:
|
||||
generation = self.langfuse.start_generation(trace_context=self.trace_context, name="tts", input={"text": text})
|
||||
|
||||
for chunk in self.mdl.tts(text):
|
||||
if isinstance(chunk, int):
|
||||
if not TenantLLMService.increase_usage(self.tenant_id, self.llm_type, chunk, self.llm_name):
|
||||
logging.error("LLMBundle.tts can't update token usage for {}/TTS".format(self.tenant_id))
|
||||
return
|
||||
yield chunk
|
||||
|
||||
if self.langfuse:
|
||||
generation.end()
|
||||
|
||||
def _remove_reasoning_content(self, txt: str) -> str:
|
||||
first_think_start = txt.find("<think>")
|
||||
if first_think_start == -1:
|
||||
return txt
|
||||
|
||||
last_think_end = txt.rfind("</think>")
|
||||
if last_think_end == -1:
|
||||
return txt
|
||||
|
||||
if last_think_end < first_think_start:
|
||||
return txt
|
||||
|
||||
return txt[last_think_end + len("</think>") :]
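A quick illustration of what _remove_reasoning_content keeps, with made-up text: everything after the last closing </think> tag survives, while text without a well-ordered <think>...</think> pair is returned unchanged.

raw = "<think>internal chain of thought</think>The capital of France is Paris."
# _remove_reasoning_content(raw) -> "The capital of France is Paris."

plain = "No reasoning markup here."
# _remove_reasoning_content(plain) -> returned unchanged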
|
||||
|
||||
@staticmethod
|
||||
def _clean_param(chat_partial, **kwargs):
|
||||
func = chat_partial.func
|
||||
sig = inspect.signature(func)
|
||||
keyword_args = []
|
||||
support_var_args = False
|
||||
for param in sig.parameters.values():
|
||||
if param.kind == inspect.Parameter.VAR_KEYWORD or param.kind == inspect.Parameter.VAR_POSITIONAL:
|
||||
support_var_args = True
|
||||
elif param.kind == inspect.Parameter.KEYWORD_ONLY:
|
||||
keyword_args.append(param.name)
|
||||
|
||||
use_kwargs = kwargs
|
||||
if not support_var_args:
|
||||
use_kwargs = {k: v for k, v in kwargs.items() if k in keyword_args}
|
||||
return use_kwargs
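A sketch of the filtering performed by _clean_param: extra keyword arguments are dropped unless the bound chat function either declares them as keyword-only parameters or accepts *args/**kwargs. The two chat functions below are made up for illustration.

from functools import partial
from api.db.services.llm_service import LLMBundle

def strict_chat(system, history, gen_conf, *, temperature=0.7):
    ...

def flexible_chat(system, history, gen_conf, **kwargs):
    ...

# Only keyword-only parameters survive for a function without **kwargs:
LLMBundle._clean_param(partial(strict_chat, "sys", [], {}), temperature=0.2, top_p=0.9)
# -> {"temperature": 0.2}

# A function that accepts **kwargs gets everything passed through unchanged:
LLMBundle._clean_param(partial(flexible_chat, "sys", [], {}), temperature=0.2, top_p=0.9)
# -> {"temperature": 0.2, "top_p": 0.9}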
|
||||
|
||||
def chat(self, system: str, history: list, gen_conf: dict = {}, **kwargs) -> str:
|
||||
if self.langfuse:
|
||||
generation = self.langfuse.start_generation(trace_context=self.trace_context, name="chat", model=self.llm_name, input={"system": system, "history": history})
|
||||
|
||||
chat_partial = partial(self.mdl.chat, system, history, gen_conf)
|
||||
if self.is_tools and self.mdl.is_tools:
|
||||
chat_partial = partial(self.mdl.chat_with_tools, system, history, gen_conf)
|
||||
|
||||
use_kwargs = self._clean_param(chat_partial, **kwargs)
|
||||
txt, used_tokens = chat_partial(**use_kwargs)
|
||||
txt = self._remove_reasoning_content(txt)
|
||||
|
||||
if not self.verbose_tool_use:
|
||||
txt = re.sub(r"<tool_call>.*?</tool_call>", "", txt, flags=re.DOTALL)
|
||||
|
||||
if isinstance(txt, int) and not TenantLLMService.increase_usage(self.tenant_id, self.llm_type, used_tokens, self.llm_name):
|
||||
logging.error("LLMBundle.chat can't update token usage for {}/CHAT llm_name: {}, used_tokens: {}".format(self.tenant_id, self.llm_name, used_tokens))
|
||||
|
||||
if self.langfuse:
|
||||
generation.update(output={"output": txt}, usage_details={"total_tokens": used_tokens})
|
||||
generation.end()
|
||||
|
||||
return txt
|
||||
|
||||
def chat_streamly(self, system: str, history: list, gen_conf: dict = {}, **kwargs):
|
||||
if self.langfuse:
|
||||
generation = self.langfuse.start_generation(trace_context=self.trace_context, name="chat_streamly", model=self.llm_name, input={"system": system, "history": history})
|
||||
|
||||
ans = ""
|
||||
chat_partial = partial(self.mdl.chat_streamly, system, history, gen_conf)
|
||||
total_tokens = 0
|
||||
if self.is_tools and self.mdl.is_tools:
|
||||
chat_partial = partial(self.mdl.chat_streamly_with_tools, system, history, gen_conf)
|
||||
use_kwargs = self._clean_param(chat_partial, **kwargs)
|
||||
for txt in chat_partial(**use_kwargs):
|
||||
if isinstance(txt, int):
|
||||
total_tokens = txt
|
||||
if self.langfuse:
|
||||
generation.update(output={"output": ans})
|
||||
generation.end()
|
||||
break
|
||||
|
||||
if txt.endswith("</think>"):
|
||||
ans = ans.rstrip("</think>")
|
||||
|
||||
if not self.verbose_tool_use:
|
||||
txt = re.sub(r"<tool_call>.*?</tool_call>", "", txt, flags=re.DOTALL)
|
||||
|
||||
ans += txt
|
||||
yield ans
|
||||
|
||||
if total_tokens > 0:
|
||||
if not TenantLLMService.increase_usage(self.tenant_id, self.llm_type, txt, self.llm_name):
|
||||
logging.error("LLMBundle.chat_streamly can't update token usage for {}/CHAT llm_name: {}, content: {}".format(self.tenant_id, self.llm_name, txt))
|
||||
91
api/db/services/mcp_server_service.py
Normal file
@@ -0,0 +1,91 @@
|
||||
#
|
||||
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
from peewee import fn
|
||||
|
||||
from api.db.db_models import DB, MCPServer
|
||||
from api.db.services.common_service import CommonService
|
||||
|
||||
|
||||
class MCPServerService(CommonService):
|
||||
"""Service class for managing MCP server related database operations.
|
||||
|
||||
This class extends CommonService to provide specialized functionality for MCP server management,
|
||||
including MCP server creation, updates, and deletions.
|
||||
|
||||
Attributes:
|
||||
model: The MCPServer model class for database operations.
|
||||
"""
|
||||
|
||||
model = MCPServer
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_servers(cls, tenant_id: str, id_list: list[str] | None, page_number, items_per_page, orderby, desc, keywords):
|
||||
"""Retrieve all MCP servers associated with a tenant.
|
||||
|
||||
This method fetches all MCP servers for a given tenant, ordered by creation time.
|
||||
It only includes fields for list display.
|
||||
|
||||
Args:
|
||||
tenant_id (str): The unique identifier of the tenant.
|
||||
id_list (list[str] | None): Return only servers whose IDs are in this list; ignored when None.
page_number, items_per_page: Pagination controls; applied only when both are set.
orderby (str): Field name to order by.
desc (bool): Whether to sort in descending order.
keywords (str): Optional case-insensitive filter on the server name.
|
||||
|
||||
Returns:
|
||||
list[dict]: List of MCP server dictionaries containing MCP server details.
|
||||
Returns None if no MCP servers are found.
|
||||
"""
|
||||
fields = [
|
||||
cls.model.id,
|
||||
cls.model.name,
|
||||
cls.model.server_type,
|
||||
cls.model.url,
|
||||
cls.model.description,
|
||||
cls.model.variables,
|
||||
cls.model.create_date,
|
||||
cls.model.update_date,
|
||||
]
|
||||
|
||||
query = cls.model.select(*fields).order_by(cls.model.create_time.desc()).where(cls.model.tenant_id == tenant_id)
|
||||
|
||||
if id_list:
|
||||
query = query.where(cls.model.id.in_(id_list))
|
||||
if keywords:
|
||||
query = query.where(fn.LOWER(cls.model.name).contains(keywords.lower()))
|
||||
if desc:
|
||||
query = query.order_by(cls.model.getter_by(orderby).desc())
|
||||
else:
|
||||
query = query.order_by(cls.model.getter_by(orderby).asc())
|
||||
if page_number and items_per_page:
|
||||
query = query.paginate(page_number, items_per_page)
|
||||
|
||||
servers = list(query.dicts())
|
||||
if not servers:
|
||||
return None
|
||||
return servers
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_by_name_and_tenant(cls, name: str, tenant_id: str):
|
||||
try:
|
||||
mcp_server = cls.model.query(name=name, tenant_id=tenant_id)
|
||||
return bool(mcp_server), mcp_server
|
||||
except Exception:
|
||||
return False, None
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def delete_by_tenant_id(cls, tenant_id: str):
|
||||
return cls.model.delete().where(cls.model.tenant_id == tenant_id).execute()
|
||||
263
api/db/services/pipeline_operation_log_service.py
Normal file
@@ -0,0 +1,263 @@
|
||||
#
|
||||
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
from datetime import datetime, timedelta
|
||||
|
||||
from peewee import fn
|
||||
|
||||
from api.db import VALID_PIPELINE_TASK_TYPES, PipelineTaskType
|
||||
from api.db.db_models import DB, Document, PipelineOperationLog
|
||||
from api.db.services.canvas_service import UserCanvasService
|
||||
from api.db.services.common_service import CommonService
|
||||
from api.db.services.document_service import DocumentService
|
||||
from api.db.services.knowledgebase_service import KnowledgebaseService
|
||||
from api.db.services.task_service import GRAPH_RAPTOR_FAKE_DOC_ID
|
||||
from api.utils import current_timestamp, datetime_format, get_uuid
|
||||
|
||||
|
||||
class PipelineOperationLogService(CommonService):
|
||||
model = PipelineOperationLog
|
||||
|
||||
@classmethod
|
||||
def get_file_logs_fields(cls):
|
||||
return [
|
||||
cls.model.id,
|
||||
cls.model.document_id,
|
||||
cls.model.tenant_id,
|
||||
cls.model.kb_id,
|
||||
cls.model.pipeline_id,
|
||||
cls.model.pipeline_title,
|
||||
cls.model.parser_id,
|
||||
cls.model.document_name,
|
||||
cls.model.document_suffix,
|
||||
cls.model.document_type,
|
||||
cls.model.source_from,
|
||||
cls.model.progress,
|
||||
cls.model.progress_msg,
|
||||
cls.model.process_begin_at,
|
||||
cls.model.process_duration,
|
||||
cls.model.dsl,
|
||||
cls.model.task_type,
|
||||
cls.model.operation_status,
|
||||
cls.model.avatar,
|
||||
cls.model.status,
|
||||
cls.model.create_time,
|
||||
cls.model.create_date,
|
||||
cls.model.update_time,
|
||||
cls.model.update_date,
|
||||
]
|
||||
|
||||
@classmethod
|
||||
def get_dataset_logs_fields(cls):
|
||||
return [
|
||||
cls.model.id,
|
||||
cls.model.tenant_id,
|
||||
cls.model.kb_id,
|
||||
cls.model.progress,
|
||||
cls.model.progress_msg,
|
||||
cls.model.process_begin_at,
|
||||
cls.model.process_duration,
|
||||
cls.model.task_type,
|
||||
cls.model.operation_status,
|
||||
cls.model.avatar,
|
||||
cls.model.status,
|
||||
cls.model.create_time,
|
||||
cls.model.create_date,
|
||||
cls.model.update_time,
|
||||
cls.model.update_date,
|
||||
]
|
||||
|
||||
@classmethod
|
||||
def save(cls, **kwargs):
|
||||
"""
|
||||
Callers must wrap this in a transaction; create() below runs it inside DB.atomic().
|
||||
"""
|
||||
sample_obj = cls.model(**kwargs).save(force_insert=True)
|
||||
return sample_obj
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def create(cls, document_id, pipeline_id, task_type, fake_document_ids=[], dsl: str = "{}"):
|
||||
referred_document_id = document_id
|
||||
|
||||
if referred_document_id == GRAPH_RAPTOR_FAKE_DOC_ID and fake_document_ids:
|
||||
referred_document_id = fake_document_ids[0]
|
||||
ok, document = DocumentService.get_by_id(referred_document_id)
|
||||
if not ok:
|
||||
logging.warning(f"Document for referred_document_id {referred_document_id} not found")
|
||||
return
|
||||
DocumentService.update_progress_immediately([document.to_dict()])
|
||||
ok, document = DocumentService.get_by_id(referred_document_id)
|
||||
if not ok:
|
||||
logging.warning(f"Document for referred_document_id {referred_document_id} not found")
|
||||
return
|
||||
if document.progress not in [1, -1]:
|
||||
return
|
||||
operation_status = document.run
|
||||
|
||||
if pipeline_id:
|
||||
ok, user_pipeline = UserCanvasService.get_by_id(pipeline_id)
|
||||
if not ok:
|
||||
raise RuntimeError(f"Pipeline {pipeline_id} not found")
|
||||
tenant_id = user_pipeline.user_id
|
||||
title = user_pipeline.title
|
||||
avatar = user_pipeline.avatar
|
||||
else:
|
||||
ok, kb_info = KnowledgebaseService.get_by_id(document.kb_id)
|
||||
if not ok:
|
||||
raise RuntimeError(f"Cannot find knowledge base {document.kb_id} for referred_document {referred_document_id}")
|
||||
|
||||
tenant_id = kb_info.tenant_id
|
||||
title = document.parser_id
|
||||
avatar = document.thumbnail
|
||||
|
||||
if task_type not in VALID_PIPELINE_TASK_TYPES:
|
||||
raise ValueError(f"Invalid task type: {task_type}")
|
||||
|
||||
if task_type in [PipelineTaskType.GRAPH_RAG, PipelineTaskType.RAPTOR, PipelineTaskType.MINDMAP]:
|
||||
finish_at = document.process_begin_at + timedelta(seconds=document.process_duration)
|
||||
if task_type == PipelineTaskType.GRAPH_RAG:
|
||||
KnowledgebaseService.update_by_id(
|
||||
document.kb_id,
|
||||
{"graphrag_task_finish_at": finish_at},
|
||||
)
|
||||
elif task_type == PipelineTaskType.RAPTOR:
|
||||
KnowledgebaseService.update_by_id(
|
||||
document.kb_id,
|
||||
{"raptor_task_finish_at": finish_at},
|
||||
)
|
||||
elif task_type == PipelineTaskType.MINDMAP:
|
||||
KnowledgebaseService.update_by_id(
|
||||
document.kb_id,
|
||||
{"mindmap_task_finish_at": finish_at},
|
||||
)
|
||||
|
||||
log = dict(
|
||||
id=get_uuid(),
|
||||
document_id=document_id, # GRAPH_RAPTOR_FAKE_DOC_ID or real document_id
|
||||
tenant_id=tenant_id,
|
||||
kb_id=document.kb_id,
|
||||
pipeline_id=pipeline_id,
|
||||
pipeline_title=title,
|
||||
parser_id=document.parser_id,
|
||||
document_name=document.name,
|
||||
document_suffix=document.suffix,
|
||||
document_type=document.type,
|
||||
source_from="", # TODO: add in the future
|
||||
progress=document.progress,
|
||||
progress_msg=document.progress_msg,
|
||||
process_begin_at=document.process_begin_at,
|
||||
process_duration=document.process_duration,
|
||||
dsl=json.loads(dsl),
|
||||
task_type=task_type,
|
||||
operation_status=operation_status,
|
||||
avatar=avatar,
|
||||
)
|
||||
log["create_time"] = current_timestamp()
|
||||
log["create_date"] = datetime_format(datetime.now())
|
||||
log["update_time"] = current_timestamp()
|
||||
log["update_date"] = datetime_format(datetime.now())
|
||||
|
||||
with DB.atomic():
|
||||
obj = cls.save(**log)
|
||||
|
||||
limit = int(os.getenv("PIPELINE_OPERATION_LOG_LIMIT", 1000))
|
||||
total = cls.model.select().where(cls.model.kb_id == document.kb_id).count()
|
||||
|
||||
if total > limit:
|
||||
keep_ids = [m.id for m in cls.model.select(cls.model.id).where(cls.model.kb_id == document.kb_id).order_by(cls.model.create_time.desc()).limit(limit)]
|
||||
|
||||
deleted = cls.model.delete().where(cls.model.kb_id == document.kb_id, cls.model.id.not_in(keep_ids)).execute()
|
||||
logging.info(f"[PipelineOperationLogService] Cleaned {deleted} old logs, kept latest {limit} for {document.kb_id}")
|
||||
|
||||
return obj
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def record_pipeline_operation(cls, document_id, pipeline_id, task_type, fake_document_ids=[]):
|
||||
return cls.create(document_id=document_id, pipeline_id=pipeline_id, task_type=task_type, fake_document_ids=fake_document_ids)
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_file_logs_by_kb_id(cls, kb_id, page_number, items_per_page, orderby, desc, keywords, operation_status, types, suffix, create_date_from=None, create_date_to=None):
|
||||
fields = cls.get_file_logs_fields()
|
||||
if keywords:
|
||||
logs = cls.model.select(*fields).where((cls.model.kb_id == kb_id), (fn.LOWER(cls.model.document_name).contains(keywords.lower())))
|
||||
else:
|
||||
logs = cls.model.select(*fields).where(cls.model.kb_id == kb_id)
|
||||
|
||||
logs = logs.where(cls.model.document_id != GRAPH_RAPTOR_FAKE_DOC_ID)
|
||||
|
||||
if operation_status:
|
||||
logs = logs.where(cls.model.operation_status.in_(operation_status))
|
||||
if types:
|
||||
logs = logs.where(cls.model.document_type.in_(types))
|
||||
if suffix:
|
||||
logs = logs.where(cls.model.document_suffix.in_(suffix))
|
||||
if create_date_from:
|
||||
logs = logs.where(cls.model.create_date >= create_date_from)
|
||||
if create_date_to:
|
||||
logs = logs.where(cls.model.create_date <= create_date_to)
|
||||
|
||||
count = logs.count()
|
||||
if desc:
|
||||
logs = logs.order_by(cls.model.getter_by(orderby).desc())
|
||||
else:
|
||||
logs = logs.order_by(cls.model.getter_by(orderby).asc())
|
||||
|
||||
if page_number and items_per_page:
|
||||
logs = logs.paginate(page_number, items_per_page)
|
||||
|
||||
return list(logs.dicts()), count
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_documents_info(cls, id):
|
||||
fields = [Document.id, Document.name, Document.progress, Document.kb_id]
|
||||
return (
|
||||
cls.model.select(*fields)
|
||||
.join(Document, on=(cls.model.document_id == Document.id))
|
||||
.where(
|
||||
cls.model.id == id
|
||||
)
|
||||
.dicts()
|
||||
)
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_dataset_logs_by_kb_id(cls, kb_id, page_number, items_per_page, orderby, desc, operation_status, create_date_from=None, create_date_to=None):
|
||||
fields = cls.get_dataset_logs_fields()
|
||||
logs = cls.model.select(*fields).where((cls.model.kb_id == kb_id), (cls.model.document_id == GRAPH_RAPTOR_FAKE_DOC_ID))
|
||||
|
||||
if operation_status:
|
||||
logs = logs.where(cls.model.operation_status.in_(operation_status))
|
||||
if create_date_from:
|
||||
logs = logs.where(cls.model.create_date >= create_date_from)
|
||||
if create_date_to:
|
||||
logs = logs.where(cls.model.create_date <= create_date_to)
|
||||
|
||||
count = logs.count()
|
||||
if desc:
|
||||
logs = logs.order_by(cls.model.getter_by(orderby).desc())
|
||||
else:
|
||||
logs = logs.order_by(cls.model.getter_by(orderby).asc())
|
||||
|
||||
if page_number and items_per_page:
|
||||
logs = logs.paginate(page_number, items_per_page)
|
||||
|
||||
return list(logs.dicts()), count
|
||||
117
api/db/services/search_service.py
Normal file
@@ -0,0 +1,117 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
from datetime import datetime
|
||||
|
||||
from peewee import fn
|
||||
|
||||
from api.db import StatusEnum
|
||||
from api.db.db_models import DB, Search, User
|
||||
from api.db.services.common_service import CommonService
|
||||
from api.utils import current_timestamp, datetime_format
|
||||
|
||||
|
||||
class SearchService(CommonService):
|
||||
model = Search
|
||||
|
||||
@classmethod
|
||||
def save(cls, **kwargs):
|
||||
kwargs["create_time"] = current_timestamp()
|
||||
kwargs["create_date"] = datetime_format(datetime.now())
|
||||
kwargs["update_time"] = current_timestamp()
|
||||
kwargs["update_date"] = datetime_format(datetime.now())
|
||||
obj = cls.model.create(**kwargs)
|
||||
return obj
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def accessible4deletion(cls, search_id, user_id) -> bool:
|
||||
search = (
|
||||
cls.model.select(cls.model.id)
|
||||
.where(
|
||||
cls.model.id == search_id,
|
||||
cls.model.created_by == user_id,
|
||||
cls.model.status == StatusEnum.VALID.value,
|
||||
)
|
||||
.first()
|
||||
)
|
||||
return search is not None
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_detail(cls, search_id):
|
||||
fields = [
|
||||
cls.model.id,
|
||||
cls.model.avatar,
|
||||
cls.model.tenant_id,
|
||||
cls.model.name,
|
||||
cls.model.description,
|
||||
cls.model.created_by,
|
||||
cls.model.search_config,
|
||||
cls.model.update_time,
|
||||
User.nickname,
|
||||
User.avatar.alias("tenant_avatar"),
|
||||
]
|
||||
search = (
cls.model.select(*fields)
.join(User, on=((User.id == cls.model.tenant_id) & (User.status == StatusEnum.VALID.value)))
.where((cls.model.id == search_id) & (cls.model.status == StatusEnum.VALID.value))
.first()
)
if not search:
return {}
return search.to_dict()
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_by_tenant_ids(cls, joined_tenant_ids, user_id, page_number, items_per_page, orderby, desc, keywords):
|
||||
fields = [
|
||||
cls.model.id,
|
||||
cls.model.avatar,
|
||||
cls.model.tenant_id,
|
||||
cls.model.name,
|
||||
cls.model.description,
|
||||
cls.model.created_by,
|
||||
cls.model.status,
|
||||
cls.model.update_time,
|
||||
cls.model.create_time,
|
||||
User.nickname,
|
||||
User.avatar.alias("tenant_avatar"),
|
||||
]
|
||||
query = (
|
||||
cls.model.select(*fields)
|
||||
.join(User, on=(cls.model.tenant_id == User.id))
|
||||
.where(((cls.model.tenant_id.in_(joined_tenant_ids)) | (cls.model.tenant_id == user_id)) & (cls.model.status == StatusEnum.VALID.value))
|
||||
)
|
||||
|
||||
if keywords:
|
||||
query = query.where(fn.LOWER(cls.model.name).contains(keywords.lower()))
|
||||
if desc:
|
||||
query = query.order_by(cls.model.getter_by(orderby).desc())
|
||||
else:
|
||||
query = query.order_by(cls.model.getter_by(orderby).asc())
|
||||
|
||||
count = query.count()
|
||||
|
||||
if page_number and items_per_page:
|
||||
query = query.paginate(page_number, items_per_page)
|
||||
|
||||
return list(query.dicts()), count
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def delete_by_tenant_id(cls, tenant_id):
|
||||
return cls.model.delete().where(cls.model.tenant_id == tenant_id).execute()
|
||||
522
api/db/services/task_service.py
Normal file
@@ -0,0 +1,522 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import logging
|
||||
import os
|
||||
import random
|
||||
import xxhash
|
||||
from datetime import datetime
|
||||
|
||||
from api.db.db_utils import bulk_insert_into_db
|
||||
from deepdoc.parser import PdfParser
|
||||
from peewee import JOIN
|
||||
from api.db.db_models import DB, File2Document, File
|
||||
from api.db import StatusEnum, FileType, TaskStatus
|
||||
from api.db.db_models import Task, Document, Knowledgebase, Tenant
|
||||
from api.db.services.common_service import CommonService
|
||||
from api.db.services.document_service import DocumentService
|
||||
from api.utils import current_timestamp, get_uuid
|
||||
from deepdoc.parser.excel_parser import RAGFlowExcelParser
|
||||
from rag.settings import get_svr_queue_name
|
||||
from rag.utils.storage_factory import STORAGE_IMPL
|
||||
from rag.utils.redis_conn import REDIS_CONN
|
||||
from api import settings
|
||||
from rag.nlp import search
|
||||
|
||||
CANVAS_DEBUG_DOC_ID = "dataflow_x"
|
||||
GRAPH_RAPTOR_FAKE_DOC_ID = "graph_raptor_x"
|
||||
|
||||
def trim_header_by_lines(text: str, max_length) -> str:
|
||||
# Trim header text to maximum length while preserving line breaks
|
||||
# Args:
|
||||
# text: Input text to trim
|
||||
# max_length: Maximum allowed length
|
||||
# Returns:
|
||||
# Trimmed text
|
||||
len_text = len(text)
|
||||
if len_text <= max_length:
|
||||
return text
|
||||
for i in range(len_text):
|
||||
if text[i] == '\n' and len_text - i <= max_length:
|
||||
return text[i + 1:]
|
||||
return text
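A short example of the trimming behavior: the oldest lines are dropped at a newline boundary so the kept tail fits within max_length, and text already within the limit is returned untouched. The log text is made up.

from api.db.services.task_service import trim_header_by_lines

log = "step 1 done\nstep 2 done\nstep 3 failed\nretrying step 3"
print(trim_header_by_lines(log, 32))
# -> "step 3 failed\nretrying step 3"  (earliest lines removed first)
print(trim_header_by_lines(log, 1000))
# -> unchanged: the text already fits within max_length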
|
||||
|
||||
|
||||
class TaskService(CommonService):
|
||||
"""Service class for managing document processing tasks.
|
||||
|
||||
This class extends CommonService to provide specialized functionality for document
|
||||
processing task management, including task creation, progress tracking, and chunk
|
||||
management. It handles various document types (PDF, Excel, etc.) and manages their
|
||||
processing lifecycle.
|
||||
|
||||
The class implements a robust task queue system with retry mechanisms and progress
|
||||
tracking, supporting both synchronous and asynchronous task execution.
|
||||
|
||||
Attributes:
|
||||
model: The Task model class for database operations.
|
||||
"""
|
||||
model = Task
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_task(cls, task_id, doc_ids=[]):
|
||||
"""Retrieve detailed task information by task ID.
|
||||
|
||||
This method fetches comprehensive task details including associated document,
|
||||
knowledge base, and tenant information. It also handles task retry logic and
|
||||
progress updates.
|
||||
|
||||
Args:
|
||||
task_id (str): The unique identifier of the task to retrieve.
doc_ids (list, optional): Fallback document IDs used when the task's doc_id is the canvas-debug placeholder.
|
||||
|
||||
Returns:
|
||||
dict: Task details dictionary containing all task information and related metadata.
|
||||
Returns None if task is not found or has exceeded retry limit.
|
||||
"""
|
||||
doc_id = cls.model.doc_id
|
||||
if doc_id == CANVAS_DEBUG_DOC_ID and doc_ids:
|
||||
doc_id = doc_ids[0]
|
||||
|
||||
fields = [
|
||||
cls.model.id,
|
||||
cls.model.doc_id,
|
||||
cls.model.from_page,
|
||||
cls.model.to_page,
|
||||
cls.model.retry_count,
|
||||
Document.kb_id,
|
||||
Document.parser_id,
|
||||
Document.parser_config,
|
||||
Document.name,
|
||||
Document.type,
|
||||
Document.location,
|
||||
Document.size,
|
||||
Knowledgebase.tenant_id,
|
||||
Knowledgebase.language,
|
||||
Knowledgebase.embd_id,
|
||||
Knowledgebase.pagerank,
|
||||
Knowledgebase.parser_config.alias("kb_parser_config"),
|
||||
Tenant.img2txt_id,
|
||||
Tenant.asr_id,
|
||||
Tenant.llm_id,
|
||||
cls.model.update_time,
|
||||
]
|
||||
docs = (
|
||||
cls.model.select(*fields)
|
||||
.join(Document, on=(doc_id == Document.id))
|
||||
.join(Knowledgebase, on=(Document.kb_id == Knowledgebase.id))
|
||||
.join(Tenant, on=(Knowledgebase.tenant_id == Tenant.id))
|
||||
.where(cls.model.id == task_id)
|
||||
)
|
||||
docs = list(docs.dicts())
|
||||
if not docs:
|
||||
return None
|
||||
|
||||
msg = f"\n{datetime.now().strftime('%H:%M:%S')} Task has been received."
|
||||
prog = random.random() / 10.0
|
||||
if docs[0]["retry_count"] >= 3:
|
||||
msg = "\nERROR: Task is abandoned after 3 times attempts."
|
||||
prog = -1
|
||||
|
||||
cls.model.update(
|
||||
progress_msg=cls.model.progress_msg + msg,
|
||||
progress=prog,
|
||||
retry_count=docs[0]["retry_count"] + 1,
|
||||
).where(cls.model.id == docs[0]["id"]).execute()
|
||||
|
||||
if docs[0]["retry_count"] >= 3:
|
||||
return None
|
||||
|
||||
return docs[0]
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_tasks(cls, doc_id: str):
|
||||
"""Retrieve all tasks associated with a document.
|
||||
|
||||
This method fetches all processing tasks for a given document, ordered by page
|
||||
number and creation time. It includes task progress and chunk information.
|
||||
|
||||
Args:
|
||||
doc_id (str): The unique identifier of the document.
|
||||
|
||||
Returns:
|
||||
list[dict]: List of task dictionaries containing task details.
|
||||
Returns None if no tasks are found.
|
||||
"""
|
||||
fields = [
|
||||
cls.model.id,
|
||||
cls.model.from_page,
|
||||
cls.model.progress,
|
||||
cls.model.digest,
|
||||
cls.model.chunk_ids,
|
||||
]
|
||||
tasks = (
|
||||
cls.model.select(*fields).order_by(cls.model.from_page.asc(), cls.model.create_time.desc())
|
||||
.where(cls.model.doc_id == doc_id)
|
||||
)
|
||||
tasks = list(tasks.dicts())
|
||||
if not tasks:
|
||||
return None
|
||||
return tasks
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def update_chunk_ids(cls, id: str, chunk_ids: str):
|
||||
"""Update the chunk IDs associated with a task.
|
||||
|
||||
This method updates the chunk_ids field of a task, which stores the IDs of
|
||||
processed document chunks in a space-separated string format.
|
||||
|
||||
Args:
|
||||
id (str): The unique identifier of the task.
|
||||
chunk_ids (str): Space-separated string of chunk identifiers.
|
||||
"""
|
||||
cls.model.update(chunk_ids=chunk_ids).where(cls.model.id == id).execute()
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_ongoing_doc_name(cls):
|
||||
"""Get names of documents that are currently being processed.
|
||||
|
||||
This method retrieves information about documents that are in the processing state,
|
||||
including their locations and associated IDs. It uses database locking to ensure
|
||||
thread safety when accessing the task information.
|
||||
|
||||
Returns:
|
||||
list[tuple]: A list of tuples, each containing (parent_id/kb_id, location)
|
||||
for documents currently being processed. Returns empty list if
|
||||
no documents are being processed.
|
||||
"""
|
||||
with DB.lock("get_task", -1):
|
||||
docs = (
|
||||
cls.model.select(
|
||||
*[Document.id, Document.kb_id, Document.location, File.parent_id]
|
||||
)
|
||||
.join(Document, on=(cls.model.doc_id == Document.id))
|
||||
.join(
|
||||
File2Document,
|
||||
on=(File2Document.document_id == Document.id),
|
||||
join_type=JOIN.LEFT_OUTER,
|
||||
)
|
||||
.join(
|
||||
File,
|
||||
on=(File2Document.file_id == File.id),
|
||||
join_type=JOIN.LEFT_OUTER,
|
||||
)
|
||||
.where(
|
||||
Document.status == StatusEnum.VALID.value,
|
||||
Document.run == TaskStatus.RUNNING.value,
|
||||
~(Document.type == FileType.VIRTUAL.value),
|
||||
cls.model.progress < 1,
|
||||
cls.model.create_time >= current_timestamp() - 1000 * 600,
|
||||
)
|
||||
)
|
||||
docs = list(docs.dicts())
|
||||
if not docs:
|
||||
return []
|
||||
|
||||
return list(
|
||||
set(
|
||||
[
|
||||
(
|
||||
d["parent_id"] if d["parent_id"] else d["kb_id"],
|
||||
d["location"],
|
||||
)
|
||||
for d in docs
|
||||
]
|
||||
)
|
||||
)
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def do_cancel(cls, id):
|
||||
"""Check if a task should be cancelled based on its document status.
|
||||
|
||||
This method determines whether a task should be cancelled by checking the
|
||||
associated document's run status and progress. A task should be cancelled
|
||||
if its document is marked for cancellation or has negative progress.
|
||||
|
||||
Args:
|
||||
id (str): The unique identifier of the task to check.
|
||||
|
||||
Returns:
|
||||
bool: True if the task should be cancelled, False otherwise.
|
||||
"""
|
||||
task = cls.model.get_by_id(id)
|
||||
_, doc = DocumentService.get_by_id(task.doc_id)
|
||||
return doc.run == TaskStatus.CANCEL.value or doc.progress < 0
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def update_progress(cls, id, info):
|
||||
"""Update the progress information for a task.
|
||||
|
||||
This method updates both the progress message and completion percentage of a task.
|
||||
It handles platform-specific behavior (macOS vs others) and uses database locking
|
||||
when necessary to ensure thread safety.
|
||||
|
||||
Update Rules:
|
||||
- progress_msg: Always appends the new message to the existing one, and trims the result to max 3000 lines.
|
||||
- progress: Only updates if the current progress is not -1 AND
|
||||
(the new progress is -1 OR greater than the existing progress),
|
||||
to avoid overwriting valid progress with invalid or regressive values.
|
||||
|
||||
Args:
|
||||
id (str): The unique identifier of the task to update.
|
||||
info (dict): Dictionary containing progress information with keys:
|
||||
- progress_msg (str, optional): Progress message to append
|
||||
- progress (float, optional): Progress percentage (0.0 to 1.0)
|
||||
"""
|
||||
task = cls.model.get_by_id(id)
|
||||
if not task:
|
||||
logging.warning("Update_progress error: task not found")
|
||||
return
|
||||
|
||||
if os.environ.get("MACOS"):
|
||||
if info["progress_msg"]:
|
||||
progress_msg = trim_header_by_lines(task.progress_msg + "\n" + info["progress_msg"], 3000)
|
||||
cls.model.update(progress_msg=progress_msg).where(cls.model.id == id).execute()
|
||||
if "progress" in info:
|
||||
prog = info["progress"]
|
||||
cls.model.update(progress=prog).where(
|
||||
(cls.model.id == id) &
|
||||
(
|
||||
(cls.model.progress != -1) &
|
||||
((prog == -1) | (prog > cls.model.progress))
|
||||
)
|
||||
).execute()
|
||||
else:
|
||||
with DB.lock("update_progress", -1):
|
||||
if info["progress_msg"]:
|
||||
progress_msg = trim_header_by_lines(task.progress_msg + "\n" + info["progress_msg"], 3000)
|
||||
cls.model.update(progress_msg=progress_msg).where(cls.model.id == id).execute()
|
||||
if "progress" in info:
|
||||
prog = info["progress"]
|
||||
cls.model.update(progress=prog).where(
|
||||
(cls.model.id == id) &
|
||||
(
|
||||
(cls.model.progress != -1) &
|
||||
((prog == -1) | (prog > cls.model.progress))
|
||||
)
|
||||
).execute()
|
||||
|
||||
process_duration = (datetime.now() - task.begin_at).total_seconds()
|
||||
cls.model.update(process_duration=process_duration).where(cls.model.id == id).execute()
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def delete_by_doc_ids(cls, doc_ids):
|
||||
"""Delete task associated with a document."""
|
||||
return cls.model.delete().where(cls.model.doc_id.in_(doc_ids)).execute()
|
||||
|
||||
|
||||
def queue_tasks(doc: dict, bucket: str, name: str, priority: int):
|
||||
"""Create and queue document processing tasks.
|
||||
|
||||
This function creates processing tasks for a document based on its type and configuration.
|
||||
It handles different document types (PDF, Excel, etc.) differently and manages task
|
||||
chunking and configuration. It also implements task reuse optimization by checking
|
||||
for previously completed tasks.
|
||||
|
||||
Args:
|
||||
doc (dict): Document dictionary containing metadata and configuration.
|
||||
bucket (str): Storage bucket name where the document is stored.
|
||||
name (str): File name of the document.
|
||||
priority (int): Priority level for task queueing.
|
||||
|
||||
Note:
|
||||
- For PDF documents, tasks are created per page range based on configuration
|
||||
- For Excel documents, tasks are created per row range
|
||||
- Task digests are calculated for optimization and reuse
|
||||
- Previous task chunks may be reused if available
|
||||
"""
|
||||
def new_task():
|
||||
return {
|
||||
"id": get_uuid(),
|
||||
"doc_id": doc["id"],
|
||||
"progress": 0.0,
|
||||
"from_page": 0,
|
||||
"to_page": 100000000,
|
||||
"begin_at": datetime.now(),
|
||||
}
|
||||
|
||||
parse_task_array = []
|
||||
|
||||
if doc["type"] == FileType.PDF.value:
|
||||
file_bin = STORAGE_IMPL.get(bucket, name)
|
||||
do_layout = doc["parser_config"].get("layout_recognize", "DeepDOC")
|
||||
pages = PdfParser.total_page_number(doc["name"], file_bin)
|
||||
if pages is None:
|
||||
pages = 0
|
||||
page_size = doc["parser_config"].get("task_page_size") or 12
|
||||
if doc["parser_id"] == "paper":
|
||||
page_size = doc["parser_config"].get("task_page_size") or 22
|
||||
if doc["parser_id"] in ["one", "knowledge_graph"] or do_layout != "DeepDOC" or doc["parser_config"].get("toc", True):
|
||||
page_size = 10 ** 9
|
||||
page_ranges = doc["parser_config"].get("pages") or [(1, 10 ** 5)]
|
||||
for s, e in page_ranges:
|
||||
s -= 1
|
||||
s = max(0, s)
|
||||
e = min(e - 1, pages)
|
||||
for p in range(s, e, page_size):
|
||||
task = new_task()
|
||||
task["from_page"] = p
|
||||
task["to_page"] = min(p + page_size, e)
|
||||
parse_task_array.append(task)
|
||||
|
||||
elif doc["parser_id"] == "table":
|
||||
file_bin = STORAGE_IMPL.get(bucket, name)
|
||||
rn = RAGFlowExcelParser.row_number(doc["name"], file_bin)
|
||||
for i in range(0, rn, 3000):
|
||||
task = new_task()
|
||||
task["from_page"] = i
|
||||
task["to_page"] = min(i + 3000, rn)
|
||||
parse_task_array.append(task)
|
||||
else:
|
||||
parse_task_array.append(new_task())
|
||||
|
||||
chunking_config = DocumentService.get_chunking_config(doc["id"])
|
||||
for task in parse_task_array:
|
||||
hasher = xxhash.xxh64()
|
||||
for field in sorted(chunking_config.keys()):
|
||||
if field == "parser_config":
|
||||
for k in ["raptor", "graphrag"]:
|
||||
if k in chunking_config[field]:
|
||||
del chunking_config[field][k]
|
||||
hasher.update(str(chunking_config[field]).encode("utf-8"))
|
||||
for field in ["doc_id", "from_page", "to_page"]:
|
||||
hasher.update(str(task.get(field, "")).encode("utf-8"))
|
||||
task_digest = hasher.hexdigest()
|
||||
task["digest"] = task_digest
|
||||
task["progress"] = 0.0
|
||||
task["priority"] = priority
|
||||
|
||||
prev_tasks = TaskService.get_tasks(doc["id"])
|
||||
ck_num = 0
|
||||
if prev_tasks:
|
||||
for task in parse_task_array:
|
||||
ck_num += reuse_prev_task_chunks(task, prev_tasks, chunking_config)
|
||||
TaskService.filter_delete([Task.doc_id == doc["id"]])
|
||||
pre_chunk_ids = []
|
||||
for pre_task in prev_tasks:
|
||||
if pre_task["chunk_ids"]:
|
||||
pre_chunk_ids.extend(pre_task["chunk_ids"].split())
|
||||
if pre_chunk_ids:
|
||||
settings.docStoreConn.delete({"id": pre_chunk_ids}, search.index_name(chunking_config["tenant_id"]),
|
||||
chunking_config["kb_id"])
|
||||
DocumentService.update_by_id(doc["id"], {"chunk_num": ck_num})
|
||||
|
||||
bulk_insert_into_db(Task, parse_task_array, True)
|
||||
DocumentService.begin2parse(doc["id"])
|
||||
|
||||
unfinished_task_array = [task for task in parse_task_array if task["progress"] < 1.0]
|
||||
for unfinished_task in unfinished_task_array:
|
||||
assert REDIS_CONN.queue_product(
|
||||
get_svr_queue_name(priority), message=unfinished_task
|
||||
), "Can't access Redis. Please check the Redis' status."
|
||||
|
||||
|
||||
def reuse_prev_task_chunks(task: dict, prev_tasks: list[dict], chunking_config: dict):
|
||||
"""Attempt to reuse chunks from previous tasks for optimization.
|
||||
|
||||
This function checks if chunks from previously completed tasks can be reused for
|
||||
the current task, which can significantly improve processing efficiency. It matches
|
||||
tasks based on page ranges and configuration digests.
|
||||
|
||||
Args:
|
||||
task (dict): Current task dictionary to potentially reuse chunks for.
|
||||
prev_tasks (list[dict]): List of previous task dictionaries to check for reuse.
|
||||
chunking_config (dict): Configuration dictionary for chunk processing.
|
||||
|
||||
Returns:
|
||||
int: Number of chunks successfully reused. Returns 0 if no chunks could be reused.
|
||||
|
||||
Note:
|
||||
Chunks can only be reused if:
|
||||
- A previous task exists with matching page range and configuration digest
|
||||
- The previous task was completed successfully (progress = 1.0)
|
||||
- The previous task has valid chunk IDs
|
||||
"""
|
||||
idx = 0
|
||||
while idx < len(prev_tasks):
|
||||
prev_task = prev_tasks[idx]
|
||||
if prev_task.get("from_page", 0) == task.get("from_page", 0) \
|
||||
and prev_task.get("digest", 0) == task.get("digest", ""):
|
||||
break
|
||||
idx += 1
|
||||
|
||||
if idx >= len(prev_tasks):
|
||||
return 0
|
||||
prev_task = prev_tasks[idx]
|
||||
if prev_task["progress"] < 1.0 or not prev_task["chunk_ids"]:
|
||||
return 0
|
||||
task["chunk_ids"] = prev_task["chunk_ids"]
|
||||
task["progress"] = 1.0
|
||||
if "from_page" in task and "to_page" in task and int(task['to_page']) - int(task['from_page']) >= 10 ** 6:
|
||||
task["progress_msg"] = f"Page({task['from_page']}~{task['to_page']}): "
|
||||
else:
|
||||
task["progress_msg"] = ""
|
||||
task["progress_msg"] = " ".join(
|
||||
[datetime.now().strftime("%H:%M:%S"), task["progress_msg"], "Reused previous task's chunks."])
|
||||
prev_task["chunk_ids"] = ""
|
||||
|
||||
return len(task["chunk_ids"].split())
|
||||
|
||||
|
||||
def cancel_all_task_of(doc_id):
|
||||
for t in TaskService.query(doc_id=doc_id):
|
||||
try:
|
||||
REDIS_CONN.set(f"{t.id}-cancel", "x")
|
||||
except Exception as e:
|
||||
logging.exception(e)
|
||||
|
||||
|
||||
def has_canceled(task_id):
|
||||
try:
|
||||
if REDIS_CONN.get(f"{task_id}-cancel"):
|
||||
return True
|
||||
except Exception as e:
|
||||
logging.exception(e)
|
||||
return False
|
||||
|
||||
|
||||
def queue_dataflow(tenant_id: str, flow_id: str, task_id: str, doc_id: str = CANVAS_DEBUG_DOC_ID, file: dict = None, priority: int = 0, rerun: bool = False) -> tuple[bool, str]:
|
||||
|
||||
task = dict(
|
||||
id=task_id,
|
||||
doc_id=doc_id,
|
||||
from_page=0,
|
||||
to_page=100000000,
|
||||
task_type="dataflow" if not rerun else "dataflow_rerun",
|
||||
priority=priority,
|
||||
begin_at=datetime.now(),
|
||||
)
|
||||
if doc_id not in [CANVAS_DEBUG_DOC_ID, GRAPH_RAPTOR_FAKE_DOC_ID]:
|
||||
TaskService.model.delete().where(TaskService.model.doc_id == doc_id).execute()
|
||||
DocumentService.begin2parse(doc_id)
|
||||
bulk_insert_into_db(model=Task, data_source=[task], replace_on_conflict=True)
|
||||
|
||||
task["kb_id"] = DocumentService.get_knowledgebase_id(doc_id)
|
||||
task["tenant_id"] = tenant_id
|
||||
task["dataflow_id"] = flow_id
|
||||
task["file"] = file
|
||||
|
||||
if not REDIS_CONN.queue_product(
|
||||
get_svr_queue_name(priority), message=task
|
||||
):
|
||||
return False, "Can't access Redis. Please check the Redis' status."
|
||||
|
||||
return True, ""
|
||||
257
api/db/services/tenant_llm_service.py
Normal file
@@ -0,0 +1,257 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import logging
|
||||
from langfuse import Langfuse
|
||||
from api import settings
|
||||
from api.db import LLMType
|
||||
from api.db.db_models import DB, LLMFactories, TenantLLM
|
||||
from api.db.services.common_service import CommonService
|
||||
from api.db.services.langfuse_service import TenantLangfuseService
|
||||
from api.db.services.user_service import TenantService
|
||||
from rag.llm import ChatModel, CvModel, EmbeddingModel, RerankModel, Seq2txtModel, TTSModel
|
||||
|
||||
|
||||
class LLMFactoriesService(CommonService):
|
||||
model = LLMFactories
|
||||
|
||||
|
||||
class TenantLLMService(CommonService):
|
||||
model = TenantLLM
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_api_key(cls, tenant_id, model_name):
|
||||
mdlnm, fid = TenantLLMService.split_model_name_and_factory(model_name)
|
||||
if not fid:
|
||||
objs = cls.query(tenant_id=tenant_id, llm_name=mdlnm)
|
||||
else:
|
||||
objs = cls.query(tenant_id=tenant_id, llm_name=mdlnm, llm_factory=fid)
|
||||
|
||||
if (not objs) and fid:
|
||||
if fid == "LocalAI":
|
||||
mdlnm += "___LocalAI"
|
||||
elif fid == "HuggingFace":
|
||||
mdlnm += "___HuggingFace"
|
||||
elif fid == "OpenAI-API-Compatible":
|
||||
mdlnm += "___OpenAI-API"
|
||||
elif fid == "VLLM":
|
||||
mdlnm += "___VLLM"
|
||||
objs = cls.query(tenant_id=tenant_id, llm_name=mdlnm, llm_factory=fid)
|
||||
if not objs:
|
||||
return
|
||||
return objs[0]
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_my_llms(cls, tenant_id):
|
||||
fields = [cls.model.llm_factory, LLMFactories.logo, LLMFactories.tags, cls.model.model_type, cls.model.llm_name, cls.model.used_tokens]
|
||||
objs = cls.model.select(*fields).join(LLMFactories, on=(cls.model.llm_factory == LLMFactories.name)).where(cls.model.tenant_id == tenant_id, ~cls.model.api_key.is_null()).dicts()
|
||||
|
||||
return list(objs)
|
||||
|
||||
@staticmethod
|
||||
def split_model_name_and_factory(model_name):
|
||||
arr = model_name.split("@")
|
||||
if len(arr) < 2:
|
||||
return model_name, None
|
||||
if len(arr) > 2:
|
||||
return "@".join(arr[0:-1]), arr[-1]
|
||||
|
||||
# model name must be xxx@yyy
|
||||
try:
|
||||
model_factories = settings.FACTORY_LLM_INFOS
|
||||
model_providers = set([f["name"] for f in model_factories])
|
||||
if arr[-1] not in model_providers:
|
||||
return model_name, None
|
||||
return arr[0], arr[-1]
|
||||
except Exception as e:
|
||||
logging.exception(f"TenantLLMService.split_model_name_and_factory got exception: {e}")
|
||||
return model_name, None
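# Illustrative examples only (hypothetical model names): "qwen-max@Tongyi-Qianwen" splits
# into ("qwen-max", "Tongyi-Qianwen") when the suffix is a known provider in
# settings.FACTORY_LLM_INFOS, while a bare "qwen-max" or an unrecognized suffix yields
# (model_name, None).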
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_model_config(cls, tenant_id, llm_type, llm_name=None):
|
||||
from api.db.services.llm_service import LLMService
|
||||
e, tenant = TenantService.get_by_id(tenant_id)
|
||||
if not e:
|
||||
raise LookupError("Tenant not found")
|
||||
|
||||
if llm_type == LLMType.EMBEDDING.value:
|
||||
mdlnm = tenant.embd_id if not llm_name else llm_name
|
||||
elif llm_type == LLMType.SPEECH2TEXT.value:
|
||||
mdlnm = tenant.asr_id
|
||||
elif llm_type == LLMType.IMAGE2TEXT.value:
|
||||
mdlnm = tenant.img2txt_id if not llm_name else llm_name
|
||||
elif llm_type == LLMType.CHAT.value:
|
||||
mdlnm = tenant.llm_id if not llm_name else llm_name
|
||||
elif llm_type == LLMType.RERANK:
|
||||
mdlnm = tenant.rerank_id if not llm_name else llm_name
|
||||
elif llm_type == LLMType.TTS:
|
||||
mdlnm = tenant.tts_id if not llm_name else llm_name
|
||||
else:
|
||||
assert False, "LLM type error"
|
||||
|
||||
model_config = cls.get_api_key(tenant_id, mdlnm)
|
||||
mdlnm, fid = TenantLLMService.split_model_name_and_factory(mdlnm)
|
||||
if not model_config: # for some cases seems fid mismatch
|
||||
model_config = cls.get_api_key(tenant_id, mdlnm)
|
||||
if model_config:
|
||||
model_config = model_config.to_dict()
|
||||
llm = LLMService.query(llm_name=mdlnm) if not fid else LLMService.query(llm_name=mdlnm, fid=fid)
|
||||
if not llm and fid: # for some cases seems fid mismatch
|
||||
llm = LLMService.query(llm_name=mdlnm)
|
||||
if llm:
|
||||
model_config["is_tools"] = llm[0].is_tools
|
||||
if not model_config:
|
||||
if llm_type in [LLMType.EMBEDDING, LLMType.RERANK]:
|
||||
llm = LLMService.query(llm_name=mdlnm) if not fid else LLMService.query(llm_name=mdlnm, fid=fid)
|
||||
if llm and llm[0].fid in ["Youdao", "FastEmbed", "BAAI"]:
|
||||
model_config = {"llm_factory": llm[0].fid, "api_key": "", "llm_name": mdlnm, "api_base": ""}
|
||||
if not model_config:
|
||||
if mdlnm == "flag-embedding":
|
||||
model_config = {"llm_factory": "Tongyi-Qianwen", "api_key": "", "llm_name": llm_name, "api_base": ""}
|
||||
else:
|
||||
if not mdlnm:
|
||||
raise LookupError(f"Type of {llm_type} model is not set.")
|
||||
raise LookupError("Model({}) not authorized".format(mdlnm))
|
||||
return model_config
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def model_instance(cls, tenant_id, llm_type, llm_name=None, lang="Chinese", **kwargs):
|
||||
model_config = TenantLLMService.get_model_config(tenant_id, llm_type, llm_name)
|
||||
kwargs.update({"provider": model_config["llm_factory"]})
|
||||
if llm_type == LLMType.EMBEDDING.value:
|
||||
if model_config["llm_factory"] not in EmbeddingModel:
|
||||
return
|
||||
return EmbeddingModel[model_config["llm_factory"]](model_config["api_key"], model_config["llm_name"], base_url=model_config["api_base"])
|
||||
|
||||
if llm_type == LLMType.RERANK:
|
||||
if model_config["llm_factory"] not in RerankModel:
|
||||
return
|
||||
return RerankModel[model_config["llm_factory"]](model_config["api_key"], model_config["llm_name"], base_url=model_config["api_base"])
|
||||
|
||||
if llm_type == LLMType.IMAGE2TEXT.value:
|
||||
if model_config["llm_factory"] not in CvModel:
|
||||
return
|
||||
return CvModel[model_config["llm_factory"]](model_config["api_key"], model_config["llm_name"], lang, base_url=model_config["api_base"], **kwargs)
|
||||
|
||||
if llm_type == LLMType.CHAT.value:
|
||||
if model_config["llm_factory"] not in ChatModel:
|
||||
return
|
||||
return ChatModel[model_config["llm_factory"]](model_config["api_key"], model_config["llm_name"], base_url=model_config["api_base"], **kwargs)
|
||||
|
||||
if llm_type == LLMType.SPEECH2TEXT:
|
||||
if model_config["llm_factory"] not in Seq2txtModel:
|
||||
return
|
||||
return Seq2txtModel[model_config["llm_factory"]](key=model_config["api_key"], model_name=model_config["llm_name"], lang=lang, base_url=model_config["api_base"])
|
||||
if llm_type == LLMType.TTS:
|
||||
if model_config["llm_factory"] not in TTSModel:
|
||||
return
|
||||
return TTSModel[model_config["llm_factory"]](
|
||||
model_config["api_key"],
|
||||
model_config["llm_name"],
|
||||
base_url=model_config["api_base"],
|
||||
)
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def increase_usage(cls, tenant_id, llm_type, used_tokens, llm_name=None):
|
||||
e, tenant = TenantService.get_by_id(tenant_id)
|
||||
if not e:
|
||||
logging.error(f"Tenant not found: {tenant_id}")
|
||||
return 0
|
||||
|
||||
llm_map = {
|
||||
LLMType.EMBEDDING.value: tenant.embd_id if not llm_name else llm_name,
|
||||
LLMType.SPEECH2TEXT.value: tenant.asr_id,
|
||||
LLMType.IMAGE2TEXT.value: tenant.img2txt_id,
|
||||
LLMType.CHAT.value: tenant.llm_id if not llm_name else llm_name,
|
||||
LLMType.RERANK.value: tenant.rerank_id if not llm_name else llm_name,
|
||||
LLMType.TTS.value: tenant.tts_id if not llm_name else llm_name,
|
||||
}
|
||||
|
||||
mdlnm = llm_map.get(llm_type)
|
||||
if mdlnm is None:
|
||||
logging.error(f"LLM type error: {llm_type}")
|
||||
return 0
|
||||
|
||||
llm_name, llm_factory = TenantLLMService.split_model_name_and_factory(mdlnm)
|
||||
|
||||
try:
|
||||
num = (
|
||||
cls.model.update(used_tokens=cls.model.used_tokens + used_tokens)
|
||||
.where(cls.model.tenant_id == tenant_id, cls.model.llm_name == llm_name, cls.model.llm_factory == llm_factory if llm_factory else True)
|
||||
.execute()
|
||||
)
|
||||
except Exception:
|
||||
logging.exception("TenantLLMService.increase_usage got exception,Failed to update used_tokens for tenant_id=%s, llm_name=%s", tenant_id, llm_name)
|
||||
return 0
|
||||
|
||||
return num
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_openai_models(cls):
|
||||
objs = cls.model.select().where((cls.model.llm_factory == "OpenAI"), ~(cls.model.llm_name == "text-embedding-3-small"), ~(cls.model.llm_name == "text-embedding-3-large")).dicts()
|
||||
return list(objs)
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def delete_by_tenant_id(cls, tenant_id):
|
||||
return cls.model.delete().where(cls.model.tenant_id == tenant_id).execute()
|
||||
|
||||
@staticmethod
|
||||
def llm_id2llm_type(llm_id: str) -> str | None:
|
||||
from api.db.services.llm_service import LLMService
|
||||
llm_id, *_ = TenantLLMService.split_model_name_and_factory(llm_id)
|
||||
llm_factories = settings.FACTORY_LLM_INFOS
|
||||
for llm_factory in llm_factories:
|
||||
for llm in llm_factory["llm"]:
|
||||
if llm_id == llm["llm_name"]:
|
||||
return llm["model_type"].split(",")[-1]
|
||||
|
||||
for llm in LLMService.query(llm_name=llm_id):
|
||||
return llm.model_type
|
||||
|
||||
llm = TenantLLMService.get_or_none(llm_name=llm_id)
|
||||
if llm:
|
||||
return llm.model_type
|
||||
for llm in TenantLLMService.query(llm_name=llm_id):
|
||||
return llm.model_type
|
||||
|
||||
|
||||
class LLM4Tenant:
|
||||
def __init__(self, tenant_id, llm_type, llm_name=None, lang="Chinese", **kwargs):
|
||||
self.tenant_id = tenant_id
|
||||
self.llm_type = llm_type
|
||||
self.llm_name = llm_name
|
||||
self.mdl = TenantLLMService.model_instance(tenant_id, llm_type, llm_name, lang=lang, **kwargs)
|
||||
assert self.mdl, "Can't find model for {}/{}/{}".format(tenant_id, llm_type, llm_name)
|
||||
model_config = TenantLLMService.get_model_config(tenant_id, llm_type, llm_name)
|
||||
self.max_length = model_config.get("max_tokens", 8192)
|
||||
|
||||
self.is_tools = model_config.get("is_tools", False)
|
||||
self.verbose_tool_use = kwargs.get("verbose_tool_use")
|
||||
|
||||
langfuse_keys = TenantLangfuseService.filter_by_tenant(tenant_id=tenant_id)
|
||||
self.langfuse = None
|
||||
if langfuse_keys:
|
||||
langfuse = Langfuse(public_key=langfuse_keys.public_key, secret_key=langfuse_keys.secret_key, host=langfuse_keys.host)
|
||||
if langfuse.auth_check():
|
||||
self.langfuse = langfuse
|
||||
trace_id = self.langfuse.create_trace_id()
|
||||
self.trace_context = {"trace_id": trace_id}
|
||||
63
api/db/services/user_canvas_version.py
Normal file
@@ -0,0 +1,63 @@
|
||||
from api.db.db_models import UserCanvasVersion, DB
|
||||
from api.db.services.common_service import CommonService
|
||||
from peewee import DoesNotExist
|
||||
|
||||
class UserCanvasVersionService(CommonService):
|
||||
model = UserCanvasVersion
|
||||
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def list_by_canvas_id(cls, user_canvas_id):
|
||||
try:
|
||||
user_canvas_version = cls.model.select(
|
||||
*[cls.model.id,
|
||||
cls.model.create_time,
|
||||
cls.model.title,
|
||||
cls.model.create_date,
|
||||
cls.model.update_date,
|
||||
cls.model.user_canvas_id,
|
||||
cls.model.update_time]
|
||||
).where(cls.model.user_canvas_id == user_canvas_id)
|
||||
return user_canvas_version
|
||||
except DoesNotExist:
|
||||
return None
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_all_canvas_version_by_canvas_ids(cls, canvas_ids):
|
||||
fields = [cls.model.id]
|
||||
versions = cls.model.select(*fields).where(cls.model.user_canvas_id.in_(canvas_ids))
|
||||
versions = versions.order_by(cls.model.create_time.asc())
|
||||
offset, limit = 0, 100
|
||||
res = []
|
||||
while True:
|
||||
version_batch = versions.offset(offset).limit(limit)
|
||||
_temp = list(version_batch.dicts())
|
||||
if not _temp:
|
||||
break
|
||||
res.extend(_temp)
|
||||
offset += limit
|
||||
return res
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def delete_all_versions(cls, user_canvas_id):
|
||||
try:
|
||||
user_canvas_version = cls.model.select().where(cls.model.user_canvas_id == user_canvas_id).order_by(cls.model.create_time.desc())
|
||||
if user_canvas_version.count() > 20:
|
||||
delete_ids = []
|
||||
for i in range(20, user_canvas_version.count()):
|
||||
delete_ids.append(user_canvas_version[i].id)
|
||||
|
||||
cls.delete_by_ids(delete_ids)
|
||||
return True
|
||||
except DoesNotExist:
|
||||
return None
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
|
||||
|
||||
318
api/db/services/user_service.py
Normal file
@@ -0,0 +1,318 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import hashlib
|
||||
from datetime import datetime
|
||||
import logging
|
||||
|
||||
import peewee
|
||||
from werkzeug.security import generate_password_hash, check_password_hash
|
||||
|
||||
from api.db import UserTenantRole
|
||||
from api.db.db_models import DB, UserTenant
|
||||
from api.db.db_models import User, Tenant
|
||||
from api.db.services.common_service import CommonService
|
||||
from api.utils import get_uuid, current_timestamp, datetime_format
|
||||
from api.db import StatusEnum
|
||||
from rag.settings import MINIO
|
||||
|
||||
|
||||
class UserService(CommonService):
|
||||
"""Service class for managing user-related database operations.
|
||||
|
||||
This class extends CommonService to provide specialized functionality for user management,
|
||||
including authentication, user creation, updates, and deletions.
|
||||
|
||||
Attributes:
|
||||
model: The User model class for database operations.
|
||||
"""
|
||||
model = User
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def query(cls, cols=None, reverse=None, order_by=None, **kwargs):
|
||||
if 'access_token' in kwargs:
|
||||
access_token = kwargs['access_token']
|
||||
|
||||
# Reject empty, None, or whitespace-only access tokens
|
||||
if not access_token or not str(access_token).strip():
|
||||
logging.warning("UserService.query: Rejecting empty access_token query")
|
||||
return cls.model.select().where(cls.model.id == "INVALID_EMPTY_TOKEN") # Returns empty result
|
||||
|
||||
# Reject tokens that are too short (should be UUID, 32+ chars)
|
||||
if len(str(access_token).strip()) < 32:
|
||||
logging.warning(f"UserService.query: Rejecting short access_token query: {len(str(access_token))} chars")
|
||||
return cls.model.select().where(cls.model.id == "INVALID_SHORT_TOKEN") # Returns empty result
|
||||
|
||||
# Reject tokens that start with "INVALID_" (from logout)
|
||||
if str(access_token).startswith("INVALID_"):
|
||||
logging.warning("UserService.query: Rejecting invalidated access_token")
|
||||
return cls.model.select().where(cls.model.id == "INVALID_LOGOUT_TOKEN") # Returns empty result
|
||||
|
||||
# Call parent query method for valid requests
|
||||
return super().query(cols=cols, reverse=reverse, order_by=order_by, **kwargs)
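# Illustrative only: an empty token, a token shorter than 32 characters, or a token
# prefixed with "INVALID_" all resolve to an empty result set here, e.g.
# UserService.query(access_token="") returns no users, while a full-length token is
# passed through to the normal CommonService.query() lookup.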
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def filter_by_id(cls, user_id):
|
||||
"""Retrieve a user by their ID.
|
||||
|
||||
Args:
|
||||
user_id: The unique identifier of the user.
|
||||
|
||||
Returns:
|
||||
User object if found, None otherwise.
|
||||
"""
|
||||
try:
|
||||
user = cls.model.select().where(cls.model.id == user_id).get()
|
||||
return user
|
||||
except peewee.DoesNotExist:
|
||||
return None
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def query_user(cls, email, password):
|
||||
"""Authenticate a user with email and password.
|
||||
|
||||
Args:
|
||||
email: User's email address.
|
||||
password: User's password in plain text.
|
||||
|
||||
Returns:
|
||||
User object if authentication successful, None otherwise.
|
||||
"""
|
||||
user = cls.model.select().where((cls.model.email == email),
|
||||
(cls.model.status == StatusEnum.VALID.value)).first()
|
||||
if user and check_password_hash(str(user.password), password):
|
||||
return user
|
||||
else:
|
||||
return None
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def query_user_by_email(cls, email):
|
||||
users = cls.model.select().where((cls.model.email == email))
|
||||
return list(users)
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def save(cls, **kwargs):
|
||||
if "id" not in kwargs:
|
||||
kwargs["id"] = get_uuid()
|
||||
if "password" in kwargs:
|
||||
kwargs["password"] = generate_password_hash(
|
||||
str(kwargs["password"]))
|
||||
|
||||
kwargs["create_time"] = current_timestamp()
|
||||
kwargs["create_date"] = datetime_format(datetime.now())
|
||||
kwargs["update_time"] = current_timestamp()
|
||||
kwargs["update_date"] = datetime_format(datetime.now())
|
||||
obj = cls.model(**kwargs).save(force_insert=True)
|
||||
return obj
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def delete_user(cls, user_ids, update_user_dict):
|
||||
with DB.atomic():
|
||||
cls.model.update({"status": 0}).where(
|
||||
cls.model.id.in_(user_ids)).execute()
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def update_user(cls, user_id, user_dict):
|
||||
with DB.atomic():
|
||||
if user_dict:
|
||||
user_dict["update_time"] = current_timestamp()
|
||||
user_dict["update_date"] = datetime_format(datetime.now())
|
||||
cls.model.update(user_dict).where(
|
||||
cls.model.id == user_id).execute()
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def update_user_password(cls, user_id, new_password):
|
||||
with DB.atomic():
|
||||
update_dict = {
|
||||
"password": generate_password_hash(str(new_password)),
|
||||
"update_time": current_timestamp(),
|
||||
"update_date": datetime_format(datetime.now())
|
||||
}
|
||||
cls.model.update(update_dict).where(cls.model.id == user_id).execute()
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def is_admin(cls, user_id):
|
||||
return cls.model.select().where(
|
||||
cls.model.id == user_id,
|
||||
cls.model.is_superuser == 1).count() > 0
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_all_users(cls):
|
||||
users = cls.model.select()
|
||||
return list(users)
|
||||
|
||||
|
||||
class TenantService(CommonService):
|
||||
"""Service class for managing tenant-related database operations.
|
||||
|
||||
This class extends CommonService to provide functionality for tenant management,
|
||||
including tenant information retrieval and credit management.
|
||||
|
||||
Attributes:
|
||||
model: The Tenant model class for database operations.
|
||||
"""
|
||||
model = Tenant
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_info_by(cls, user_id):
|
||||
fields = [
|
||||
cls.model.id.alias("tenant_id"),
|
||||
cls.model.name,
|
||||
cls.model.llm_id,
|
||||
cls.model.embd_id,
|
||||
cls.model.rerank_id,
|
||||
cls.model.asr_id,
|
||||
cls.model.img2txt_id,
|
||||
cls.model.tts_id,
|
||||
cls.model.parser_ids,
|
||||
UserTenant.role]
|
||||
return list(cls.model.select(*fields)
|
||||
.join(UserTenant, on=((cls.model.id == UserTenant.tenant_id) & (UserTenant.user_id == user_id) & (UserTenant.status == StatusEnum.VALID.value) & (UserTenant.role == UserTenantRole.OWNER)))
|
||||
.where(cls.model.status == StatusEnum.VALID.value).dicts())
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_joined_tenants_by_user_id(cls, user_id):
|
||||
fields = [
|
||||
cls.model.id.alias("tenant_id"),
|
||||
cls.model.name,
|
||||
cls.model.llm_id,
|
||||
cls.model.embd_id,
|
||||
cls.model.asr_id,
|
||||
cls.model.img2txt_id,
|
||||
UserTenant.role]
|
||||
return list(cls.model.select(*fields)
|
||||
.join(UserTenant, on=((cls.model.id == UserTenant.tenant_id) & (UserTenant.user_id == user_id) & (UserTenant.status == StatusEnum.VALID.value) & (UserTenant.role == UserTenantRole.NORMAL)))
|
||||
.where(cls.model.status == StatusEnum.VALID.value).dicts())
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def decrease(cls, user_id, num):
|
||||
num = cls.model.update(credit=cls.model.credit - num).where(
|
||||
cls.model.id == user_id).execute()
|
||||
if num == 0:
|
||||
raise LookupError("Tenant not found which is supposed to be there")
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def user_gateway(cls, tenant_id):
|
||||
hashobj = hashlib.sha256(tenant_id.encode("utf-8"))
|
||||
return int(hashobj.hexdigest(), 16) % len(MINIO)
|
||||
|
||||
|
||||
class UserTenantService(CommonService):
|
||||
"""Service class for managing user-tenant relationship operations.
|
||||
|
||||
This class extends CommonService to handle the many-to-many relationship
|
||||
between users and tenants, managing user roles and tenant memberships.
|
||||
|
||||
Attributes:
|
||||
model: The UserTenant model class for database operations.
|
||||
"""
|
||||
model = UserTenant
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def filter_by_id(cls, user_tenant_id):
|
||||
try:
|
||||
user_tenant = cls.model.select().where((cls.model.id == user_tenant_id) & (cls.model.status == StatusEnum.VALID.value)).get()
|
||||
return user_tenant
|
||||
except peewee.DoesNotExist:
|
||||
return None
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def save(cls, **kwargs):
|
||||
if "id" not in kwargs:
|
||||
kwargs["id"] = get_uuid()
|
||||
obj = cls.model(**kwargs).save(force_insert=True)
|
||||
return obj
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_by_tenant_id(cls, tenant_id):
|
||||
fields = [
|
||||
cls.model.id,
|
||||
cls.model.user_id,
|
||||
cls.model.status,
|
||||
cls.model.role,
|
||||
User.nickname,
|
||||
User.email,
|
||||
User.avatar,
|
||||
User.is_authenticated,
|
||||
User.is_active,
|
||||
User.is_anonymous,
|
||||
User.status,
|
||||
User.update_date,
|
||||
User.is_superuser]
|
||||
return list(cls.model.select(*fields)
|
||||
.join(User, on=((cls.model.user_id == User.id) & (cls.model.status == StatusEnum.VALID.value) & (cls.model.role != UserTenantRole.OWNER)))
|
||||
.where(cls.model.tenant_id == tenant_id)
|
||||
.dicts())
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_tenants_by_user_id(cls, user_id):
|
||||
fields = [
|
||||
cls.model.tenant_id,
|
||||
cls.model.role,
|
||||
User.nickname,
|
||||
User.email,
|
||||
User.avatar,
|
||||
User.update_date
|
||||
]
|
||||
return list(cls.model.select(*fields)
|
||||
.join(User, on=((cls.model.tenant_id == User.id) & (UserTenant.user_id == user_id) & (UserTenant.status == StatusEnum.VALID.value)))
|
||||
.where(cls.model.status == StatusEnum.VALID.value).dicts())
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_user_tenant_relation_by_user_id(cls, user_id):
|
||||
fields = [
|
||||
cls.model.id,
|
||||
cls.model.user_id,
|
||||
cls.model.tenant_id,
|
||||
cls.model.role
|
||||
]
|
||||
return list(cls.model.select(*fields).where(cls.model.user_id == user_id).dicts())
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def get_num_members(cls, user_id: str):
|
||||
cnt_members = cls.model.select(peewee.fn.COUNT(cls.model.id)).where(cls.model.tenant_id == user_id).scalar()
|
||||
return cnt_members
|
||||
|
||||
@classmethod
|
||||
@DB.connection_context()
|
||||
def filter_by_tenant_and_user_id(cls, tenant_id, user_id):
|
||||
try:
|
||||
user_tenant = cls.model.select().where(
|
||||
(cls.model.tenant_id == tenant_id) & (cls.model.status == StatusEnum.VALID.value) &
|
||||
(cls.model.user_id == user_id)
|
||||
).first()
|
||||
return user_tenant
|
||||
except peewee.DoesNotExist:
|
||||
return None
|
||||
92
api/models/kb_models.py
Normal file
@@ -0,0 +1,92 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
from typing import Optional, List, Dict, Any
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
|
||||
class CreateKnowledgeBaseRequest(BaseModel):
|
||||
"""创建知识库请求模型"""
|
||||
name: str = Field(..., description="知识库名称")
|
||||
description: Optional[str] = Field(None, description="知识库描述")
|
||||
parser_id: Optional[str] = Field("naive", description="解析器ID")
|
||||
parser_config: Optional[Dict[str, Any]] = Field(None, description="解析器配置")
|
||||
embd_id: Optional[str] = Field(None, description="嵌入模型ID")
|
||||
|
||||
|
||||
class UpdateKnowledgeBaseRequest(BaseModel):
|
||||
"""更新知识库请求模型"""
|
||||
kb_id: str = Field(..., description="知识库ID")
|
||||
name: str = Field(..., description="知识库名称")
|
||||
pagerank: Optional[int] = Field(0, description="页面排名")
|
||||
|
||||
|
||||
class DeleteKnowledgeBaseRequest(BaseModel):
|
||||
"""删除知识库请求模型"""
|
||||
kb_id: str = Field(..., description="知识库ID")
|
||||
|
||||
|
||||
class ListKnowledgeBasesRequest(BaseModel):
|
||||
"""列出知识库请求模型"""
|
||||
owner_ids: Optional[List[str]] = Field([], description="所有者ID列表")
|
||||
|
||||
|
||||
class RemoveTagsRequest(BaseModel):
|
||||
"""移除标签请求模型"""
|
||||
tags: List[str] = Field(..., description="要移除的标签列表")
|
||||
|
||||
|
||||
class RenameTagRequest(BaseModel):
|
||||
"""重命名标签请求模型"""
|
||||
from_tag: str = Field(..., description="原标签名")
|
||||
to_tag: str = Field(..., description="新标签名")
|
||||
|
||||
|
||||
class RunGraphRAGRequest(BaseModel):
|
||||
"""运行GraphRAG请求模型"""
|
||||
kb_id: str = Field(..., description="知识库ID")
|
||||
|
||||
|
||||
class RunRaptorRequest(BaseModel):
|
||||
"""运行RAPTOR请求模型"""
|
||||
kb_id: str = Field(..., description="知识库ID")
|
||||
|
||||
|
||||
class RunMindmapRequest(BaseModel):
|
||||
"""运行Mindmap请求模型"""
|
||||
kb_id: str = Field(..., description="知识库ID")
|
||||
|
||||
|
||||
class ListPipelineLogsRequest(BaseModel):
|
||||
"""列出管道日志请求模型"""
|
||||
operation_status: Optional[List[str]] = Field([], description="操作状态列表")
|
||||
types: Optional[List[str]] = Field([], description="文件类型列表")
|
||||
suffix: Optional[List[str]] = Field([], description="文件后缀列表")
|
||||
|
||||
|
||||
class ListPipelineDatasetLogsRequest(BaseModel):
|
||||
"""列出管道数据集日志请求模型"""
|
||||
operation_status: Optional[List[str]] = Field([], description="操作状态列表")
|
||||
|
||||
|
||||
class DeletePipelineLogsRequest(BaseModel):
|
||||
"""删除管道日志请求模型"""
|
||||
log_ids: List[str] = Field(..., description="日志ID列表")
|
||||
|
||||
|
||||
class UnbindTaskRequest(BaseModel):
|
||||
"""解绑任务请求模型"""
|
||||
kb_id: str = Field(..., description="知识库ID")
|
||||
pipeline_task_type: str = Field(..., description="管道任务类型")
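# Minimal usage sketch (hypothetical values): these Pydantic models validate the JSON
# bodies of the FastAPI knowledge-base endpoints, e.g.
#
#   req = CreateKnowledgeBaseRequest(name="demo_kb", parser_id="naive")
#   payload = req.model_dump(exclude_none=True)  # use .dict() if Pydantic v1 is in use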
|
||||
170
api/ragflow_server.py
Normal file
@@ -0,0 +1,170 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
# from beartype import BeartypeConf
|
||||
# from beartype.claw import beartype_all # <-- you didn't sign up for this
|
||||
# beartype_all(conf=BeartypeConf(violation_type=UserWarning)) # <-- emit warnings from all code
|
||||
|
||||
from api.utils.log_utils import init_root_logger
|
||||
from plugin import GlobalPluginManager
|
||||
init_root_logger("ragflow_server")
|
||||
|
||||
import logging
|
||||
import os
|
||||
import signal
|
||||
import sys
|
||||
import time
|
||||
import traceback
|
||||
import threading
|
||||
import uuid
|
||||
|
||||
from werkzeug.serving import run_simple
|
||||
from api import settings
|
||||
from api.apps import app, smtp_mail_server
|
||||
from api.db.runtime_config import RuntimeConfig
|
||||
from api.db.services.document_service import DocumentService
|
||||
from api import utils
|
||||
|
||||
from api.db.db_models import init_database_tables as init_web_db
|
||||
from api.db.init_data import init_web_data
|
||||
from api.versions import get_ragflow_version
|
||||
from api.utils.configs import show_configs
|
||||
from rag.settings import print_rag_settings
|
||||
from rag.utils.mcp_tool_call_conn import shutdown_all_mcp_sessions
|
||||
from rag.utils.redis_conn import RedisDistributedLock
|
||||
|
||||
stop_event = threading.Event()
|
||||
|
||||
RAGFLOW_DEBUGPY_LISTEN = int(os.environ.get('RAGFLOW_DEBUGPY_LISTEN', "0"))
|
||||
|
||||
def update_progress():
|
||||
lock_value = str(uuid.uuid4())
|
||||
redis_lock = RedisDistributedLock("update_progress", lock_value=lock_value, timeout=60)
|
||||
logging.info(f"update_progress lock_value: {lock_value}")
|
||||
while not stop_event.is_set():
|
||||
try:
|
||||
if redis_lock.acquire():
|
||||
DocumentService.update_progress()
|
||||
redis_lock.release()
|
||||
except Exception:
|
||||
logging.exception("update_progress exception")
|
||||
finally:
|
||||
try:
|
||||
redis_lock.release()
|
||||
except Exception:
|
||||
logging.exception("update_progress exception")
|
||||
stop_event.wait(6)
|
||||
|
||||
def signal_handler(sig, frame):
|
||||
logging.info("Received interrupt signal, shutting down...")
|
||||
shutdown_all_mcp_sessions()
|
||||
stop_event.set()
|
||||
time.sleep(1)
|
||||
sys.exit(0)
|
||||
|
||||
if __name__ == '__main__':
|
||||
logging.info(r"""
|
||||
____ ___ ______ ______ __
|
||||
/ __ \ / | / ____// ____// /____ _ __
|
||||
/ /_/ // /| | / / __ / /_ / // __ \| | /| / /
|
||||
/ _, _// ___ |/ /_/ // __/ / // /_/ /| |/ |/ /
|
||||
/_/ |_|/_/ |_|\____//_/ /_/ \____/ |__/|__/
|
||||
|
||||
""")
|
||||
logging.info(
|
||||
f'RAGFlow version: {get_ragflow_version()}'
|
||||
)
|
||||
logging.info(
|
||||
f'project base: {utils.file_utils.get_project_base_directory()}'
|
||||
)
|
||||
show_configs()
|
||||
settings.init_settings()
|
||||
print_rag_settings()
|
||||
|
||||
if RAGFLOW_DEBUGPY_LISTEN > 0:
|
||||
logging.info(f"debugpy listen on {RAGFLOW_DEBUGPY_LISTEN}")
|
||||
import debugpy
|
||||
debugpy.listen(("0.0.0.0", RAGFLOW_DEBUGPY_LISTEN))
|
||||
|
||||
# init db
|
||||
init_web_db()
|
||||
init_web_data()
|
||||
# init runtime config
|
||||
import argparse
|
||||
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument(
|
||||
"--version", default=False, help="RAGFlow version", action="store_true"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--debug", default=False, help="debug mode", action="store_true"
|
||||
)
|
||||
args = parser.parse_args()
|
||||
if args.version:
|
||||
print(get_ragflow_version())
|
||||
sys.exit(0)
|
||||
|
||||
RuntimeConfig.DEBUG = args.debug
|
||||
if RuntimeConfig.DEBUG:
|
||||
logging.info("run on debug mode")
|
||||
|
||||
RuntimeConfig.init_env()
|
||||
RuntimeConfig.init_config(JOB_SERVER_HOST=settings.HOST_IP, HTTP_PORT=settings.HOST_PORT)
|
||||
|
||||
GlobalPluginManager.load_plugins()
|
||||
|
||||
signal.signal(signal.SIGINT, signal_handler)
|
||||
signal.signal(signal.SIGTERM, signal_handler)
|
||||
|
||||
def delayed_start_update_progress():
|
||||
logging.info("Starting update_progress thread (delayed)")
|
||||
t = threading.Thread(target=update_progress, daemon=True)
|
||||
t.start()
|
||||
|
||||
if RuntimeConfig.DEBUG:
|
||||
if os.environ.get("WERKZEUG_RUN_MAIN") == "true":
|
||||
threading.Timer(1.0, delayed_start_update_progress).start()
|
||||
else:
|
||||
threading.Timer(1.0, delayed_start_update_progress).start()
|
||||
|
||||
# init smtp server
|
||||
if settings.SMTP_CONF:
|
||||
app.config["MAIL_SERVER"] = settings.MAIL_SERVER
|
||||
app.config["MAIL_PORT"] = settings.MAIL_PORT
|
||||
app.config["MAIL_USE_SSL"] = settings.MAIL_USE_SSL
|
||||
app.config["MAIL_USE_TLS"] = settings.MAIL_USE_TLS
|
||||
app.config["MAIL_USERNAME"] = settings.MAIL_USERNAME
|
||||
app.config["MAIL_PASSWORD"] = settings.MAIL_PASSWORD
|
||||
app.config["MAIL_DEFAULT_SENDER"] = settings.MAIL_DEFAULT_SENDER
|
||||
smtp_mail_server.init_app(app)
|
||||
|
||||
|
||||
# start http server
|
||||
try:
|
||||
logging.info("RAGFlow HTTP server start...")
|
||||
run_simple(
|
||||
hostname=settings.HOST_IP,
|
||||
port=settings.HOST_PORT,
|
||||
application=app,
|
||||
threaded=True,
|
||||
use_reloader=RuntimeConfig.DEBUG,
|
||||
use_debugger=RuntimeConfig.DEBUG,
|
||||
)
|
||||
except Exception:
|
||||
traceback.print_exc()
|
||||
stop_event.set()
|
||||
time.sleep(1)
|
||||
os.kill(os.getpid(), signal.SIGKILL)
|
||||
179
api/ragflow_server_fastapi.py
Normal file
@@ -0,0 +1,179 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
from api.utils.log_utils import init_root_logger
|
||||
from plugin import GlobalPluginManager
|
||||
init_root_logger("ragflow_server")
|
||||
|
||||
import logging
|
||||
import os
|
||||
import signal
|
||||
import sys
|
||||
import time
|
||||
import traceback
|
||||
import threading
|
||||
import uuid
|
||||
import argparse
|
||||
|
||||
import uvicorn
|
||||
|
||||
from api import settings
|
||||
from api.db.runtime_config import RuntimeConfig
|
||||
from api.db.services.document_service import DocumentService
|
||||
from api import utils
|
||||
|
||||
from api.db.db_models import init_database_tables as init_web_db
|
||||
from api.db.init_data import init_web_data
|
||||
from api.versions import get_ragflow_version
|
||||
from api.utils.configs import show_configs
|
||||
from rag.settings import print_rag_settings
|
||||
from rag.utils.mcp_tool_call_conn import shutdown_all_mcp_sessions
|
||||
from rag.utils.redis_conn import RedisDistributedLock
|
||||
|
||||
# Global stop event
|
||||
stop_event = threading.Event()
|
||||
|
||||
# Debugger port configuration
|
||||
RAGFLOW_DEBUGPY_LISTEN = int(os.environ.get('RAGFLOW_DEBUGPY_LISTEN', "0"))
|
||||
|
||||
def update_progress():
|
||||
"""更新进度线程函数"""
|
||||
lock_value = str(uuid.uuid4())
|
||||
redis_lock = RedisDistributedLock("update_progress", lock_value=lock_value, timeout=60)
|
||||
logging.info(f"update_progress lock_value: {lock_value}")
|
||||
while not stop_event.is_set():
|
||||
try:
|
||||
if redis_lock.acquire():
|
||||
DocumentService.update_progress()
|
||||
redis_lock.release()
|
||||
except Exception:
|
||||
logging.exception("update_progress exception")
|
||||
finally:
|
||||
try:
|
||||
redis_lock.release()
|
||||
except Exception:
|
||||
logging.exception("update_progress exception")
|
||||
stop_event.wait(6)
|
||||
|
||||
def signal_handler(sig, frame):
|
||||
"""信号处理器"""
|
||||
logging.info("Received interrupt signal, shutting down...")
|
||||
shutdown_all_mcp_sessions()
|
||||
stop_event.set()
|
||||
time.sleep(1)
|
||||
sys.exit(0)
|
||||
|
||||
def setup_health_check(app):
|
||||
"""设置健康检查端点"""
|
||||
@app.get("/health")
|
||||
async def health_check():
|
||||
return {"status": "healthy", "version": get_ragflow_version()}
|
||||
|
||||
def main():
|
||||
"""主函数"""
|
||||
logging.info(r"""
|
||||
____ ___ ______ ______ __
|
||||
/ __ \ / | / ____// ____// /____ _ __
|
||||
/ /_/ // /| | / / __ / /_ / // __ \| | /| / /
|
||||
/ _, _// ___ |/ /_/ // __/ / // /_/ /| |/ |/ /
|
||||
/_/ |_|/_/ |_|\____//_/ /_/ \____/ |__/|__/
|
||||
|
||||
""")
|
||||
logging.info(f'RAGFlow version: {get_ragflow_version()}')
|
||||
logging.info(f'project base: {utils.file_utils.get_project_base_directory()}')
|
||||
|
||||
show_configs()
|
||||
settings.init_settings()
|
||||
print_rag_settings()
|
||||
|
||||
# Debug mode configuration
|
||||
if RAGFLOW_DEBUGPY_LISTEN > 0:
|
||||
logging.info(f"debugpy listen on {RAGFLOW_DEBUGPY_LISTEN}")
|
||||
try:
|
||||
import debugpy
|
||||
debugpy.listen(("0.0.0.0", RAGFLOW_DEBUGPY_LISTEN))
|
||||
except ImportError:
|
||||
logging.warning("debugpy not available, skipping debug setup")
|
||||
|
||||
# Initialize the database
|
||||
init_web_db()
|
||||
init_web_data()
|
||||
|
||||
# Parse command-line arguments
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument(
|
||||
"--version", default=False, help="RAGFlow version", action="store_true"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--debug", default=False, help="debug mode", action="store_true"
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
if args.version:
|
||||
print(get_ragflow_version())
|
||||
sys.exit(0)
|
||||
|
||||
RuntimeConfig.DEBUG = args.debug
|
||||
if RuntimeConfig.DEBUG:
|
||||
logging.info("run on debug mode")
|
||||
|
||||
RuntimeConfig.init_env()
|
||||
RuntimeConfig.init_config(JOB_SERVER_HOST=settings.HOST_IP, HTTP_PORT=settings.HOST_PORT)
|
||||
|
||||
# Load plugins
|
||||
GlobalPluginManager.load_plugins()
|
||||
|
||||
# Set up signal handlers
|
||||
signal.signal(signal.SIGINT, signal_handler)
|
||||
signal.signal(signal.SIGTERM, signal_handler)
|
||||
|
||||
def delayed_start_update_progress():
|
||||
"""延迟启动进度更新线程"""
|
||||
logging.info("Starting update_progress thread (delayed)")
|
||||
t = threading.Thread(target=update_progress, daemon=True)
|
||||
t.start()
|
||||
|
||||
# Start the progress-update thread
|
||||
if RuntimeConfig.DEBUG:
|
||||
threading.Timer(1.0, delayed_start_update_progress).start()
|
||||
else:
|
||||
threading.Timer(1.0, delayed_start_update_progress).start()
|
||||
|
||||
# Import the FastAPI application
|
||||
from api.apps.__init___fastapi import app
|
||||
|
||||
# Register the health-check endpoint
|
||||
setup_health_check(app)
|
||||
|
||||
# Start the HTTP server
|
||||
try:
|
||||
logging.info("RAGFlow HTTP server start...")
|
||||
uvicorn.run(
|
||||
app,
|
||||
host=settings.HOST_IP,
|
||||
port=settings.HOST_PORT,
|
||||
log_level="info" if not RuntimeConfig.DEBUG else "debug",
|
||||
reload=RuntimeConfig.DEBUG,
|
||||
access_log=True
|
||||
)
|
||||
except Exception:
|
||||
traceback.print_exc()
|
||||
stop_event.set()
|
||||
time.sleep(1)
|
||||
os.kill(os.getpid(), signal.SIGKILL)
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
278
api/settings.py
Normal file
@@ -0,0 +1,278 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import json
|
||||
import os
|
||||
import secrets
|
||||
from datetime import date
|
||||
from enum import Enum, IntEnum
|
||||
|
||||
import rag.utils
|
||||
import rag.utils.es_conn
|
||||
import rag.utils.infinity_conn
|
||||
import rag.utils.opensearch_conn
|
||||
from api.constants import RAG_FLOW_SERVICE_NAME
|
||||
from api.utils.configs import decrypt_database_config, get_base_config
|
||||
from api.utils.file_utils import get_project_base_directory
|
||||
from rag.nlp import search
|
||||
|
||||
LIGHTEN = int(os.environ.get("LIGHTEN", "0"))
|
||||
|
||||
LLM = None
|
||||
LLM_FACTORY = None
|
||||
LLM_BASE_URL = None
|
||||
CHAT_MDL = ""
|
||||
EMBEDDING_MDL = ""
|
||||
RERANK_MDL = ""
|
||||
ASR_MDL = ""
|
||||
IMAGE2TEXT_MDL = ""
|
||||
CHAT_CFG = ""
|
||||
EMBEDDING_CFG = ""
|
||||
RERANK_CFG = ""
|
||||
ASR_CFG = ""
|
||||
IMAGE2TEXT_CFG = ""
|
||||
API_KEY = None
|
||||
PARSERS = None
|
||||
HOST_IP = None
|
||||
HOST_PORT = None
|
||||
SECRET_KEY = None
|
||||
FACTORY_LLM_INFOS = None
|
||||
|
||||
DATABASE_TYPE = os.getenv("DB_TYPE", "mysql")
|
||||
DATABASE = decrypt_database_config(name=DATABASE_TYPE)
|
||||
|
||||
# authentication
|
||||
AUTHENTICATION_CONF = None
|
||||
|
||||
# client
|
||||
CLIENT_AUTHENTICATION = None
|
||||
HTTP_APP_KEY = None
|
||||
GITHUB_OAUTH = None
|
||||
FEISHU_OAUTH = None
|
||||
OAUTH_CONFIG = None
|
||||
DOC_ENGINE = None
|
||||
docStoreConn = None
|
||||
|
||||
retrievaler = None
|
||||
kg_retrievaler = None
|
||||
|
||||
# user registration switch
|
||||
REGISTER_ENABLED = 1
|
||||
|
||||
|
||||
# sandbox-executor-manager
|
||||
SANDBOX_ENABLED = 0
|
||||
SANDBOX_HOST = None
|
||||
STRONG_TEST_COUNT = int(os.environ.get("STRONG_TEST_COUNT", "8"))
|
||||
|
||||
BUILTIN_EMBEDDING_MODELS = ["BAAI/bge-large-zh-v1.5@BAAI", "maidalun1020/bce-embedding-base_v1@Youdao"]
|
||||
|
||||
SMTP_CONF = None
|
||||
MAIL_SERVER = ""
|
||||
MAIL_PORT = 0
|
||||
MAIL_USE_SSL = True
|
||||
MAIL_USE_TLS = False
|
||||
MAIL_USERNAME = ""
|
||||
MAIL_PASSWORD = ""
|
||||
MAIL_DEFAULT_SENDER = ()
|
||||
MAIL_FRONTEND_URL = ""
|
||||
|
||||
|
||||
def get_or_create_secret_key():
|
||||
secret_key = os.environ.get("RAGFLOW_SECRET_KEY")
|
||||
if secret_key and len(secret_key) >= 32:
|
||||
return secret_key
|
||||
|
||||
# Check if there's a configured secret key
|
||||
configured_key = get_base_config(RAG_FLOW_SERVICE_NAME, {}).get("secret_key")
|
||||
if configured_key and configured_key != str(date.today()) and len(configured_key) >= 32:
|
||||
return configured_key
|
||||
|
||||
# Generate a new secure key and warn about it
|
||||
import logging
|
||||
|
||||
new_key = secrets.token_hex(32)
|
||||
logging.warning(f"SECURITY WARNING: Using auto-generated SECRET_KEY. Generated key: {new_key}")
|
||||
return new_key
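# Recommended usage (sketch): pin a stable key through the environment instead of relying
# on the auto-generated one, e.g.
#   export RAGFLOW_SECRET_KEY=$(python -c "import secrets; print(secrets.token_hex(32))")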
|
||||
|
||||
|
||||
def init_settings():
|
||||
global LLM, LLM_FACTORY, LLM_BASE_URL, LIGHTEN, DATABASE_TYPE, DATABASE, FACTORY_LLM_INFOS, REGISTER_ENABLED
|
||||
LIGHTEN = int(os.environ.get("LIGHTEN", "0"))
|
||||
DATABASE_TYPE = os.getenv("DB_TYPE", "mysql")
|
||||
DATABASE = decrypt_database_config(name=DATABASE_TYPE)
|
||||
LLM = get_base_config("user_default_llm", {}) or {}
|
||||
LLM_DEFAULT_MODELS = LLM.get("default_models", {}) or {}
|
||||
LLM_FACTORY = LLM.get("factory", "") or ""
|
||||
LLM_BASE_URL = LLM.get("base_url", "") or ""
|
||||
try:
|
||||
REGISTER_ENABLED = int(os.environ.get("REGISTER_ENABLED", "1"))
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
try:
|
||||
with open(os.path.join(get_project_base_directory(), "conf", "llm_factories.json"), "r") as f:
|
||||
FACTORY_LLM_INFOS = json.load(f)["factory_llm_infos"]
|
||||
except Exception:
|
||||
FACTORY_LLM_INFOS = []
|
||||
|
||||
global CHAT_MDL, EMBEDDING_MDL, RERANK_MDL, ASR_MDL, IMAGE2TEXT_MDL
|
||||
global CHAT_CFG, EMBEDDING_CFG, RERANK_CFG, ASR_CFG, IMAGE2TEXT_CFG
|
||||
if not LIGHTEN:
|
||||
EMBEDDING_MDL = BUILTIN_EMBEDDING_MODELS[0]
|
||||
|
||||
global API_KEY, PARSERS, HOST_IP, HOST_PORT, SECRET_KEY
|
||||
API_KEY = LLM.get("api_key")
|
||||
PARSERS = LLM.get(
|
||||
"parsers", "naive:General,qa:Q&A,resume:Resume,manual:Manual,table:Table,paper:Paper,book:Book,laws:Laws,presentation:Presentation,picture:Picture,one:One,audio:Audio,email:Email,tag:Tag"
|
||||
)
|
||||
|
||||
chat_entry = _parse_model_entry(LLM_DEFAULT_MODELS.get("chat_model", CHAT_MDL))
|
||||
embedding_entry = _parse_model_entry(LLM_DEFAULT_MODELS.get("embedding_model", EMBEDDING_MDL))
|
||||
rerank_entry = _parse_model_entry(LLM_DEFAULT_MODELS.get("rerank_model", RERANK_MDL))
|
||||
asr_entry = _parse_model_entry(LLM_DEFAULT_MODELS.get("asr_model", ASR_MDL))
|
||||
image2text_entry = _parse_model_entry(LLM_DEFAULT_MODELS.get("image2text_model", IMAGE2TEXT_MDL))
|
||||
|
||||
CHAT_CFG = _resolve_per_model_config(chat_entry, LLM_FACTORY, API_KEY, LLM_BASE_URL)
|
||||
EMBEDDING_CFG = _resolve_per_model_config(embedding_entry, LLM_FACTORY, API_KEY, LLM_BASE_URL)
|
||||
RERANK_CFG = _resolve_per_model_config(rerank_entry, LLM_FACTORY, API_KEY, LLM_BASE_URL)
|
||||
ASR_CFG = _resolve_per_model_config(asr_entry, LLM_FACTORY, API_KEY, LLM_BASE_URL)
|
||||
IMAGE2TEXT_CFG = _resolve_per_model_config(image2text_entry, LLM_FACTORY, API_KEY, LLM_BASE_URL)
|
||||
|
||||
CHAT_MDL = CHAT_CFG.get("model", "") or ""
|
||||
EMBEDDING_MDL = EMBEDDING_CFG.get("model", "") or ""
|
||||
RERANK_MDL = RERANK_CFG.get("model", "") or ""
|
||||
ASR_MDL = ASR_CFG.get("model", "") or ""
|
||||
IMAGE2TEXT_MDL = IMAGE2TEXT_CFG.get("model", "") or ""
|
||||
|
||||
HOST_IP = get_base_config(RAG_FLOW_SERVICE_NAME, {}).get("host", "127.0.0.1")
|
||||
HOST_PORT = get_base_config(RAG_FLOW_SERVICE_NAME, {}).get("http_port")
|
||||
|
||||
SECRET_KEY = get_or_create_secret_key()
|
||||
|
||||
global AUTHENTICATION_CONF, CLIENT_AUTHENTICATION, HTTP_APP_KEY, GITHUB_OAUTH, FEISHU_OAUTH, OAUTH_CONFIG
|
||||
# authentication
|
||||
AUTHENTICATION_CONF = get_base_config("authentication", {})
|
||||
|
||||
# client
|
||||
CLIENT_AUTHENTICATION = AUTHENTICATION_CONF.get("client", {}).get("switch", False)
|
||||
HTTP_APP_KEY = AUTHENTICATION_CONF.get("client", {}).get("http_app_key")
|
||||
GITHUB_OAUTH = get_base_config("oauth", {}).get("github")
|
||||
FEISHU_OAUTH = get_base_config("oauth", {}).get("feishu")
|
||||
|
||||
OAUTH_CONFIG = get_base_config("oauth", {})
|
||||
|
||||
global DOC_ENGINE, docStoreConn, retrievaler, kg_retrievaler
|
||||
DOC_ENGINE = os.environ.get("DOC_ENGINE", "elasticsearch")
|
||||
# DOC_ENGINE = os.environ.get('DOC_ENGINE', "opensearch")
|
||||
lower_case_doc_engine = DOC_ENGINE.lower()
|
||||
if lower_case_doc_engine == "elasticsearch":
|
||||
docStoreConn = rag.utils.es_conn.ESConnection()
|
||||
elif lower_case_doc_engine == "infinity":
|
||||
docStoreConn = rag.utils.infinity_conn.InfinityConnection()
|
||||
elif lower_case_doc_engine == "opensearch":
|
||||
docStoreConn = rag.utils.opensearch_conn.OSConnection()
|
||||
else:
|
||||
raise Exception(f"Not supported doc engine: {DOC_ENGINE}")
|
||||
|
||||
retrievaler = search.Dealer(docStoreConn)
|
||||
from graphrag import search as kg_search
|
||||
|
||||
kg_retrievaler = kg_search.KGSearch(docStoreConn)
|
||||
|
||||
if int(os.environ.get("SANDBOX_ENABLED", "0")):
|
||||
global SANDBOX_HOST
|
||||
SANDBOX_HOST = os.environ.get("SANDBOX_HOST", "sandbox-executor-manager")
|
||||
|
||||
global SMTP_CONF, MAIL_SERVER, MAIL_PORT, MAIL_USE_SSL, MAIL_USE_TLS
|
||||
global MAIL_USERNAME, MAIL_PASSWORD, MAIL_DEFAULT_SENDER, MAIL_FRONTEND_URL
|
||||
SMTP_CONF = get_base_config("smtp", {})
|
||||
|
||||
MAIL_SERVER = SMTP_CONF.get("mail_server", "")
|
||||
MAIL_PORT = SMTP_CONF.get("mail_port", 0)
|
||||
MAIL_USE_SSL = SMTP_CONF.get("mail_use_ssl", True)
|
||||
MAIL_USE_TLS = SMTP_CONF.get("mail_use_tls", False)
|
||||
MAIL_USERNAME = SMTP_CONF.get("mail_username", "")
|
||||
MAIL_PASSWORD = SMTP_CONF.get("mail_password", "")
|
||||
mail_default_sender = SMTP_CONF.get("mail_default_sender", [])
|
||||
if mail_default_sender and len(mail_default_sender) >= 2:
|
||||
MAIL_DEFAULT_SENDER = (mail_default_sender[0], mail_default_sender[1])
|
||||
MAIL_FRONTEND_URL = SMTP_CONF.get("mail_frontend_url", "")
|
||||
|
||||
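# Minimal wiring sketch (assumption: not an existing helper in this repository) showing
# where init_settings() would be called when the service runs under FastAPI, so that the
# module-level configuration is populated before the first request is served:
#
#     from contextlib import asynccontextmanager
#     from fastapi import FastAPI
#     from api import settings
#
#     @asynccontextmanager
#     async def lifespan(app: FastAPI):
#         settings.init_settings()
#         yield
#
#     app = FastAPI(lifespan=lifespan)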
|
||||
class CustomEnum(Enum):
|
||||
@classmethod
|
||||
def valid(cls, value):
|
||||
try:
|
||||
cls(value)
|
||||
return True
|
||||
except BaseException:
|
||||
return False
|
||||
|
||||
@classmethod
|
||||
def values(cls):
|
||||
return [member.value for member in cls.__members__.values()]
|
||||
|
||||
@classmethod
|
||||
def names(cls):
|
||||
return [member.name for member in cls.__members__.values()]
|
||||
|
||||
|
||||
class RetCode(IntEnum, CustomEnum):
|
||||
SUCCESS = 0
|
||||
NOT_EFFECTIVE = 10
|
||||
EXCEPTION_ERROR = 100
|
||||
ARGUMENT_ERROR = 101
|
||||
DATA_ERROR = 102
|
||||
OPERATING_ERROR = 103
|
||||
CONNECTION_ERROR = 105
|
||||
RUNNING = 106
|
||||
PERMISSION_ERROR = 108
|
||||
AUTHENTICATION_ERROR = 109
|
||||
UNAUTHORIZED = 401
|
||||
SERVER_ERROR = 500
|
||||
FORBIDDEN = 403
|
||||
NOT_FOUND = 404
|
||||
|
||||
|
||||
def _parse_model_entry(entry):
|
||||
if isinstance(entry, str):
|
||||
return {"name": entry, "factory": None, "api_key": None, "base_url": None}
|
||||
if isinstance(entry, dict):
|
||||
name = entry.get("name") or entry.get("model") or ""
|
||||
return {
|
||||
"name": name,
|
||||
"factory": entry.get("factory"),
|
||||
"api_key": entry.get("api_key"),
|
||||
"base_url": entry.get("base_url"),
|
||||
}
|
||||
return {"name": "", "factory": None, "api_key": None, "base_url": None}
|
||||
|
||||
|
||||
def _resolve_per_model_config(entry_dict, backup_factory, backup_api_key, backup_base_url):
|
||||
name = (entry_dict.get("name") or "").strip()
|
||||
m_factory = entry_dict.get("factory") or backup_factory or ""
|
||||
m_api_key = entry_dict.get("api_key") or backup_api_key or ""
|
||||
m_base_url = entry_dict.get("base_url") or backup_base_url or ""
|
||||
|
||||
if name and "@" not in name and m_factory:
|
||||
name = f"{name}@{m_factory}"
|
||||
|
||||
return {
|
||||
"model": name,
|
||||
"factory": m_factory,
|
||||
"api_key": m_api_key,
|
||||
"base_url": m_base_url,
|
||||
}
|
||||
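# Worked example of the two helpers above (model name, factory, key and URL are
# illustrative values only): a bare model name inherits the tenant-level factory,
# API key and base URL, and gets the "@factory" suffix appended.
#
#     entry = _parse_model_entry("deepseek-chat")
#     cfg = _resolve_per_model_config(entry, "DeepSeek", "sk-xxx", "https://api.deepseek.com/v1")
#     # cfg == {"model": "deepseek-chat@DeepSeek", "factory": "DeepSeek",
#     #         "api_key": "sk-xxx", "base_url": "https://api.deepseek.com/v1"}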
132
api/utils/__init__.py
Normal file
132
api/utils/__init__.py
Normal file
@@ -0,0 +1,132 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import base64
|
||||
import datetime
|
||||
import hashlib
|
||||
import os
|
||||
import socket
|
||||
import time
|
||||
import uuid
|
||||
import requests
|
||||
|
||||
import importlib
|
||||
|
||||
from .common import string_to_bytes
|
||||
|
||||
|
||||
def current_timestamp():
|
||||
return int(time.time() * 1000)
|
||||
|
||||
|
||||
def timestamp_to_date(timestamp, format_string="%Y-%m-%d %H:%M:%S"):
|
||||
if not timestamp:
|
||||
timestamp = time.time()
|
||||
timestamp = int(timestamp) / 1000
|
||||
time_array = time.localtime(timestamp)
|
||||
str_date = time.strftime(format_string, time_array)
|
||||
return str_date
|
||||
|
||||
|
||||
def date_string_to_timestamp(time_str, format_string="%Y-%m-%d %H:%M:%S"):
|
||||
time_array = time.strptime(time_str, format_string)
|
||||
time_stamp = int(time.mktime(time_array) * 1000)
|
||||
return time_stamp
|
||||
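# Round-trip sketch for the two helpers above; both work in local time, so the exact
# millisecond value depends on the machine's timezone:
#
#     ms = date_string_to_timestamp("2024-01-01 00:00:00")
#     timestamp_to_date(ms)   # -> "2024-01-01 00:00:00"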
|
||||
|
||||
def get_lan_ip():
|
||||
if os.name != "nt":
|
||||
import fcntl
|
||||
import struct
|
||||
|
||||
def get_interface_ip(ifname):
|
||||
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
|
||||
return socket.inet_ntoa(
|
||||
fcntl.ioctl(s.fileno(), 0x8915, struct.pack('256s', string_to_bytes(ifname[:15])))[20:24])
|
||||
|
||||
ip = socket.gethostbyname(socket.getfqdn())
|
||||
if ip.startswith("127.") and os.name != "nt":
|
||||
interfaces = [
|
||||
"bond1",
|
||||
"eth0",
|
||||
"eth1",
|
||||
"eth2",
|
||||
"wlan0",
|
||||
"wlan1",
|
||||
"wifi0",
|
||||
"ath0",
|
||||
"ath1",
|
||||
"ppp0",
|
||||
]
|
||||
for ifname in interfaces:
|
||||
try:
|
||||
ip = get_interface_ip(ifname)
|
||||
break
|
||||
except IOError:
|
||||
pass
|
||||
return ip or ''
|
||||
|
||||
|
||||
def from_dict_hook(in_dict: dict):
|
||||
if "type" in in_dict and "data" in in_dict:
|
||||
if in_dict["module"] is None:
|
||||
return in_dict["data"]
|
||||
else:
|
||||
return getattr(importlib.import_module(
|
||||
in_dict["module"]), in_dict["type"])(**in_dict["data"])
|
||||
else:
|
||||
return in_dict
|
||||
|
||||
|
||||
def get_uuid():
|
||||
return uuid.uuid1().hex
|
||||
|
||||
|
||||
def datetime_format(date_time: datetime.datetime) -> datetime.datetime:
|
||||
return datetime.datetime(date_time.year, date_time.month, date_time.day,
|
||||
date_time.hour, date_time.minute, date_time.second)
|
||||
|
||||
|
||||
def get_format_time() -> datetime.datetime:
|
||||
return datetime_format(datetime.datetime.now())
|
||||
|
||||
|
||||
def str2date(date_time: str):
|
||||
return datetime.datetime.strptime(date_time, '%Y-%m-%d')
|
||||
|
||||
|
||||
def elapsed2time(elapsed):
|
||||
seconds = elapsed / 1000
|
||||
minuter, second = divmod(seconds, 60)
|
||||
hour, minuter = divmod(minuter, 60)
|
||||
return '%02d:%02d:%02d' % (hour, minuter, second)
|
||||
|
||||
|
||||
def download_img(url):
|
||||
if not url:
|
||||
return ""
|
||||
response = requests.get(url)
|
||||
return "data:" + \
|
||||
response.headers.get('Content-Type', 'image/jpg') + ";" + \
|
||||
"base64," + base64.b64encode(response.content).decode("utf-8")
|
||||
|
||||
|
||||
def delta_seconds(date_string: str):
|
||||
dt = datetime.datetime.strptime(date_string, "%Y-%m-%d %H:%M:%S")
|
||||
return (datetime.datetime.now() - dt).total_seconds()
|
||||
|
||||
|
||||
def hash_str2int(line: str, mod: int = 10 ** 8) -> int:
|
||||
return int(hashlib.sha1(line.encode("utf-8")).hexdigest(), 16) % mod
|
||||
873
api/utils/api_utils.py
Normal file
873
api/utils/api_utils.py
Normal file
@@ -0,0 +1,873 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import asyncio
|
||||
import functools
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import queue
|
||||
import random
|
||||
import threading
|
||||
import time
|
||||
from base64 import b64encode
|
||||
from copy import deepcopy
|
||||
from functools import wraps
|
||||
from hmac import HMAC
|
||||
from io import BytesIO
|
||||
from typing import Any, Callable, Coroutine, Optional, Type, Union
|
||||
from urllib.parse import quote, urlencode
|
||||
from uuid import uuid1
|
||||
|
||||
import requests
|
||||
import trio
|
||||
# FastAPI imports
|
||||
from fastapi import Request, Response as FastAPIResponse, HTTPException, status
|
||||
from fastapi.responses import JSONResponse, FileResponse, StreamingResponse
|
||||
from fastapi import Depends
|
||||
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
|
||||
from itsdangerous import URLSafeTimedSerializer
|
||||
from peewee import OperationalError
|
||||
from werkzeug.http import HTTP_STATUS_CODES
|
||||
|
||||
from api import settings
|
||||
from api.constants import REQUEST_MAX_WAIT_SEC, REQUEST_WAIT_SEC
|
||||
from api.db import ActiveEnum
|
||||
from api.db.db_models import APIToken
|
||||
from api.db.services import UserService
|
||||
from api.db.services.llm_service import LLMService
|
||||
from api.db.services.tenant_llm_service import TenantLLMService
|
||||
from api.utils.json import CustomJSONEncoder, json_dumps
|
||||
|
||||
# FastAPI security scheme
|
||||
security = HTTPBearer()
|
||||
from api.utils import get_uuid
|
||||
from rag.utils.mcp_tool_call_conn import MCPToolCallSession, close_multiple_mcp_toolcall_sessions
|
||||
|
||||
requests.models.complexjson.dumps = functools.partial(json.dumps, cls=CustomJSONEncoder)
|
||||
|
||||
def serialize_for_json(obj):
|
||||
"""
|
||||
Recursively serialize objects to make them JSON serializable.
|
||||
Handles ModelMetaclass and other non-serializable objects.
|
||||
"""
|
||||
if hasattr(obj, '__dict__'):
|
||||
# For objects with __dict__, try to serialize their attributes
|
||||
try:
|
||||
return {key: serialize_for_json(value) for key, value in obj.__dict__.items()
|
||||
if not key.startswith('_')}
|
||||
except (AttributeError, TypeError):
|
||||
return str(obj)
|
||||
elif hasattr(obj, '__name__'):
|
||||
# For classes and metaclasses, return their name
|
||||
return f"<{obj.__module__}.{obj.__name__}>" if hasattr(obj, '__module__') else f"<{obj.__name__}>"
|
||||
elif isinstance(obj, (list, tuple)):
|
||||
return [serialize_for_json(item) for item in obj]
|
||||
elif isinstance(obj, dict):
|
||||
return {key: serialize_for_json(value) for key, value in obj.items()}
|
||||
elif isinstance(obj, (str, int, float, bool)) or obj is None:
|
||||
return obj
|
||||
else:
|
||||
# Fallback: convert to string representation
|
||||
return str(obj)
|
||||
|
||||
def request(**kwargs):
|
||||
sess = requests.Session()
|
||||
stream = kwargs.pop("stream", sess.stream)
|
||||
timeout = kwargs.pop("timeout", None)
|
||||
kwargs["headers"] = {k.replace("_", "-").upper(): v for k, v in kwargs.get("headers", {}).items()}
|
||||
prepped = requests.Request(**kwargs).prepare()
|
||||
|
||||
if settings.CLIENT_AUTHENTICATION and settings.HTTP_APP_KEY and settings.SECRET_KEY:
|
||||
timestamp = str(round(time.time() * 1000))
|
||||
nonce = str(uuid1())
|
||||
signature = b64encode(
|
||||
HMAC(
|
||||
settings.SECRET_KEY.encode("ascii"),
|
||||
b"\n".join(
|
||||
[
|
||||
timestamp.encode("ascii"),
|
||||
nonce.encode("ascii"),
|
||||
settings.HTTP_APP_KEY.encode("ascii"),
|
||||
prepped.path_url.encode("ascii"),
|
||||
prepped.body if kwargs.get("json") else b"",
|
||||
urlencode(sorted(kwargs["data"].items()), quote_via=quote, safe="-._~").encode("ascii") if kwargs.get("data") and isinstance(kwargs["data"], dict) else b"",
|
||||
]
|
||||
),
|
||||
"sha1",
|
||||
).digest()
|
||||
).decode("ascii")
|
||||
|
||||
prepped.headers.update(
|
||||
{
|
||||
"TIMESTAMP": timestamp,
|
||||
"NONCE": nonce,
|
||||
"APP-KEY": settings.HTTP_APP_KEY,
|
||||
"SIGNATURE": signature,
|
||||
}
|
||||
)
|
||||
|
||||
return sess.send(prepped, stream=stream, timeout=timeout)
|
||||
|
||||
|
||||
def get_exponential_backoff_interval(retries, full_jitter=False):
|
||||
"""Calculate the exponential backoff wait time."""
|
||||
# Will be zero if factor equals 0
|
||||
countdown = min(REQUEST_MAX_WAIT_SEC, REQUEST_WAIT_SEC * (2**retries))
|
||||
# Full jitter according to
|
||||
# https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/
|
||||
if full_jitter:
|
||||
countdown = random.randrange(countdown + 1)
|
||||
# Adjust according to maximum wait time and account for negative values.
|
||||
return max(0, countdown)
|
||||
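# Retry-loop sketch using the helper above; `do_call` is a placeholder for the real
# request and the attempt count of 5 is illustrative:
#
#     for retries in range(5):
#         try:
#             result = do_call()
#             break
#         except requests.RequestException:
#             time.sleep(get_exponential_backoff_interval(retries, full_jitter=True))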
|
||||
|
||||
def get_data_error_result(code=settings.RetCode.DATA_ERROR, message="Sorry! Data missing!"):
|
||||
logging.exception(Exception(message))
|
||||
result_dict = {"code": code, "message": message}
|
||||
response = {}
|
||||
for key, value in result_dict.items():
|
||||
if value is None and key != "code":
|
||||
continue
|
||||
else:
|
||||
response[key] = value
|
||||
return JSONResponse(content=response)
|
||||
|
||||
|
||||
def server_error_response(e):
|
||||
logging.exception(e)
|
||||
try:
|
||||
if e.code == 401:
|
||||
return get_json_result(code=401, message=repr(e))
|
||||
except BaseException:
|
||||
pass
|
||||
if len(e.args) > 1:
|
||||
try:
|
||||
serialized_data = serialize_for_json(e.args[1])
|
||||
return get_json_result(code=settings.RetCode.EXCEPTION_ERROR, message=repr(e.args[0]), data=serialized_data)
|
||||
except Exception:
|
||||
return get_json_result(code=settings.RetCode.EXCEPTION_ERROR, message=repr(e.args[0]), data=None)
|
||||
if repr(e).find("index_not_found_exception") >= 0:
|
||||
return get_json_result(code=settings.RetCode.EXCEPTION_ERROR, message="No chunk found, please upload file and parse it.")
|
||||
|
||||
return get_json_result(code=settings.RetCode.EXCEPTION_ERROR, message=repr(e))
|
||||
|
||||
|
||||
def error_response(response_code, message=None):
|
||||
if message is None:
|
||||
message = HTTP_STATUS_CODES.get(response_code, "Unknown Error")
|
||||
|
||||
return JSONResponse(
|
||||
content={
|
||||
"message": message,
|
||||
"code": response_code,
|
||||
},
|
||||
status_code=response_code,
|
||||
)
|
||||
|
||||
|
||||
# FastAPI version: validation is done with Pydantic models instead of a decorator
|
||||
# This decorator is no longer needed, since FastAPI validates Pydantic models automatically
|
||||
def validate_request(*args, **kwargs):
|
||||
"""
|
||||
Deprecated decorator: use Pydantic models for validation in FastAPI.
|
||||
Kept only for backward compatibility; it performs no validation.
|
||||
"""
|
||||
def wrapper(func):
|
||||
@wraps(func)
|
||||
def decorated_function(*_args, **_kwargs):
|
||||
# No manual validation is needed in FastAPI; Pydantic handles it automatically
|
||||
return func(*_args, **_kwargs)
|
||||
return decorated_function
|
||||
return wrapper
|
||||
|
||||
|
||||
def not_allowed_parameters(*params):
|
||||
"""
|
||||
Deprecated decorator: use Pydantic models for validation in FastAPI.
|
||||
Kept only for backward compatibility; it performs no validation.
|
||||
"""
|
||||
def decorator(f):
|
||||
def wrapper(*args, **kwargs):
|
||||
# No manual validation is needed in FastAPI; Pydantic handles it automatically
|
||||
return f(*args, **kwargs)
|
||||
return wrapper
|
||||
return decorator
|
||||
|
||||
|
||||
def active_required(f):
|
||||
"""
|
||||
Deprecated decorator: use dependency injection for user authentication in FastAPI.
|
||||
Kept only for backward compatibility; it performs no validation.
|
||||
"""
|
||||
@wraps(f)
|
||||
def wrapper(*args, **kwargs):
|
||||
# In FastAPI, user authentication is handled via dependency injection
|
||||
return f(*args, **kwargs)
|
||||
return wrapper
|
||||
|
||||
|
||||
def is_localhost(ip):
|
||||
return ip in {"127.0.0.1", "::1", "[::1]", "localhost"}
|
||||
|
||||
|
||||
def send_file_in_mem(data, filename):
|
||||
"""
|
||||
Send in-memory file data.
|
||||
Note: in FastAPI, this function would need the Request in order to build the response correctly.
|
||||
"""
|
||||
if not isinstance(data, (str, bytes)):
|
||||
data = json_dumps(data)
|
||||
if isinstance(data, str):
|
||||
data = data.encode("utf-8")
|
||||
|
||||
f = BytesIO()
|
||||
f.write(data)
|
||||
f.seek(0)
|
||||
|
||||
# In FastAPI, use FileResponse or StreamingResponse instead
|
||||
# A file object is returned here; the caller is responsible for building the response
|
||||
return f
|
||||
|
||||
|
||||
def get_json_result(code=settings.RetCode.SUCCESS, message="success", data=None):
|
||||
response = {"code": code, "message": message, "data": data}
|
||||
return JSONResponse(content=response)
|
||||
|
||||
|
||||
def apikey_required(func):
|
||||
"""
|
||||
Deprecated decorator: use dependency injection for API key verification in FastAPI.
|
||||
Kept only for backward compatibility; it performs no validation.
|
||||
"""
|
||||
@wraps(func)
|
||||
def decorated_function(*args, **kwargs):
|
||||
# In FastAPI, API key verification is handled via dependency injection
|
||||
return func(*args, **kwargs)
|
||||
return decorated_function
|
||||
|
||||
|
||||
def build_error_result(code=settings.RetCode.FORBIDDEN, message="success"):
|
||||
response = {"code": code, "message": message}
|
||||
return JSONResponse(content=response, status_code=code)
|
||||
|
||||
|
||||
def construct_response(code=settings.RetCode.SUCCESS, message="success", data=None, auth=None):
|
||||
result_dict = {"code": code, "message": message, "data": data}
|
||||
response_dict = {}
|
||||
for key, value in result_dict.items():
|
||||
if value is None and key != "code":
|
||||
continue
|
||||
else:
|
||||
response_dict[key] = value
|
||||
|
||||
headers = {
|
||||
"Access-Control-Allow-Origin": "*",
|
||||
"Access-Control-Allow-Method": "*",
|
||||
"Access-Control-Allow-Headers": "*",
|
||||
"Access-Control-Expose-Headers": "Authorization"
|
||||
}
|
||||
if auth:
|
||||
headers["Authorization"] = auth
|
||||
|
||||
return JSONResponse(content=response_dict, headers=headers)
|
||||
|
||||
|
||||
def construct_result(code=settings.RetCode.DATA_ERROR, message="data is missing"):
|
||||
result_dict = {"code": code, "message": message}
|
||||
response = {}
|
||||
for key, value in result_dict.items():
|
||||
if value is None and key != "code":
|
||||
continue
|
||||
else:
|
||||
response[key] = value
|
||||
return JSONResponse(content=response)
|
||||
|
||||
|
||||
def construct_json_result(code=settings.RetCode.SUCCESS, message="success", data=None):
|
||||
if data is None:
|
||||
return JSONResponse(content={"code": code, "message": message})
|
||||
else:
|
||||
return JSONResponse(content={"code": code, "message": message, "data": data})
|
||||
|
||||
|
||||
def construct_error_response(e):
|
||||
logging.exception(e)
|
||||
try:
|
||||
if e.code == 401:
|
||||
return construct_json_result(code=settings.RetCode.UNAUTHORIZED, message=repr(e))
|
||||
except BaseException:
|
||||
pass
|
||||
if len(e.args) > 1:
|
||||
return construct_json_result(code=settings.RetCode.EXCEPTION_ERROR, message=repr(e.args[0]), data=e.args[1])
|
||||
return construct_json_result(code=settings.RetCode.EXCEPTION_ERROR, message=repr(e))
|
||||
|
||||
|
||||
def token_required(func):
|
||||
"""
|
||||
Deprecated decorator: use dependency injection for token verification in FastAPI.
|
||||
Kept only for backward compatibility; it performs no validation.
|
||||
"""
|
||||
@wraps(func)
|
||||
def decorated_function(*args, **kwargs):
|
||||
# In FastAPI, token verification is handled via dependency injection
|
||||
return func(*args, **kwargs)
|
||||
return decorated_function
|
||||
|
||||
|
||||
def get_result(code=settings.RetCode.SUCCESS, message="", data=None):
|
||||
if code == 0:
|
||||
if data is not None:
|
||||
response = {"code": code, "data": data}
|
||||
else:
|
||||
response = {"code": code}
|
||||
else:
|
||||
response = {"code": code, "message": message}
|
||||
return JSONResponse(content=response)
|
||||
|
||||
|
||||
def get_error_data_result(
|
||||
message="Sorry! Data missing!",
|
||||
code=settings.RetCode.DATA_ERROR,
|
||||
):
|
||||
result_dict = {"code": code, "message": message}
|
||||
response = {}
|
||||
for key, value in result_dict.items():
|
||||
if value is None and key != "code":
|
||||
continue
|
||||
else:
|
||||
response[key] = value
|
||||
return JSONResponse(content=response)
|
||||
|
||||
|
||||
def get_error_argument_result(message="Invalid arguments"):
|
||||
return get_result(code=settings.RetCode.ARGUMENT_ERROR, message=message)
|
||||
|
||||
|
||||
# FastAPI dependency-injection helpers
|
||||
async def get_current_user(credentials: HTTPAuthorizationCredentials = Depends(security)):
|
||||
"""获取当前用户 - FastAPI 版本"""
|
||||
from api.db import StatusEnum
|
||||
try:
|
||||
jwt = URLSafeTimedSerializer(secret_key=settings.SECRET_KEY)
|
||||
authorization = credentials.credentials
|
||||
|
||||
if authorization:
|
||||
try:
|
||||
access_token = str(jwt.loads(authorization))
|
||||
|
||||
if not access_token or not access_token.strip():
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_401_UNAUTHORIZED,
|
||||
detail="Authentication attempt with empty access token"
|
||||
)
|
||||
|
||||
# Access tokens should be UUIDs (32 hex characters)
|
||||
if len(access_token.strip()) < 32:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_401_UNAUTHORIZED,
|
||||
detail=f"Authentication attempt with invalid token format: {len(access_token)} chars"
|
||||
)
|
||||
|
||||
user = UserService.query(
|
||||
access_token=access_token, status=StatusEnum.VALID.value
|
||||
)
|
||||
if user:
|
||||
if not user[0].access_token or not user[0].access_token.strip():
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_401_UNAUTHORIZED,
|
||||
detail="Authentication attempt with empty access token"
|
||||
)
|
||||
return user[0]
|
||||
else:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_401_UNAUTHORIZED,
|
||||
detail="Authentication failed: Invalid access token"
|
||||
)
|
||||
except Exception as e:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_401_UNAUTHORIZED,
|
||||
detail=f"Authentication failed: {str(e)}"
|
||||
)
|
||||
else:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_401_UNAUTHORIZED,
|
||||
detail="Authentication failed: No authorization header"
|
||||
)
|
||||
except Exception as e:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_401_UNAUTHORIZED,
|
||||
detail=f"Authentication failed: {str(e)}"
|
||||
)
|
||||
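# Route sketch showing how this dependency replaces the deprecated decorator-based
# checks; the router, path and returned field are illustrative only:
#
#     from fastapi import APIRouter
#
#     router = APIRouter()
#
#     @router.get("/v1/user/profile")
#     async def profile(current_user=Depends(get_current_user)):
#         return get_json_result(data={"email": current_user.email})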
|
||||
|
||||
async def get_current_user_optional(credentials: HTTPAuthorizationCredentials = Depends(security)):
|
||||
"""获取当前用户(可选)- FastAPI 版本"""
|
||||
try:
|
||||
return await get_current_user(credentials)
|
||||
except HTTPException:
|
||||
return None
|
||||
|
||||
|
||||
async def verify_api_key(credentials: HTTPAuthorizationCredentials = Depends(security)):
|
||||
"""验证 API Key - FastAPI 版本"""
|
||||
try:
|
||||
token = credentials.credentials
|
||||
objs = APIToken.query(token=token)
|
||||
if not objs:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_403_FORBIDDEN,
|
||||
detail="API-KEY is invalid!"
|
||||
)
|
||||
return objs[0]
|
||||
except Exception as e:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_403_FORBIDDEN,
|
||||
detail=f"API Key verification failed: {str(e)}"
|
||||
)
|
||||
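# Route sketch for API-key protected endpoints using the dependency above; the path is
# illustrative and the tenant_id field is assumed to exist on the APIToken row:
#
#     from fastapi import APIRouter
#
#     router = APIRouter()
#
#     @router.get("/api/v1/datasets")
#     async def list_datasets(api_token=Depends(verify_api_key)):
#         return get_json_result(data={"tenant_id": api_token.tenant_id})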
|
||||
|
||||
def create_file_response(data, filename: str, media_type: str = "application/octet-stream"):
|
||||
"""创建文件响应 - FastAPI 版本"""
|
||||
if not isinstance(data, (str, bytes)):
|
||||
data = json_dumps(data)
|
||||
if isinstance(data, str):
|
||||
data = data.encode("utf-8")
|
||||
|
||||
return StreamingResponse(
|
||||
BytesIO(data),
|
||||
media_type=media_type,
|
||||
headers={"Content-Disposition": f"attachment; filename={filename}"}
|
||||
)
|
||||
|
||||
|
||||
def get_error_permission_result(message="Permission error"):
|
||||
return get_result(code=settings.RetCode.PERMISSION_ERROR, message=message)
|
||||
|
||||
|
||||
def get_error_operating_result(message="Operating error"):
|
||||
return get_result(code=settings.RetCode.OPERATING_ERROR, message=message)
|
||||
|
||||
|
||||
def generate_confirmation_token(tenant_id):
|
||||
serializer = URLSafeTimedSerializer(tenant_id)
|
||||
return "ragflow-" + serializer.dumps(get_uuid(), salt=tenant_id)[2:34]
|
||||
|
||||
|
||||
def get_parser_config(chunk_method, parser_config):
|
||||
if not chunk_method:
|
||||
chunk_method = "naive"
|
||||
|
||||
# Define default configurations for each chunking method
|
||||
key_mapping = {
|
||||
"naive": {"chunk_token_num": 512, "delimiter": r"\n", "html4excel": False, "layout_recognize": "DeepDOC", "raptor": {"use_raptor": False}, "graphrag": {"use_graphrag": False}},
|
||||
"qa": {"raptor": {"use_raptor": False}, "graphrag": {"use_graphrag": False}},
|
||||
"tag": None,
|
||||
"resume": None,
|
||||
"manual": {"raptor": {"use_raptor": False}, "graphrag": {"use_graphrag": False}},
|
||||
"table": None,
|
||||
"paper": {"raptor": {"use_raptor": False}, "graphrag": {"use_graphrag": False}},
|
||||
"book": {"raptor": {"use_raptor": False}, "graphrag": {"use_graphrag": False}},
|
||||
"laws": {"raptor": {"use_raptor": False}, "graphrag": {"use_graphrag": False}},
|
||||
"presentation": {"raptor": {"use_raptor": False}, "graphrag": {"use_graphrag": False}},
|
||||
"one": None,
|
||||
"knowledge_graph": {
|
||||
"chunk_token_num": 8192,
|
||||
"delimiter": r"\n",
|
||||
"entity_types": ["organization", "person", "location", "event", "time"],
|
||||
"raptor": {"use_raptor": False},
|
||||
"graphrag": {"use_graphrag": False},
|
||||
},
|
||||
"email": None,
|
||||
"picture": None,
|
||||
}
|
||||
|
||||
default_config = key_mapping[chunk_method]
|
||||
|
||||
# If no parser_config provided, return default
|
||||
if not parser_config:
|
||||
return default_config
|
||||
|
||||
# If parser_config is provided, merge with defaults to ensure required fields exist
|
||||
if default_config is None:
|
||||
return parser_config
|
||||
|
||||
# Ensure raptor and graphrag fields have default values if not provided
|
||||
merged_config = deep_merge(default_config, parser_config)
|
||||
|
||||
return merged_config
|
||||
|
||||
|
||||
def get_data_openai(
|
||||
id=None,
|
||||
created=None,
|
||||
model=None,
|
||||
prompt_tokens=0,
|
||||
completion_tokens=0,
|
||||
content=None,
|
||||
finish_reason=None,
|
||||
object="chat.completion",
|
||||
param=None,
|
||||
stream=False
|
||||
):
|
||||
total_tokens = prompt_tokens + completion_tokens
|
||||
|
||||
if stream:
|
||||
return {
|
||||
"id": f"{id}",
|
||||
"object": "chat.completion.chunk",
|
||||
"model": model,
|
||||
"choices": [{
|
||||
"delta": {"content": content},
|
||||
"finish_reason": finish_reason,
|
||||
"index": 0,
|
||||
}],
|
||||
}
|
||||
|
||||
return {
|
||||
"id": f"{id}",
|
||||
"object": object,
|
||||
"created": int(time.time()) if created else None,
|
||||
"model": model,
|
||||
"param": param,
|
||||
"usage": {
|
||||
"prompt_tokens": prompt_tokens,
|
||||
"completion_tokens": completion_tokens,
|
||||
"total_tokens": total_tokens,
|
||||
"completion_tokens_details": {
|
||||
"reasoning_tokens": 0,
|
||||
"accepted_prediction_tokens": 0,
|
||||
"rejected_prediction_tokens": 0,
|
||||
},
|
||||
},
|
||||
"choices": [{
|
||||
"message": {
|
||||
"role": "assistant",
|
||||
"content": content
|
||||
},
|
||||
"logprobs": None,
|
||||
"finish_reason": finish_reason,
|
||||
"index": 0,
|
||||
}],
|
||||
}
|
||||
|
||||
|
||||
def check_duplicate_ids(ids, id_type="item"):
|
||||
"""
|
||||
Check for duplicate IDs in a list and return unique IDs and error messages.
|
||||
|
||||
Args:
|
||||
ids (list): List of IDs to check for duplicates
|
||||
id_type (str): Type of ID for error messages (e.g., 'document', 'dataset', 'chunk')
|
||||
|
||||
Returns:
|
||||
tuple: (unique_ids, error_messages)
|
||||
- unique_ids (list): List of unique IDs
|
||||
- error_messages (list): List of error messages for duplicate IDs
|
||||
"""
|
||||
id_count = {}
|
||||
duplicate_messages = []
|
||||
|
||||
# Count occurrences of each ID
|
||||
for id_value in ids:
|
||||
id_count[id_value] = id_count.get(id_value, 0) + 1
|
||||
|
||||
# Check for duplicates
|
||||
for id_value, count in id_count.items():
|
||||
if count > 1:
|
||||
duplicate_messages.append(f"Duplicate {id_type} ids: {id_value}")
|
||||
|
||||
# Return unique IDs and error messages
|
||||
return list(set(ids)), duplicate_messages
|
||||
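# Example (values are illustrative):
#
#     unique_ids, errors = check_duplicate_ids(["doc1", "doc2", "doc1"], "document")
#     # errors == ['Duplicate document ids: doc1']
#     # unique_ids contains 'doc1' and 'doc2'; set() is used, so order is not guaranteed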
|
||||
|
||||
def verify_embedding_availability(embd_id: str, tenant_id: str) -> tuple[bool, JSONResponse | None]:
|
||||
"""
|
||||
Verifies availability of an embedding model for a specific tenant.
|
||||
|
||||
Performs comprehensive verification through:
|
||||
1. Identifier Parsing: Decomposes embd_id into name and factory components
|
||||
2. System Verification: Checks model registration in LLMService
|
||||
3. Tenant Authorization: Validates tenant-specific model assignments
|
||||
4. Built-in Model Check: Confirms inclusion in predefined system models
|
||||
|
||||
Args:
|
||||
embd_id (str): Unique identifier for the embedding model in format "model_name@factory"
|
||||
tenant_id (str): Tenant identifier for access control
|
||||
|
||||
Returns:
|
||||
tuple[bool, Response | None]:
|
||||
- First element (bool):
|
||||
- True: Model is available and authorized
|
||||
- False: Validation failed
|
||||
- Second element contains:
|
||||
- None on success
|
||||
- Error detail dict on failure
|
||||
|
||||
Raises:
|
||||
ValueError: When model identifier format is invalid
|
||||
OperationalError: When database connection fails (auto-handled)
|
||||
|
||||
Examples:
|
||||
>>> verify_embedding_availability("text-embedding@openai", "tenant_123")
|
||||
(True, None)
|
||||
|
||||
>>> verify_embedding_availability("invalid_model", "tenant_123")
|
||||
(False, {'code': 101, 'message': "Unsupported model: <invalid_model>"})
|
||||
"""
|
||||
try:
|
||||
llm_name, llm_factory = TenantLLMService.split_model_name_and_factory(embd_id)
|
||||
in_llm_service = bool(LLMService.query(llm_name=llm_name, fid=llm_factory, model_type="embedding"))
|
||||
|
||||
tenant_llms = TenantLLMService.get_my_llms(tenant_id=tenant_id)
|
||||
is_tenant_model = any(llm["llm_name"] == llm_name and llm["llm_factory"] == llm_factory and llm["model_type"] == "embedding" for llm in tenant_llms)
|
||||
|
||||
is_builtin_model = embd_id in settings.BUILTIN_EMBEDDING_MODELS
|
||||
if not (is_builtin_model or is_tenant_model or in_llm_service):
|
||||
return False, get_error_argument_result(f"Unsupported model: <{embd_id}>")
|
||||
|
||||
if not (is_builtin_model or is_tenant_model):
|
||||
return False, get_error_argument_result(f"Unauthorized model: <{embd_id}>")
|
||||
except OperationalError as e:
|
||||
logging.exception(e)
|
||||
return False, get_error_data_result(message="Database operation failed")
|
||||
|
||||
return True, None
|
||||
|
||||
|
||||
def deep_merge(default: dict, custom: dict) -> dict:
|
||||
"""
|
||||
Recursively merges two dictionaries with priority given to `custom` values.
|
||||
|
||||
Creates a deep copy of the `default` dictionary and iteratively merges nested
|
||||
dictionaries using a stack-based approach. Non-dict values in `custom` will
|
||||
completely override corresponding entries in `default`.
|
||||
|
||||
Args:
|
||||
default (dict): Base dictionary containing default values.
|
||||
custom (dict): Dictionary containing overriding values.
|
||||
|
||||
Returns:
|
||||
dict: New merged dictionary combining values from both inputs.
|
||||
|
||||
Example:
|
||||
>>> from copy import deepcopy
|
||||
>>> default = {"a": 1, "nested": {"x": 10, "y": 20}}
|
||||
>>> custom = {"b": 2, "nested": {"y": 99, "z": 30}}
|
||||
>>> deep_merge(default, custom)
|
||||
{'a': 1, 'b': 2, 'nested': {'x': 10, 'y': 99, 'z': 30}}
|
||||
|
||||
>>> deep_merge({"config": {"mode": "auto"}}, {"config": "manual"})
|
||||
{'config': 'manual'}
|
||||
|
||||
Notes:
|
||||
1. Merge priority is always given to `custom` values at all nesting levels
|
||||
2. Non-dict values (e.g. list, str) in `custom` will replace entire values
|
||||
in `default`, even if the original value was a dictionary
|
||||
3. Time complexity: O(N) where N is total key-value pairs in `custom`
|
||||
4. Recommended for configuration merging and nested data updates
|
||||
"""
|
||||
merged = deepcopy(default)
|
||||
stack = [(merged, custom)]
|
||||
|
||||
while stack:
|
||||
base_dict, override_dict = stack.pop()
|
||||
|
||||
for key, val in override_dict.items():
|
||||
if key in base_dict and isinstance(val, dict) and isinstance(base_dict[key], dict):
|
||||
stack.append((base_dict[key], val))
|
||||
else:
|
||||
base_dict[key] = val
|
||||
|
||||
return merged
|
||||
|
||||
|
||||
def remap_dictionary_keys(source_data: dict, key_aliases: dict = None) -> dict:
|
||||
"""
|
||||
Transform dictionary keys using a configurable mapping schema.
|
||||
|
||||
Args:
|
||||
source_data: Original dictionary to process
|
||||
key_aliases: Custom key transformation rules (Optional)
|
||||
When provided, overrides default key mapping
|
||||
Format: {<original_key>: <new_key>, ...}
|
||||
|
||||
Returns:
|
||||
dict: New dictionary with transformed keys preserving original values
|
||||
|
||||
Example:
|
||||
>>> input_data = {"old_key": "value", "another_field": 42}
|
||||
>>> remap_dictionary_keys(input_data, {"old_key": "new_key"})
|
||||
{'new_key': 'value', 'another_field': 42}
|
||||
"""
|
||||
DEFAULT_KEY_MAP = {
|
||||
"chunk_num": "chunk_count",
|
||||
"doc_num": "document_count",
|
||||
"parser_id": "chunk_method",
|
||||
"embd_id": "embedding_model",
|
||||
}
|
||||
|
||||
transformed_data = {}
|
||||
mapping = key_aliases or DEFAULT_KEY_MAP
|
||||
|
||||
for original_key, value in source_data.items():
|
||||
mapped_key = mapping.get(original_key, original_key)
|
||||
transformed_data[mapped_key] = value
|
||||
|
||||
return transformed_data
|
||||
|
||||
|
||||
def group_by(list_of_dict, key):
|
||||
res = {}
|
||||
for item in list_of_dict:
|
||||
if item[key] in res.keys():
|
||||
res[item[key]].append(item)
|
||||
else:
|
||||
res[item[key]] = [item]
|
||||
return res
|
||||
|
||||
|
||||
def get_mcp_tools(mcp_servers: list, timeout: float | int = 10) -> tuple[dict, str]:
|
||||
results = {}
|
||||
tool_call_sessions = []
|
||||
try:
|
||||
for mcp_server in mcp_servers:
|
||||
server_key = mcp_server.id
|
||||
|
||||
cached_tools = mcp_server.variables.get("tools", {})
|
||||
|
||||
tool_call_session = MCPToolCallSession(mcp_server, mcp_server.variables)
|
||||
tool_call_sessions.append(tool_call_session)
|
||||
|
||||
try:
|
||||
tools = tool_call_session.get_tools(timeout)
|
||||
except Exception:
|
||||
tools = []
|
||||
|
||||
results[server_key] = []
|
||||
for tool in tools:
|
||||
tool_dict = tool.model_dump()
|
||||
cached_tool = cached_tools.get(tool_dict["name"], {})
|
||||
|
||||
tool_dict["enabled"] = cached_tool.get("enabled", True)
|
||||
results[server_key].append(tool_dict)
|
||||
|
||||
# PERF: blocking call to close sessions — consider moving to background thread or task queue
|
||||
close_multiple_mcp_toolcall_sessions(tool_call_sessions)
|
||||
return results, ""
|
||||
except Exception as e:
|
||||
return {}, str(e)
|
||||
|
||||
|
||||
TimeoutException = Union[Type[BaseException], BaseException]
|
||||
OnTimeoutCallback = Union[Callable[..., Any], Coroutine[Any, Any, Any]]
|
||||
|
||||
|
||||
def timeout(seconds: float | int | str = None, attempts: int = 2, *, exception: Optional[TimeoutException] = None, on_timeout: Optional[OnTimeoutCallback] = None):
|
||||
if isinstance(seconds, str):
|
||||
seconds = float(seconds)
|
||||
def decorator(func):
|
||||
@wraps(func)
|
||||
def wrapper(*args, **kwargs):
|
||||
result_queue = queue.Queue(maxsize=1)
|
||||
|
||||
def target():
|
||||
try:
|
||||
result = func(*args, **kwargs)
|
||||
result_queue.put(result)
|
||||
except Exception as e:
|
||||
result_queue.put(e)
|
||||
|
||||
thread = threading.Thread(target=target)
|
||||
thread.daemon = True
|
||||
thread.start()
|
||||
|
||||
for a in range(attempts):
|
||||
try:
|
||||
if os.environ.get("ENABLE_TIMEOUT_ASSERTION"):
|
||||
result = result_queue.get(timeout=seconds)
|
||||
else:
|
||||
result = result_queue.get()
|
||||
if isinstance(result, Exception):
|
||||
raise result
|
||||
return result
|
||||
except queue.Empty:
|
||||
pass
|
||||
raise TimeoutError(f"Function '{func.__name__}' timed out after {seconds} seconds and {attempts} attempts.")
|
||||
|
||||
@wraps(func)
|
||||
async def async_wrapper(*args, **kwargs) -> Any:
|
||||
if seconds is None:
|
||||
return await func(*args, **kwargs)
|
||||
|
||||
for a in range(attempts):
|
||||
try:
|
||||
if os.environ.get("ENABLE_TIMEOUT_ASSERTION"):
|
||||
with trio.fail_after(seconds):
|
||||
return await func(*args, **kwargs)
|
||||
else:
|
||||
return await func(*args, **kwargs)
|
||||
except trio.TooSlowError:
|
||||
if a < attempts - 1:
|
||||
continue
|
||||
if on_timeout is not None:
|
||||
if callable(on_timeout):
|
||||
result = on_timeout()
|
||||
if isinstance(result, Coroutine):
|
||||
return await result
|
||||
return result
|
||||
return on_timeout
|
||||
|
||||
if exception is None:
|
||||
raise TimeoutError(f"Operation timed out after {seconds} seconds and {attempts} attempts.")
|
||||
|
||||
if isinstance(exception, BaseException):
|
||||
raise exception
|
||||
|
||||
if isinstance(exception, type) and issubclass(exception, BaseException):
|
||||
raise exception(f"Operation timed out after {seconds} seconds and {attempts} attempts.")
|
||||
|
||||
raise RuntimeError("Invalid exception type provided")
|
||||
|
||||
if asyncio.iscoroutinefunction(func):
|
||||
return async_wrapper
|
||||
return wrapper
|
||||
|
||||
return decorator
|
||||
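# Usage sketch; the timeout is only enforced when the ENABLE_TIMEOUT_ASSERTION
# environment variable is set, otherwise the wrapped call simply runs to completion.
# The function names below are placeholders, not existing helpers in this repository:
#
#     @timeout(5, attempts=3)
#     async def fetch_summary(doc_id: str):
#         ...
#
#     @timeout(30)
#     def heavy_sync_job():
#         ...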
|
||||
|
||||
async def is_strong_enough(chat_model, embedding_model):
|
||||
count = settings.STRONG_TEST_COUNT
|
||||
if not chat_model or not embedding_model:
|
||||
return
|
||||
if isinstance(count, int) and count <= 0:
|
||||
return
|
||||
|
||||
@timeout(60, 2)
|
||||
async def _is_strong_enough():
|
||||
nonlocal chat_model, embedding_model
|
||||
if embedding_model:
|
||||
with trio.fail_after(10):
|
||||
_ = await trio.to_thread.run_sync(lambda: embedding_model.encode(["Are you strong enough!?"]))
|
||||
if chat_model:
|
||||
with trio.fail_after(30):
|
||||
res = await trio.to_thread.run_sync(lambda: chat_model.chat("Nothing special.", [{"role": "user", "content": "Are you strong enough!?"}], {}))
|
||||
if res.find("**ERROR**") >= 0:
|
||||
raise Exception(res)
|
||||
|
||||
# Pressure test for GraphRAG task
|
||||
async with trio.open_nursery() as nursery:
|
||||
for _ in range(count):
|
||||
nursery.start_soon(_is_strong_enough)
|
||||
56
api/utils/base64_image.py
Normal file
56
api/utils/base64_image.py
Normal file
@@ -0,0 +1,56 @@
|
||||
import base64
|
||||
import logging
|
||||
from functools import partial
|
||||
from io import BytesIO
|
||||
|
||||
from PIL import Image
|
||||
|
||||
test_image_base64 = "iVBORw0KGgoAAAANSUhEUgAAAGQAAABkCAIAAAD/gAIDAAAA6ElEQVR4nO3QwQ3AIBDAsIP9d25XIC+EZE8QZc18w5l9O+AlZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBWYFZgVmBT+IYAHHLHkdEgAAAABJRU5ErkJggg=="
|
||||
test_image = base64.b64decode(test_image_base64)
|
||||
|
||||
|
||||
async def image2id(d: dict, storage_put_func: partial, objname:str, bucket:str="imagetemps"):
|
||||
import trio
|
||||
from rag.svr.task_executor import minio_limiter
|
||||
if not d.get("image"):
|
||||
return
|
||||
|
||||
with BytesIO() as output_buffer:
|
||||
if isinstance(d["image"], bytes):
|
||||
output_buffer.write(d["image"])
|
||||
output_buffer.seek(0)
|
||||
else:
|
||||
# If the image is in RGBA mode, convert it to RGB mode before saving it in JPEG format.
|
||||
if d["image"].mode in ("RGBA", "P"):
|
||||
converted_image = d["image"].convert("RGB")
|
||||
d["image"] = converted_image
|
||||
try:
|
||||
d["image"].save(output_buffer, format='JPEG')
|
||||
except OSError as e:
|
||||
logging.warning(
|
||||
"Saving image exception, ignore: {}".format(str(e)))
|
||||
|
||||
async with minio_limiter:
|
||||
await trio.to_thread.run_sync(lambda: storage_put_func(bucket=bucket, fnm=objname, binary=output_buffer.getvalue()))
|
||||
d["img_id"] = f"{bucket}-{objname}"
|
||||
if not isinstance(d["image"], bytes):
|
||||
d["image"].close()
|
||||
del d["image"] # Remove image reference
|
||||
|
||||
|
||||
def id2image(image_id:str|None, storage_get_func: partial):
|
||||
if not image_id:
|
||||
return
|
||||
arr = image_id.split("-")
|
||||
if len(arr) != 2:
|
||||
return
|
||||
bkt, nm = image_id.split("-")
|
||||
try:
|
||||
blob = storage_get_func(bucket=bkt, filename=nm)
|
||||
if not blob:
|
||||
return
|
||||
return Image.open(BytesIO(blob))
|
||||
except Exception as e:
|
||||
logging.exception(e)
|
||||
78
api/utils/commands.py
Normal file
78
api/utils/commands.py
Normal file
@@ -0,0 +1,78 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import base64
|
||||
import click
|
||||
import re
|
||||
|
||||
from flask import Flask
|
||||
from werkzeug.security import generate_password_hash
|
||||
|
||||
from api.db.services import UserService
|
||||
|
||||
|
||||
@click.command('reset-password', help='Reset the account password.')
|
||||
@click.option('--email', prompt=True, help='The email address of the account whose password you need to reset')
|
||||
@click.option('--new-password', prompt=True, help='the new password.')
|
||||
@click.option('--password-confirm', prompt=True, help='the new password confirm.')
|
||||
def reset_password(email, new_password, password_confirm):
|
||||
if str(new_password).strip() != str(password_confirm).strip():
|
||||
click.echo(click.style('Sorry, the two passwords do not match.', fg='red'))
|
||||
return
|
||||
user = UserService.query(email=email)
|
||||
if not user:
|
||||
click.echo(click.style('Sorry, this email is not registered.', fg='red'))
|
||||
return
|
||||
encode_password = base64.b64encode(new_password.encode('utf-8')).decode('utf-8')
|
||||
password_hash = generate_password_hash(encode_password)
|
||||
user_dict = {
|
||||
'password': password_hash
|
||||
}
|
||||
UserService.update_user(user[0].id, user_dict)
|
||||
click.echo(click.style('Congratulations! Password has been reset.', fg='green'))
|
||||
|
||||
|
||||
@click.command('reset-email', help='Reset the account email.')
|
||||
@click.option('--email', prompt=True, help='The old email address of the account whose email you need to reset')
|
||||
@click.option('--new-email', prompt=True, help='the new email.')
|
||||
@click.option('--email-confirm', prompt=True, help='the new email confirm.')
|
||||
def reset_email(email, new_email, email_confirm):
|
||||
if str(new_email).strip() != str(email_confirm).strip():
|
||||
click.echo(click.style('Sorry, new email and confirm email do not match.', fg='red'))
|
||||
return
|
||||
if str(new_email).strip() == str(email).strip():
|
||||
click.echo(click.style('Sorry, new email and old email are the same.', fg='red'))
|
||||
return
|
||||
user = UserService.query(email=email)
|
||||
if not user:
|
||||
click.echo(click.style('Sorry, the account [{}] does not exist.'.format(email), fg='red'))
|
||||
return
|
||||
if not re.match(r"^[\w\._-]+@([\w_-]+\.)+[\w-]{2,4}$", new_email):
|
||||
click.echo(click.style('Sorry, {} is not a valid email.'.format(new_email), fg='red'))
|
||||
return
|
||||
new_user = UserService.query(email=new_email)
|
||||
if new_user:
|
||||
click.echo(click.style('Sorry, the account [{}] already exists.'.format(new_email), fg='red'))
|
||||
return
|
||||
user_dict = {
|
||||
'email': new_email
|
||||
}
|
||||
UserService.update_user(user[0].id, user_dict)
|
||||
click.echo(click.style('Congratulations! Email has been reset.', fg='green'))
|
||||
|
||||
def register_commands(app: Flask):
|
||||
app.cli.add_command(reset_password)
|
||||
app.cli.add_command(reset_email)
|
||||
46
api/utils/common.py
Normal file
46
api/utils/common.py
Normal file
@@ -0,0 +1,46 @@
|
||||
#
|
||||
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
def string_to_bytes(string):
|
||||
return string if isinstance(
|
||||
string, bytes) else string.encode(encoding="utf-8")
|
||||
|
||||
|
||||
def bytes_to_string(byte):
|
||||
return byte.decode(encoding="utf-8")
|
||||
|
||||
|
||||
def convert_bytes(size_in_bytes: int) -> str:
|
||||
"""
|
||||
Format size in bytes.
|
||||
"""
|
||||
if size_in_bytes == 0:
|
||||
return "0 B"
|
||||
|
||||
units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
|
||||
i = 0
|
||||
size = float(size_in_bytes)
|
||||
|
||||
while size >= 1024 and i < len(units) - 1:
|
||||
size /= 1024
|
||||
i += 1
|
||||
|
||||
if i == 0 or size >= 100:
|
||||
return f"{size:.0f} {units[i]}"
|
||||
elif size >= 10:
|
||||
return f"{size:.1f} {units[i]}"
|
||||
else:
|
||||
return f"{size:.2f} {units[i]}"
|
||||
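# Examples of the precision rules above:
#
#     convert_bytes(0)               # -> '0 B'
#     convert_bytes(1536)            # -> '1.50 KB'
#     convert_bytes(5 * 1024 ** 3)   # -> '5.00 GB'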
179
api/utils/configs.py
Normal file
179
api/utils/configs.py
Normal file
@@ -0,0 +1,179 @@
|
||||
#
|
||||
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import os
|
||||
import io
|
||||
import copy
|
||||
import logging
|
||||
import base64
|
||||
import pickle
|
||||
import importlib
|
||||
|
||||
from api.utils import file_utils
|
||||
from filelock import FileLock
|
||||
from api.utils.common import bytes_to_string, string_to_bytes
|
||||
from api.constants import SERVICE_CONF
|
||||
|
||||
|
||||
def conf_realpath(conf_name):
|
||||
conf_path = f"conf/{conf_name}"
|
||||
return os.path.join(file_utils.get_project_base_directory(), conf_path)
|
||||
|
||||
|
||||
def read_config(conf_name=SERVICE_CONF):
|
||||
local_config = {}
|
||||
local_path = conf_realpath(f'local.{conf_name}')
|
||||
|
||||
# load local config file
|
||||
if os.path.exists(local_path):
|
||||
local_config = file_utils.load_yaml_conf(local_path)
|
||||
if not isinstance(local_config, dict):
|
||||
raise ValueError(f'Invalid config file: "{local_path}".')
|
||||
|
||||
global_config_path = conf_realpath(conf_name)
|
||||
global_config = file_utils.load_yaml_conf(global_config_path)
|
||||
|
||||
if not isinstance(global_config, dict):
|
||||
raise ValueError(f'Invalid config file: "{global_config_path}".')
|
||||
|
||||
global_config.update(local_config)
|
||||
return global_config
|
||||
|
||||
|
||||
CONFIGS = read_config()
|
||||
|
||||
|
||||
def show_configs():
|
||||
msg = f"Current configs, from {conf_realpath(SERVICE_CONF)}:"
|
||||
for k, v in CONFIGS.items():
|
||||
if isinstance(v, dict):
|
||||
if "password" in v:
|
||||
v = copy.deepcopy(v)
|
||||
v["password"] = "*" * 8
|
||||
if "access_key" in v:
|
||||
v = copy.deepcopy(v)
|
||||
v["access_key"] = "*" * 8
|
||||
if "secret_key" in v:
|
||||
v = copy.deepcopy(v)
|
||||
v["secret_key"] = "*" * 8
|
||||
if "secret" in v:
|
||||
v = copy.deepcopy(v)
|
||||
v["secret"] = "*" * 8
|
||||
if "sas_token" in v:
|
||||
v = copy.deepcopy(v)
|
||||
v["sas_token"] = "*" * 8
|
||||
if "oauth" in k:
|
||||
v = copy.deepcopy(v)
|
||||
for key, val in v.items():
|
||||
if "client_secret" in val:
|
||||
val["client_secret"] = "*" * 8
|
||||
if "authentication" in k:
|
||||
v = copy.deepcopy(v)
|
||||
for key, val in v.items():
|
||||
if "http_secret_key" in val:
|
||||
val["http_secret_key"] = "*" * 8
|
||||
msg += f"\n\t{k}: {v}"
|
||||
logging.info(msg)
|
||||
|
||||
|
||||
def get_base_config(key, default=None):
|
||||
if key is None:
|
||||
return None
|
||||
if default is None:
|
||||
default = os.environ.get(key.upper())
|
||||
return CONFIGS.get(key, default)
|
||||
|
||||
|
||||
def decrypt_database_password(password):
|
||||
encrypt_password = get_base_config("encrypt_password", False)
|
||||
encrypt_module = get_base_config("encrypt_module", False)
|
||||
private_key = get_base_config("private_key", None)
|
||||
|
||||
if not password or not encrypt_password:
|
||||
return password
|
||||
|
||||
if not private_key:
|
||||
raise ValueError("No private key")
|
||||
|
||||
module_fun = encrypt_module.split("#")
|
||||
pwdecrypt_fun = getattr(
|
||||
importlib.import_module(
|
||||
module_fun[0]),
|
||||
module_fun[1])
|
||||
|
||||
return pwdecrypt_fun(private_key, password)
|
||||
|
||||
|
||||
def decrypt_database_config(
|
||||
database=None, passwd_key="password", name="database"):
|
||||
if not database:
|
||||
database = get_base_config(name, {})
|
||||
|
||||
database[passwd_key] = decrypt_database_password(database[passwd_key])
|
||||
return database
|
||||
|
||||
|
||||
def update_config(key, value, conf_name=SERVICE_CONF):
|
||||
conf_path = conf_realpath(conf_name=conf_name)
|
||||
if not os.path.isabs(conf_path):
|
||||
conf_path = os.path.join(
|
||||
file_utils.get_project_base_directory(), conf_path)
|
||||
|
||||
with FileLock(os.path.join(os.path.dirname(conf_path), ".lock")):
|
||||
config = file_utils.load_yaml_conf(conf_path=conf_path) or {}
|
||||
config[key] = value
|
||||
file_utils.rewrite_yaml_conf(conf_path=conf_path, config=config)
|
||||
|
||||
|
||||
safe_module = {
|
||||
'numpy',
|
||||
'rag_flow'
|
||||
}
|
||||
|
||||
|
||||
class RestrictedUnpickler(pickle.Unpickler):
|
||||
def find_class(self, module, name):
|
||||
import importlib
|
||||
if module.split('.')[0] in safe_module:
|
||||
_module = importlib.import_module(module)
|
||||
return getattr(_module, name)
|
||||
# Forbid everything else.
|
||||
raise pickle.UnpicklingError("global '%s.%s' is forbidden" %
|
||||
(module, name))
|
||||
|
||||
|
||||
def restricted_loads(src):
|
||||
"""Helper function analogous to pickle.loads()."""
|
||||
return RestrictedUnpickler(io.BytesIO(src)).load()
|
||||
|
||||
|
||||
def serialize_b64(src, to_str=False):
|
||||
dest = base64.b64encode(pickle.dumps(src))
|
||||
if not to_str:
|
||||
return dest
|
||||
else:
|
||||
return bytes_to_string(dest)
|
||||
|
||||
|
||||
def deserialize_b64(src):
|
||||
src = base64.b64decode(
|
||||
string_to_bytes(src) if isinstance(
|
||||
src, str) else src)
|
||||
use_deserialize_safe_module = get_base_config(
|
||||
'use_deserialize_safe_module', False)
|
||||
if use_deserialize_safe_module:
|
||||
return restricted_loads(src)
|
||||
return pickle.loads(src)
|
||||
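# Round-trip sketch for the two helpers above; plain pickle.loads is used unless
# use_deserialize_safe_module is enabled in the config, so only feed trusted data:
#
#     token = serialize_b64({"user": "alice"}, to_str=True)
#     deserialize_b64(token)   # -> {"user": "alice"}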
64
api/utils/crypt.py
Normal file
64
api/utils/crypt.py
Normal file
@@ -0,0 +1,64 @@
|
||||
#
|
||||
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import base64
|
||||
import os
|
||||
import sys
|
||||
from Cryptodome.PublicKey import RSA
|
||||
from Cryptodome.Cipher import PKCS1_v1_5 as Cipher_pkcs1_v1_5
|
||||
from api.utils import file_utils
|
||||
|
||||
|
||||
def crypt(line):
|
||||
"""
|
||||
decrypt(crypt(input_string)) == base64(input_string), which frontend and admin_client use.
|
||||
"""
|
||||
file_path = os.path.join(file_utils.get_project_base_directory(), "conf", "public.pem")
|
||||
rsa_key = RSA.importKey(open(file_path).read(), "Welcome")
|
||||
cipher = Cipher_pkcs1_v1_5.new(rsa_key)
|
||||
password_base64 = base64.b64encode(line.encode('utf-8')).decode("utf-8")
|
||||
encrypted_password = cipher.encrypt(password_base64.encode())
|
||||
return base64.b64encode(encrypted_password).decode('utf-8')
|
||||
|
||||
|
||||
def decrypt(line):
|
||||
file_path = os.path.join(file_utils.get_project_base_directory(), "conf", "private.pem")
|
||||
rsa_key = RSA.importKey(open(file_path).read(), "Welcome")
|
||||
cipher = Cipher_pkcs1_v1_5.new(rsa_key)
|
||||
return cipher.decrypt(base64.b64decode(line), "Fail to decrypt password!").decode('utf-8')
|
||||
|
||||
|
||||
def decrypt2(crypt_text):
|
||||
from base64 import b64decode, b16decode
|
||||
from Crypto.Cipher import PKCS1_v1_5 as Cipher_PKCS1_v1_5
|
||||
from Crypto.PublicKey import RSA
|
||||
decode_data = b64decode(crypt_text)
|
||||
if len(decode_data) == 127:
|
||||
hex_fixed = '00' + decode_data.hex()
|
||||
decode_data = b16decode(hex_fixed.upper())
|
||||
|
||||
file_path = os.path.join(file_utils.get_project_base_directory(), "conf", "private.pem")
|
||||
pem = open(file_path).read()
|
||||
rsa_key = RSA.importKey(pem, "Welcome")
|
||||
cipher = Cipher_PKCS1_v1_5.new(rsa_key)
|
||||
decrypt_text = cipher.decrypt(decode_data, None)
|
||||
return (b64decode(decrypt_text)).decode()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
passwd = crypt(sys.argv[1])
|
||||
print(passwd)
|
||||
print(decrypt(passwd))
|
||||
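For completeness, a hedged sketch of generating the conf/public.pem and conf/private.pem pair that crypt()/decrypt() above expect. The key size and protection scheme are assumptions; only the "Welcome" passphrase mirrors the code.

from Cryptodome.PublicKey import RSA

key = RSA.generate(2048)
private_pem = key.export_key(passphrase="Welcome", pkcs=8, protection="scryptAndAES128-CBC")
public_pem = key.publickey().export_key()

with open("conf/private.pem", "wb") as f:   # paths relative to the project base directory
    f.write(private_pem)
with open("conf/public.pem", "wb") as f:
    f.write(public_pem)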
286
api/utils/file_utils.py
Normal file
286
api/utils/file_utils.py
Normal file
@@ -0,0 +1,286 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import base64
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
import subprocess
|
||||
import sys
|
||||
import tempfile
|
||||
import threading
|
||||
from io import BytesIO
|
||||
|
||||
import pdfplumber
|
||||
from cachetools import LRUCache, cached
|
||||
from PIL import Image
|
||||
from ruamel.yaml import YAML
|
||||
|
||||
from api.constants import IMG_BASE64_PREFIX
|
||||
from api.db import FileType
|
||||
|
||||
PROJECT_BASE = os.getenv("RAG_PROJECT_BASE") or os.getenv("RAG_DEPLOY_BASE")
|
||||
RAG_BASE = os.getenv("RAG_BASE")
|
||||
|
||||
LOCK_KEY_pdfplumber = "global_shared_lock_pdfplumber"
|
||||
if LOCK_KEY_pdfplumber not in sys.modules:
|
||||
sys.modules[LOCK_KEY_pdfplumber] = threading.Lock()
|
||||
|
||||
|
||||
def get_project_base_directory(*args):
|
||||
global PROJECT_BASE
|
||||
if PROJECT_BASE is None:
|
||||
PROJECT_BASE = os.path.abspath(
|
||||
os.path.join(
|
||||
os.path.dirname(os.path.realpath(__file__)),
|
||||
os.pardir,
|
||||
os.pardir,
|
||||
)
|
||||
)
|
||||
|
||||
if args:
|
||||
return os.path.join(PROJECT_BASE, *args)
|
||||
return PROJECT_BASE
|
||||
|
||||
|
||||
def get_rag_directory(*args):
|
||||
global RAG_BASE
|
||||
if RAG_BASE is None:
|
||||
RAG_BASE = os.path.abspath(
|
||||
os.path.join(
|
||||
os.path.dirname(os.path.realpath(__file__)),
|
||||
os.pardir,
|
||||
os.pardir,
|
||||
os.pardir,
|
||||
)
|
||||
)
|
||||
if args:
|
||||
return os.path.join(RAG_BASE, *args)
|
||||
return RAG_BASE
|
||||
|
||||
|
||||
def get_rag_python_directory(*args):
|
||||
return get_rag_directory("python", *args)
|
||||
|
||||
|
||||
def get_home_cache_dir():
|
||||
cache_dir = os.path.join(os.path.expanduser("~"), ".ragflow")
|
||||
try:
|
||||
os.mkdir(cache_dir)
|
||||
except OSError:
|
||||
pass
|
||||
return cache_dir
|
||||
|
||||
|
||||
@cached(cache=LRUCache(maxsize=10))
|
||||
def load_json_conf(conf_path):
|
||||
if os.path.isabs(conf_path):
|
||||
json_conf_path = conf_path
|
||||
else:
|
||||
json_conf_path = os.path.join(get_project_base_directory(), conf_path)
|
||||
try:
|
||||
with open(json_conf_path) as f:
|
||||
return json.load(f)
|
||||
except BaseException:
|
||||
raise EnvironmentError("loading json file config from '{}' failed!".format(json_conf_path))
|
||||
|
||||
|
||||
def dump_json_conf(config_data, conf_path):
|
||||
if os.path.isabs(conf_path):
|
||||
json_conf_path = conf_path
|
||||
else:
|
||||
json_conf_path = os.path.join(get_project_base_directory(), conf_path)
|
||||
try:
|
||||
with open(json_conf_path, "w") as f:
|
||||
json.dump(config_data, f, indent=4)
|
||||
except BaseException:
|
||||
raise EnvironmentError("loading json file config from '{}' failed!".format(json_conf_path))
|
||||
|
||||
|
||||
def load_json_conf_real_time(conf_path):
|
||||
if os.path.isabs(conf_path):
|
||||
json_conf_path = conf_path
|
||||
else:
|
||||
json_conf_path = os.path.join(get_project_base_directory(), conf_path)
|
||||
try:
|
||||
with open(json_conf_path) as f:
|
||||
return json.load(f)
|
||||
except BaseException:
|
||||
raise EnvironmentError("loading json file config from '{}' failed!".format(json_conf_path))
|
||||
|
||||
|
||||
def load_yaml_conf(conf_path):
|
||||
if not os.path.isabs(conf_path):
|
||||
conf_path = os.path.join(get_project_base_directory(), conf_path)
|
||||
try:
|
||||
with open(conf_path) as f:
|
||||
yaml = YAML(typ="safe", pure=True)
|
||||
return yaml.load(f)
|
||||
except Exception as e:
|
||||
raise EnvironmentError("loading yaml file config from {} failed:".format(conf_path), e)
|
||||
|
||||
|
||||
def rewrite_yaml_conf(conf_path, config):
|
||||
if not os.path.isabs(conf_path):
|
||||
conf_path = os.path.join(get_project_base_directory(), conf_path)
|
||||
try:
|
||||
with open(conf_path, "w") as f:
|
||||
yaml = YAML(typ="safe")
|
||||
yaml.dump(config, f)
|
||||
except Exception as e:
|
||||
raise EnvironmentError("rewrite yaml file config {} failed:".format(conf_path), e)
|
||||
|
||||
|
||||
def rewrite_json_file(filepath, json_data):
|
||||
with open(filepath, "w", encoding="utf-8") as f:
|
||||
json.dump(json_data, f, indent=4, separators=(",", ": "))
|
||||
|
||||
|
||||
|
||||
def filename_type(filename):
|
||||
filename = filename.lower()
|
||||
if re.match(r".*\.pdf$", filename):
|
||||
return FileType.PDF.value
|
||||
|
||||
if re.match(r".*\.(msg|eml|doc|docx|ppt|pptx|yml|xml|htm|json|jsonl|ldjson|csv|txt|ini|xls|xlsx|wps|rtf|hlp|pages|numbers|key|md|py|js|java|c|cpp|h|php|go|ts|sh|cs|kt|html|sql)$", filename):
|
||||
return FileType.DOC.value
|
||||
|
||||
if re.match(r".*\.(wav|flac|ape|alac|wavpack|wv|mp3|aac|ogg|vorbis|opus)$", filename):
|
||||
return FileType.AURAL.value
|
||||
|
||||
if re.match(r".*\.(jpg|jpeg|png|tif|gif|pcx|tga|exif|fpx|svg|psd|cdr|pcd|dxf|ufo|eps|ai|raw|WMF|webp|avif|apng|icon|ico|mpg|mpeg|avi|rm|rmvb|mov|wmv|asf|dat|asx|wvx|mpe|mpa|mp4)$", filename):
|
||||
return FileType.VISUAL.value
|
||||
|
||||
return FileType.OTHER.value
|
||||
|
||||
|
||||
def thumbnail_img(filename, blob):
|
||||
"""
|
||||
Keep the result under ~64 KB so it fits in a MySQL TEXT column (max 65,535 bytes).
|
||||
"""
|
||||
filename = filename.lower()
|
||||
if re.match(r".*\.pdf$", filename):
|
||||
with sys.modules[LOCK_KEY_pdfplumber]:
|
||||
pdf = pdfplumber.open(BytesIO(blob))
|
||||
|
||||
buffered = BytesIO()
|
||||
resolution = 32
|
||||
img = None
|
||||
for _ in range(10):
|
||||
# https://github.com/jsvine/pdfplumber?tab=readme-ov-file#creating-a-pageimage-with-to_image
|
||||
pdf.pages[0].to_image(resolution=resolution).annotated.save(buffered, format="png")
|
||||
img = buffered.getvalue()
|
||||
if len(img) >= 64000 and resolution >= 2:
|
||||
resolution = resolution / 2
|
||||
buffered = BytesIO()
|
||||
else:
|
||||
break
|
||||
pdf.close()
|
||||
return img
|
||||
|
||||
elif re.match(r".*\.(jpg|jpeg|png|tif|gif|icon|ico|webp)$", filename):
|
||||
image = Image.open(BytesIO(blob))
|
||||
image.thumbnail((30, 30))
|
||||
buffered = BytesIO()
|
||||
image.save(buffered, format="png")
|
||||
return buffered.getvalue()
|
||||
|
||||
elif re.match(r".*\.(ppt|pptx)$", filename):
|
||||
import aspose.pydrawing as drawing
|
||||
import aspose.slides as slides
|
||||
|
||||
try:
|
||||
with slides.Presentation(BytesIO(blob)) as presentation:
|
||||
buffered = BytesIO()
|
||||
scale = 0.03
|
||||
img = None
|
||||
for _ in range(10):
|
||||
# https://reference.aspose.com/slides/python-net/aspose.slides/slide/get_thumbnail/#float-float
|
||||
presentation.slides[0].get_thumbnail(scale, scale).save(buffered, drawing.imaging.ImageFormat.png)
|
||||
img = buffered.getvalue()
|
||||
if len(img) >= 64000:
|
||||
scale = scale / 2.0
|
||||
buffered = BytesIO()
|
||||
else:
|
||||
break
|
||||
return img
|
||||
except Exception:
|
||||
pass
|
||||
return None
|
||||
|
||||
|
||||
def thumbnail(filename, blob):
|
||||
img = thumbnail_img(filename, blob)
|
||||
if img is not None:
|
||||
return IMG_BASE64_PREFIX + base64.b64encode(img).decode("utf-8")
|
||||
else:
|
||||
return ""
|
||||
|
||||
|
||||
def traversal_files(base):
|
||||
for root, ds, fs in os.walk(base):
|
||||
for f in fs:
|
||||
fullname = os.path.join(root, f)
|
||||
yield fullname
|
||||
|
||||
|
||||
def repair_pdf_with_ghostscript(input_bytes):
|
||||
if shutil.which("gs") is None:
|
||||
return input_bytes
|
||||
|
||||
with tempfile.NamedTemporaryFile(suffix=".pdf") as temp_in, tempfile.NamedTemporaryFile(suffix=".pdf") as temp_out:
|
||||
temp_in.write(input_bytes)
|
||||
temp_in.flush()
|
||||
|
||||
cmd = [
|
||||
"gs",
|
||||
"-o",
|
||||
temp_out.name,
|
||||
"-sDEVICE=pdfwrite",
|
||||
"-dPDFSETTINGS=/prepress",
|
||||
temp_in.name,
|
||||
]
|
||||
try:
|
||||
proc = subprocess.run(cmd, capture_output=True, text=True)
|
||||
if proc.returncode != 0:
|
||||
return input_bytes
|
||||
except Exception:
|
||||
return input_bytes
|
||||
|
||||
temp_out.seek(0)
|
||||
repaired_bytes = temp_out.read()
|
||||
|
||||
return repaired_bytes
|
||||
|
||||
|
||||
def read_potential_broken_pdf(blob):
|
||||
def try_open(blob):
|
||||
try:
|
||||
with pdfplumber.open(BytesIO(blob)) as pdf:
|
||||
if pdf.pages:
|
||||
return True
|
||||
except Exception:
|
||||
return False
|
||||
return False
|
||||
|
||||
if try_open(blob):
|
||||
return blob
|
||||
|
||||
repaired = repair_pdf_with_ghostscript(blob)
|
||||
if try_open(repaired):
|
||||
return repaired
|
||||
|
||||
return blob
|
||||
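An illustrative pass through the helpers above; the file name and import path are assumptions:

from api.utils.file_utils import filename_type, read_potential_broken_pdf, thumbnail  # assumed path

with open("sample.pdf", "rb") as f:        # hypothetical upload
    blob = f.read()

kind = filename_type("sample.pdf")         # -> FileType.PDF.value
blob = read_potential_broken_pdf(blob)     # round-trips through Ghostscript if the PDF will not open
preview = thumbnail("sample.pdf", blob)    # "" or IMG_BASE64_PREFIX + a base64-encoded PNG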
104
api/utils/health.py
Normal file
104
api/utils/health.py
Normal file
@@ -0,0 +1,104 @@
|
||||
from timeit import default_timer as timer
|
||||
|
||||
from api import settings
|
||||
from api.db.db_models import DB
|
||||
from rag.utils.redis_conn import REDIS_CONN
|
||||
from rag.utils.storage_factory import STORAGE_IMPL
|
||||
|
||||
|
||||
def _ok_nok(ok: bool) -> str:
|
||||
return "ok" if ok else "nok"
|
||||
|
||||
|
||||
def check_db() -> tuple[bool, dict]:
|
||||
st = timer()
|
||||
try:
|
||||
# lightweight probe; works for MySQL/Postgres
|
||||
DB.execute_sql("SELECT 1")
|
||||
return True, {"elapsed": f"{(timer() - st) * 1000.0:.1f}"}
|
||||
except Exception as e:
|
||||
return False, {"elapsed": f"{(timer() - st) * 1000.0:.1f}", "error": str(e)}
|
||||
|
||||
|
||||
def check_redis() -> tuple[bool, dict]:
|
||||
st = timer()
|
||||
try:
|
||||
ok = bool(REDIS_CONN.health())
|
||||
return ok, {"elapsed": f"{(timer() - st) * 1000.0:.1f}"}
|
||||
except Exception as e:
|
||||
return False, {"elapsed": f"{(timer() - st) * 1000.0:.1f}", "error": str(e)}
|
||||
|
||||
|
||||
def check_doc_engine() -> tuple[bool, dict]:
|
||||
st = timer()
|
||||
try:
|
||||
meta = settings.docStoreConn.health()
|
||||
# treat any successful call as ok
|
||||
return True, {"elapsed": f"{(timer() - st) * 1000.0:.1f}", **(meta or {})}
|
||||
except Exception as e:
|
||||
return False, {"elapsed": f"{(timer() - st) * 1000.0:.1f}", "error": str(e)}
|
||||
|
||||
|
||||
def check_storage() -> tuple[bool, dict]:
|
||||
st = timer()
|
||||
try:
|
||||
STORAGE_IMPL.health()
|
||||
return True, {"elapsed": f"{(timer() - st) * 1000.0:.1f}"}
|
||||
except Exception as e:
|
||||
return False, {"elapsed": f"{(timer() - st) * 1000.0:.1f}", "error": str(e)}
|
||||
|
||||
|
||||
def check_chat() -> tuple[bool, dict]:
|
||||
st = timer()
|
||||
try:
|
||||
cfg = getattr(settings, "CHAT_CFG", None)
|
||||
ok = bool(cfg and cfg.get("factory"))
|
||||
return ok, {"elapsed": f"{(timer() - st) * 1000.0:.1f}"}
|
||||
except Exception as e:
|
||||
return False, {"elapsed": f"{(timer() - st) * 1000.0:.1f}", "error": str(e)}
|
||||
|
||||
|
||||
def run_health_checks() -> tuple[dict, bool]:
|
||||
result: dict[str, str | dict] = {}
|
||||
|
||||
db_ok, db_meta = check_db()
|
||||
chat_ok, chat_meta = check_chat()
|
||||
|
||||
result["db"] = _ok_nok(db_ok)
|
||||
if not db_ok:
|
||||
result.setdefault("_meta", {})["db"] = db_meta
|
||||
|
||||
result["chat"] = _ok_nok(chat_ok)
|
||||
if not chat_ok:
|
||||
result.setdefault("_meta", {})["chat"] = chat_meta
|
||||
|
||||
# Optional probes (do not change minimal contract but exposed for observability)
|
||||
try:
|
||||
redis_ok, redis_meta = check_redis()
|
||||
result["redis"] = _ok_nok(redis_ok)
|
||||
if not redis_ok:
|
||||
result.setdefault("_meta", {})["redis"] = redis_meta
|
||||
except Exception:
|
||||
result["redis"] = "nok"
|
||||
|
||||
try:
|
||||
doc_ok, doc_meta = check_doc_engine()
|
||||
result["doc_engine"] = _ok_nok(doc_ok)
|
||||
if not doc_ok:
|
||||
result.setdefault("_meta", {})["doc_engine"] = doc_meta
|
||||
except Exception:
|
||||
result["doc_engine"] = "nok"
|
||||
|
||||
try:
|
||||
sto_ok, sto_meta = check_storage()
|
||||
result["storage"] = _ok_nok(sto_ok)
|
||||
if not sto_ok:
|
||||
result.setdefault("_meta", {})["storage"] = sto_meta
|
||||
except Exception:
|
||||
result["storage"] = "nok"
|
||||
|
||||
all_ok = (result.get("db") == "ok") and (result.get("chat") == "ok")
|
||||
result["status"] = "ok" if all_ok else "nok"
|
||||
return result, all_ok
|
||||
|
||||
|
||||
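A hedged sketch of exposing run_health_checks() over HTTP. The route path and FastAPI wiring are assumptions in line with this commit's Flask-to-FastAPI direction; only run_health_checks() comes from the code above.

from fastapi import FastAPI
from fastapi.responses import JSONResponse

from api.utils.health import run_health_checks  # assumed import path

app = FastAPI()

@app.get("/v1/system/healthz")
def healthz():
    result, all_ok = run_health_checks()
    # 200 only when the minimal contract (db + chat) holds; optional probes are informational
    return JSONResponse(content=result, status_code=200 if all_ok else 500)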
200
api/utils/health_utils.py
Normal file
200
api/utils/health_utils.py
Normal file
@@ -0,0 +1,200 @@
|
||||
#
|
||||
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import os
|
||||
import requests
|
||||
from timeit import default_timer as timer
|
||||
|
||||
from api import settings
|
||||
from api.db.db_models import DB
|
||||
from rag import settings as rag_settings
|
||||
from rag.utils.redis_conn import REDIS_CONN
|
||||
from rag.utils.storage_factory import STORAGE_IMPL
|
||||
from rag.utils.es_conn import ESConnection
|
||||
from rag.utils.infinity_conn import InfinityConnection
|
||||
|
||||
|
||||
def _ok_nok(ok: bool) -> str:
|
||||
return "ok" if ok else "nok"
|
||||
|
||||
|
||||
def check_db() -> tuple[bool, dict]:
|
||||
st = timer()
|
||||
try:
|
||||
# lightweight probe; works for MySQL/Postgres
|
||||
DB.execute_sql("SELECT 1")
|
||||
return True, {"elapsed": f"{(timer() - st) * 1000.0:.1f}"}
|
||||
except Exception as e:
|
||||
return False, {"elapsed": f"{(timer() - st) * 1000.0:.1f}", "error": str(e)}
|
||||
|
||||
|
||||
def check_redis() -> tuple[bool, dict]:
|
||||
st = timer()
|
||||
try:
|
||||
ok = bool(REDIS_CONN.health())
|
||||
return ok, {"elapsed": f"{(timer() - st) * 1000.0:.1f}"}
|
||||
except Exception as e:
|
||||
return False, {"elapsed": f"{(timer() - st) * 1000.0:.1f}", "error": str(e)}
|
||||
|
||||
|
||||
def check_doc_engine() -> tuple[bool, dict]:
|
||||
st = timer()
|
||||
try:
|
||||
meta = settings.docStoreConn.health()
|
||||
# treat any successful call as ok
|
||||
return True, {"elapsed": f"{(timer() - st) * 1000.0:.1f}", **(meta or {})}
|
||||
except Exception as e:
|
||||
return False, {"elapsed": f"{(timer() - st) * 1000.0:.1f}", "error": str(e)}
|
||||
|
||||
|
||||
def check_storage() -> tuple[bool, dict]:
|
||||
st = timer()
|
||||
try:
|
||||
STORAGE_IMPL.health()
|
||||
return True, {"elapsed": f"{(timer() - st) * 1000.0:.1f}"}
|
||||
except Exception as e:
|
||||
return False, {"elapsed": f"{(timer() - st) * 1000.0:.1f}", "error": str(e)}
|
||||
|
||||
|
||||
def get_es_cluster_stats() -> dict:
|
||||
doc_engine = os.getenv('DOC_ENGINE', 'elasticsearch')
|
||||
if doc_engine != 'elasticsearch':
|
||||
raise Exception("Elasticsearch is not in use.")
|
||||
try:
|
||||
return {
|
||||
"alive": True,
|
||||
"message": ESConnection().get_cluster_stats()
|
||||
}
|
||||
except Exception as e:
|
||||
return {
|
||||
"alive": False,
|
||||
"message": f"error: {str(e)}",
|
||||
}
|
||||
|
||||
|
||||
def get_infinity_status():
|
||||
doc_engine = os.getenv('DOC_ENGINE', 'elasticsearch')
|
||||
if doc_engine != 'infinity':
|
||||
raise Exception("Infinity is not in use.")
|
||||
try:
|
||||
return {
|
||||
"alive": True,
|
||||
"message": InfinityConnection().health()
|
||||
}
|
||||
except Exception as e:
|
||||
return {
|
||||
"alive": False,
|
||||
"message": f"error: {str(e)}",
|
||||
}
|
||||
|
||||
|
||||
def get_mysql_status():
|
||||
try:
|
||||
cursor = DB.execute_sql("SHOW PROCESSLIST;")
|
||||
res_rows = cursor.fetchall()
|
||||
headers = ['id', 'user', 'host', 'db', 'command', 'time', 'state', 'info']
|
||||
cursor.close()
|
||||
return {
|
||||
"alive": True,
|
||||
"message": [dict(zip(headers, r)) for r in res_rows]
|
||||
}
|
||||
except Exception as e:
|
||||
return {
|
||||
"alive": False,
|
||||
"message": f"error: {str(e)}",
|
||||
}
|
||||
|
||||
|
||||
def check_minio_alive():
|
||||
start_time = timer()
|
||||
try:
|
||||
response = requests.get(f'http://{rag_settings.MINIO["host"]}/minio/health/live')
|
||||
if response.status_code == 200:
|
||||
return {'alive': True, "message": f"Confirm elapsed: {(timer() - start_time) * 1000.0:.1f} ms."}
|
||||
else:
|
||||
return {'alive': False, "message": f"Confirm elapsed: {(timer() - start_time) * 1000.0:.1f} ms."}
|
||||
except Exception as e:
|
||||
return {
|
||||
"alive": False,
|
||||
"message": f"error: {str(e)}",
|
||||
}
|
||||
|
||||
|
||||
def get_redis_info():
|
||||
try:
|
||||
return {
|
||||
"alive": True,
|
||||
"message": REDIS_CONN.info()
|
||||
}
|
||||
except Exception as e:
|
||||
return {
|
||||
"alive": False,
|
||||
"message": f"error: {str(e)}",
|
||||
}
|
||||
|
||||
|
||||
def check_ragflow_server_alive():
|
||||
start_time = timer()
|
||||
try:
|
||||
response = requests.get(f'http://{settings.HOST_IP}:{settings.HOST_PORT}/v1/system/ping')
|
||||
if response.status_code == 200:
|
||||
return {'alive': True, "message": f"Confirm elapsed: {(timer() - start_time) * 1000.0:.1f} ms."}
|
||||
else:
|
||||
return {'alive': False, "message": f"Confirm elapsed: {(timer() - start_time) * 1000.0:.1f} ms."}
|
||||
except Exception as e:
|
||||
return {
|
||||
"alive": False,
|
||||
"message": f"error: {str(e)}",
|
||||
}
|
||||
|
||||
|
||||
def run_health_checks() -> tuple[dict, bool]:
|
||||
result: dict[str, str | dict] = {}
|
||||
|
||||
db_ok, db_meta = check_db()
|
||||
result["db"] = _ok_nok(db_ok)
|
||||
if not db_ok:
|
||||
result.setdefault("_meta", {})["db"] = db_meta
|
||||
|
||||
try:
|
||||
redis_ok, redis_meta = check_redis()
|
||||
result["redis"] = _ok_nok(redis_ok)
|
||||
if not redis_ok:
|
||||
result.setdefault("_meta", {})["redis"] = redis_meta
|
||||
except Exception:
|
||||
result["redis"] = "nok"
|
||||
|
||||
try:
|
||||
doc_ok, doc_meta = check_doc_engine()
|
||||
result["doc_engine"] = _ok_nok(doc_ok)
|
||||
if not doc_ok:
|
||||
result.setdefault("_meta", {})["doc_engine"] = doc_meta
|
||||
except Exception:
|
||||
result["doc_engine"] = "nok"
|
||||
|
||||
try:
|
||||
sto_ok, sto_meta = check_storage()
|
||||
result["storage"] = _ok_nok(sto_ok)
|
||||
if not sto_ok:
|
||||
result.setdefault("_meta", {})["storage"] = sto_meta
|
||||
except Exception:
|
||||
result["storage"] = "nok"
|
||||
|
||||
|
||||
all_ok = (result.get("db") == "ok") and (result.get("redis") == "ok") and (result.get("doc_engine") == "ok") and (result.get("storage") == "ok")
|
||||
result["status"] = "ok" if all_ok else "nok"
|
||||
return result, all_ok
|
||||
|
||||
|
||||
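A sketch of aggregating the probes above into a single report. The function name and import path are assumptions; the DOC_ENGINE dispatch mirrors get_es_cluster_stats()/get_infinity_status().

import os

from api.utils.health_utils import (  # assumed import path
    check_minio_alive, check_ragflow_server_alive, get_es_cluster_stats,
    get_infinity_status, get_mysql_status, get_redis_info,
)

def deep_status() -> dict:
    report = {
        "mysql": get_mysql_status(),
        "redis": get_redis_info(),
        "minio": check_minio_alive(),
        "ragflow_server": check_ragflow_server_alive(),
    }
    if os.getenv("DOC_ENGINE", "elasticsearch") == "infinity":
        report["doc_engine"] = get_infinity_status()
    else:
        report["doc_engine"] = get_es_cluster_stats()
    return report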
78
api/utils/json.py
Normal file
78
api/utils/json.py
Normal file
@@ -0,0 +1,78 @@
|
||||
import datetime
|
||||
import json
|
||||
from enum import Enum, IntEnum
|
||||
from api.utils.common import string_to_bytes, bytes_to_string
|
||||
|
||||
|
||||
class BaseType:
|
||||
def to_dict(self):
|
||||
return dict([(k.lstrip("_"), v) for k, v in self.__dict__.items()])
|
||||
|
||||
def to_dict_with_type(self):
|
||||
def _dict(obj):
|
||||
module = None
|
||||
if issubclass(obj.__class__, BaseType):
|
||||
data = {}
|
||||
for attr, v in obj.__dict__.items():
|
||||
k = attr.lstrip("_")
|
||||
data[k] = _dict(v)
|
||||
module = obj.__module__
|
||||
elif isinstance(obj, (list, tuple)):
|
||||
data = []
|
||||
for i, vv in enumerate(obj):
|
||||
data.append(_dict(vv))
|
||||
elif isinstance(obj, dict):
|
||||
data = {}
|
||||
for _k, vv in obj.items():
|
||||
data[_k] = _dict(vv)
|
||||
else:
|
||||
data = obj
|
||||
return {"type": obj.__class__.__name__,
|
||||
"data": data, "module": module}
|
||||
|
||||
return _dict(self)
|
||||
|
||||
|
||||
class CustomJSONEncoder(json.JSONEncoder):
|
||||
def __init__(self, **kwargs):
|
||||
self._with_type = kwargs.pop("with_type", False)
|
||||
super().__init__(**kwargs)
|
||||
|
||||
def default(self, obj):
|
||||
if isinstance(obj, datetime.datetime):
|
||||
return obj.strftime('%Y-%m-%d %H:%M:%S')
|
||||
elif isinstance(obj, datetime.date):
|
||||
return obj.strftime('%Y-%m-%d')
|
||||
elif isinstance(obj, datetime.timedelta):
|
||||
return str(obj)
|
||||
elif issubclass(type(obj), Enum) or issubclass(type(obj), IntEnum):
|
||||
return obj.value
|
||||
elif isinstance(obj, set):
|
||||
return list(obj)
|
||||
elif issubclass(type(obj), BaseType):
|
||||
if not self._with_type:
|
||||
return obj.to_dict()
|
||||
else:
|
||||
return obj.to_dict_with_type()
|
||||
elif isinstance(obj, type):
|
||||
return obj.__name__
|
||||
else:
|
||||
return json.JSONEncoder.default(self, obj)
|
||||
|
||||
|
||||
def json_dumps(src, byte=False, indent=None, with_type=False):
|
||||
dest = json.dumps(
|
||||
src,
|
||||
indent=indent,
|
||||
cls=CustomJSONEncoder,
|
||||
with_type=with_type)
|
||||
if byte:
|
||||
dest = string_to_bytes(dest)
|
||||
return dest
|
||||
|
||||
|
||||
def json_loads(src, object_hook=None, object_pairs_hook=None):
|
||||
if isinstance(src, bytes):
|
||||
src = bytes_to_string(src)
|
||||
return json.loads(src, object_hook=object_hook,
|
||||
object_pairs_hook=object_pairs_hook)
|
||||
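A quick illustration of what CustomJSONEncoder handles beyond the stdlib encoder; the import path is assumed, and set ordering in the output may vary.

import datetime
from enum import Enum

from api.utils.json import json_dumps  # assumed import path

class Status(Enum):
    VALID = "1"

doc = {
    "created": datetime.datetime(2024, 1, 1, 12, 0, 0),
    "status": Status.VALID,
    "tags": {"a", "b"},
}

print(json_dumps(doc))
# {"created": "2024-01-01 12:00:00", "status": "1", "tags": ["a", "b"]}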
91
api/utils/log_utils.py
Normal file
91
api/utils/log_utils.py
Normal file
@@ -0,0 +1,91 @@
|
||||
#
|
||||
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import os
|
||||
import os.path
|
||||
import logging
|
||||
from logging.handlers import RotatingFileHandler
|
||||
|
||||
initialized_root_logger = False
|
||||
|
||||
def get_project_base_directory():
|
||||
PROJECT_BASE = os.path.abspath(
|
||||
os.path.join(
|
||||
os.path.dirname(os.path.realpath(__file__)),
|
||||
os.pardir,
|
||||
os.pardir,
|
||||
)
|
||||
)
|
||||
return PROJECT_BASE
|
||||
|
||||
def init_root_logger(logfile_basename: str, log_format: str = "%(asctime)-15s %(levelname)-8s %(process)d %(message)s"):
|
||||
global initialized_root_logger
|
||||
if initialized_root_logger:
|
||||
return
|
||||
initialized_root_logger = True
|
||||
|
||||
logger = logging.getLogger()
|
||||
logger.handlers.clear()
|
||||
log_path = os.path.abspath(os.path.join(get_project_base_directory(), "logs", f"{logfile_basename}.log"))
|
||||
|
||||
os.makedirs(os.path.dirname(log_path), exist_ok=True)
|
||||
formatter = logging.Formatter(log_format)
|
||||
|
||||
handler1 = RotatingFileHandler(log_path, maxBytes=10*1024*1024, backupCount=5)
|
||||
handler1.setFormatter(formatter)
|
||||
logger.addHandler(handler1)
|
||||
|
||||
handler2 = logging.StreamHandler()
|
||||
handler2.setFormatter(formatter)
|
||||
logger.addHandler(handler2)
|
||||
|
||||
logging.captureWarnings(True)
|
||||
|
||||
LOG_LEVELS = os.environ.get("LOG_LEVELS", "")
|
||||
pkg_levels = {}
|
||||
for pkg_name_level in LOG_LEVELS.split(","):
|
||||
terms = pkg_name_level.split("=")
|
||||
if len(terms) != 2:
|
||||
continue
|
||||
pkg_name, pkg_level = terms[0], terms[1]
|
||||
pkg_name = pkg_name.strip()
|
||||
pkg_level = logging.getLevelName(pkg_level.strip().upper())
|
||||
if not isinstance(pkg_level, int):
|
||||
pkg_level = logging.INFO
|
||||
pkg_levels[pkg_name] = logging.getLevelName(pkg_level)
|
||||
|
||||
for pkg_name in ['peewee', 'pdfminer']:
|
||||
if pkg_name not in pkg_levels:
|
||||
pkg_levels[pkg_name] = logging.getLevelName(logging.WARNING)
|
||||
if 'root' not in pkg_levels:
|
||||
pkg_levels['root'] = logging.getLevelName(logging.INFO)
|
||||
|
||||
for pkg_name, pkg_level in pkg_levels.items():
|
||||
pkg_logger = logging.getLogger(pkg_name)
|
||||
pkg_logger.setLevel(pkg_level)
|
||||
|
||||
msg = f"{logfile_basename} log path: {log_path}, log levels: {pkg_levels}"
|
||||
logger.info(msg)
|
||||
|
||||
|
||||
def log_exception(e, *args):
|
||||
logging.exception(e)
|
||||
for a in args:
|
||||
if hasattr(a, "text"):
|
||||
logging.error(a.text)
|
||||
raise Exception(a.text)
|
||||
else:
|
||||
logging.error(str(a))
|
||||
raise e
|
||||
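A sketch of how an entrypoint might initialize logging with the per-package levels parsed above; the LOG_LEVELS value shown is only an example.

import logging
import os

from api.utils.log_utils import init_root_logger  # assumed import path

os.environ.setdefault("LOG_LEVELS", "peewee=WARNING,pdfminer=WARNING,root=INFO")
init_root_logger("ragflow_server")   # writes logs/ragflow_server.log and mirrors to stderr
logging.info("logging initialized")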
636
api/utils/validation_utils.py
Normal file
636
api/utils/validation_utils.py
Normal file
@@ -0,0 +1,636 @@
|
||||
#
|
||||
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
from collections import Counter
|
||||
from typing import Annotated, Any, Literal
|
||||
from uuid import UUID
|
||||
|
||||
from flask import Request
|
||||
from pydantic import (
|
||||
BaseModel,
|
||||
ConfigDict,
|
||||
Field,
|
||||
StringConstraints,
|
||||
ValidationError,
|
||||
field_validator,
|
||||
)
|
||||
from pydantic_core import PydanticCustomError
|
||||
from werkzeug.exceptions import BadRequest, UnsupportedMediaType
|
||||
|
||||
from api.constants import DATASET_NAME_LIMIT
|
||||
|
||||
|
||||
def validate_and_parse_json_request(request: Request, validator: type[BaseModel], *, extras: dict[str, Any] | None = None, exclude_unset: bool = False) -> tuple[dict[str, Any] | None, str | None]:
|
||||
"""
|
||||
Validates and parses JSON requests through a multi-stage validation pipeline.
|
||||
|
||||
Implements a four-stage validation process:
|
||||
1. Content-Type verification (must be application/json)
|
||||
2. JSON syntax validation
|
||||
3. Payload structure type checking
|
||||
4. Pydantic model validation with error formatting
|
||||
|
||||
Args:
|
||||
request (Request): Flask request object containing HTTP payload
|
||||
validator (type[BaseModel]): Pydantic model class for data validation
|
||||
extras (dict[str, Any] | None): Additional fields to merge into payload
|
||||
before validation. These fields will be removed from the final output
|
||||
exclude_unset (bool): Whether to exclude fields that have not been explicitly set
|
||||
|
||||
Returns:
|
||||
tuple[Dict[str, Any] | None, str | None]:
|
||||
- First element:
|
||||
- Validated dictionary on success
|
||||
- None on validation failure
|
||||
- Second element:
|
||||
- None on success
|
||||
- Diagnostic error message on failure
|
||||
|
||||
Raises:
|
||||
UnsupportedMediaType: When Content-Type header is not application/json
|
||||
BadRequest: For structural JSON syntax errors
|
||||
ValidationError: When payload violates Pydantic schema rules
|
||||
|
||||
Examples:
|
||||
>>> validate_and_parse_json_request(valid_request, DatasetSchema)
|
||||
({"name": "Dataset1", "format": "csv"}, None)
|
||||
|
||||
>>> validate_and_parse_json_request(xml_request, DatasetSchema)
|
||||
(None, "Unsupported content type: Expected application/json, got text/xml")
|
||||
|
||||
>>> validate_and_parse_json_request(bad_json_request, DatasetSchema)
|
||||
(None, "Malformed JSON syntax: Missing commas/brackets or invalid encoding")
|
||||
|
||||
Notes:
|
||||
1. Validation Priority:
|
||||
- Content-Type verification precedes JSON parsing
|
||||
- Structural validation occurs before schema validation
|
||||
2. Extra fields added via `extras` parameter are automatically removed
|
||||
from the final output after validation
|
||||
"""
|
||||
try:
|
||||
payload = request.get_json() or {}
|
||||
except UnsupportedMediaType:
|
||||
return None, f"Unsupported content type: Expected application/json, got {request.content_type}"
|
||||
except BadRequest:
|
||||
return None, "Malformed JSON syntax: Missing commas/brackets or invalid encoding"
|
||||
|
||||
if not isinstance(payload, dict):
|
||||
return None, f"Invalid request payload: expected object, got {type(payload).__name__}"
|
||||
|
||||
try:
|
||||
if extras is not None:
|
||||
payload.update(extras)
|
||||
validated_request = validator(**payload)
|
||||
except ValidationError as e:
|
||||
return None, format_validation_error_message(e)
|
||||
|
||||
parsed_payload = validated_request.model_dump(by_alias=True, exclude_unset=exclude_unset)
|
||||
|
||||
if extras is not None:
|
||||
for key in list(parsed_payload.keys()):
|
||||
if key in extras:
|
||||
del parsed_payload[key]
|
||||
|
||||
return parsed_payload, None
|
||||
|
||||
|
||||
def validate_and_parse_request_args(request: Request, validator: type[BaseModel], *, extras: dict[str, Any] | None = None) -> tuple[dict[str, Any] | None, str | None]:
|
||||
"""
|
||||
Validates and parses request arguments against a Pydantic model.
|
||||
|
||||
This function performs a complete request validation workflow:
|
||||
1. Extracts query parameters from the request
|
||||
2. Merges with optional extra values (if provided)
|
||||
3. Validates against the specified Pydantic model
|
||||
4. Cleans the output by removing extra values
|
||||
5. Returns either parsed data or an error message
|
||||
|
||||
Args:
|
||||
request (Request): Web framework request object containing query parameters
|
||||
validator (type[BaseModel]): Pydantic model class for validation
|
||||
extras (dict[str, Any] | None): Optional additional values to include in validation
|
||||
but exclude from final output. Defaults to None.
|
||||
|
||||
Returns:
|
||||
tuple[dict[str, Any] | None, str | None]:
|
||||
- First element: Validated/parsed arguments as dict if successful, None otherwise
|
||||
- Second element: Formatted error message if validation failed, None otherwise
|
||||
|
||||
Behavior:
|
||||
- Query parameters are merged with extras before validation
|
||||
- Extras are automatically removed from the final output
|
||||
- All validation errors are formatted into a human-readable string
|
||||
|
||||
Raises:
|
||||
TypeError: If validator is not a Pydantic BaseModel subclass
|
||||
|
||||
Examples:
|
||||
Successful validation:
|
||||
>>> validate_and_parse_request_args(request, MyValidator)
|
||||
({'param1': 'value'}, None)
|
||||
|
||||
Failed validation:
|
||||
>>> validate_and_parse_request_args(request, MyValidator)
|
||||
(None, "param1: Field required")
|
||||
|
||||
With extras:
|
||||
>>> validate_and_parse_request_args(request, MyValidator, extras={'internal_id': 123})
|
||||
({'param1': 'value'}, None) # internal_id removed from output
|
||||
|
||||
Notes:
|
||||
- Uses request.args.to_dict() for Flask-compatible parameter extraction
|
||||
- Maintains immutability of original request arguments
|
||||
- Preserves type conversion from Pydantic validation
|
||||
"""
|
||||
args = request.args.to_dict(flat=True)
|
||||
try:
|
||||
if extras is not None:
|
||||
args.update(extras)
|
||||
validated_args = validator(**args)
|
||||
except ValidationError as e:
|
||||
return None, format_validation_error_message(e)
|
||||
|
||||
parsed_args = validated_args.model_dump()
|
||||
if extras is not None:
|
||||
for key in list(parsed_args.keys()):
|
||||
if key in extras:
|
||||
del parsed_args[key]
|
||||
|
||||
return parsed_args, None
|
||||
|
||||
|
||||
def format_validation_error_message(e: ValidationError) -> str:
|
||||
"""
|
||||
Formats validation errors into a standardized string format.
|
||||
|
||||
Processes pydantic ValidationError objects to create human-readable error messages
|
||||
containing field locations, error descriptions, and input values.
|
||||
|
||||
Args:
|
||||
e (ValidationError): The validation error instance containing error details
|
||||
|
||||
Returns:
|
||||
str: Formatted error messages joined by newlines. Each line contains:
|
||||
- Field path (dot-separated)
|
||||
- Error message
|
||||
- Truncated input value (max 128 chars)
|
||||
|
||||
Example:
|
||||
>>> try:
|
||||
... UserModel(name=123, email="invalid")
|
||||
... except ValidationError as e:
|
||||
... print(format_validation_error_message(e))
|
||||
Field: <name> - Message: <Input should be a valid string> - Value: <123>
|
||||
Field: <email> - Message: <value is not a valid email address> - Value: <invalid>
|
||||
"""
|
||||
error_messages = []
|
||||
|
||||
for error in e.errors():
|
||||
field = ".".join(map(str, error["loc"]))
|
||||
msg = error["msg"]
|
||||
input_val = error["input"]
|
||||
input_str = str(input_val)
|
||||
|
||||
if len(input_str) > 128:
|
||||
input_str = input_str[:125] + "..."
|
||||
|
||||
error_msg = f"Field: <{field}> - Message: <{msg}> - Value: <{input_str}>"
|
||||
error_messages.append(error_msg)
|
||||
|
||||
return "\n".join(error_messages)
|
||||
|
||||
|
||||
def normalize_str(v: Any) -> Any:
|
||||
"""
|
||||
Normalizes string values to a standard format while preserving non-string inputs.
|
||||
|
||||
Performs the following transformations when input is a string:
|
||||
1. Trims leading/trailing whitespace (str.strip())
|
||||
2. Converts to lowercase (str.lower())
|
||||
|
||||
Non-string inputs are returned unchanged, making this function safe for mixed-type
|
||||
processing pipelines.
|
||||
|
||||
Args:
|
||||
v (Any): Input value to normalize. Accepts any Python object.
|
||||
|
||||
Returns:
|
||||
Any: Normalized string if input was string-type, original value otherwise.
|
||||
|
||||
Behavior Examples:
|
||||
String Input: " Admin " → "admin"
|
||||
Empty String: " " → "" (empty string)
|
||||
Non-String:
|
||||
- 123 → 123
|
||||
- None → None
|
||||
- ["User"] → ["User"]
|
||||
|
||||
Typical Use Cases:
|
||||
- Standardizing user input
|
||||
- Preparing data for case-insensitive comparison
|
||||
- Cleaning API parameters
|
||||
- Normalizing configuration values
|
||||
|
||||
Edge Cases:
|
||||
- Unicode whitespace is handled by str.strip()
|
||||
- Locale-independent lowercasing (str.lower())
|
||||
- Preserves falsy values (0, False, etc.)
|
||||
|
||||
Example:
|
||||
>>> normalize_str(" ReadOnly ")
|
||||
'readonly'
|
||||
>>> normalize_str(42)
|
||||
42
|
||||
"""
|
||||
if isinstance(v, str):
|
||||
stripped = v.strip()
|
||||
normalized = stripped.lower()
|
||||
return normalized
|
||||
return v
|
||||
|
||||
|
||||
def validate_uuid1_hex(v: Any) -> str:
|
||||
"""
|
||||
Validates and converts input to a UUID version 1 hexadecimal string.
|
||||
|
||||
This function performs strict validation and normalization:
|
||||
1. Accepts either UUID objects or UUID-formatted strings
|
||||
2. Verifies the UUID is version 1 (time-based)
|
||||
3. Returns the 32-character hexadecimal representation
|
||||
|
||||
Args:
|
||||
v (Any): Input value to validate. Can be:
|
||||
- UUID object (must be version 1)
|
||||
- String in UUID format (e.g. "550e8400-e29b-41d4-a716-446655440000")
|
||||
|
||||
Returns:
|
||||
str: 32-character lowercase hexadecimal string without hyphens
|
||||
Example: "550e8400e29b41d4a716446655440000"
|
||||
|
||||
Raises:
|
||||
PydanticCustomError: With code "invalid_UUID1_format" when:
|
||||
- Input is not a UUID object or valid UUID string
|
||||
- UUID version is not 1
|
||||
- String doesn't match UUID format
|
||||
|
||||
Examples:
|
||||
Valid cases:
|
||||
>>> validate_uuid1_hex("550e8400-e29b-41d4-a716-446655440000")
|
||||
'550e8400e29b41d4a716446655440000'
|
||||
>>> validate_uuid1_hex(UUID('550e8400-e29b-41d4-a716-446655440000'))
|
||||
'550e8400e29b41d4a716446655440000'
|
||||
|
||||
Invalid cases:
|
||||
>>> validate_uuid1_hex("not-a-uuid") # raises PydanticCustomError
|
||||
>>> validate_uuid1_hex(12345) # raises PydanticCustomError
|
||||
>>> validate_uuid1_hex(UUID(int=0))  # not a version-1 UUID, raises PydanticCustomError
|
||||
|
||||
Notes:
|
||||
- Uses Python's built-in UUID parser for format validation
|
||||
- Version check prevents accidental use of other UUID versions
|
||||
- Hyphens in input strings are automatically removed in output
|
||||
"""
|
||||
try:
|
||||
uuid_obj = UUID(v) if isinstance(v, str) else v
|
||||
if uuid_obj.version != 1:
|
||||
raise PydanticCustomError("invalid_UUID1_format", "Must be a UUID1 format")
|
||||
return uuid_obj.hex
|
||||
except (AttributeError, ValueError, TypeError):
|
||||
raise PydanticCustomError("invalid_UUID1_format", "Invalid UUID1 format")
|
||||
|
||||
|
||||
class Base(BaseModel):
|
||||
model_config = ConfigDict(extra="forbid", strict=True)
|
||||
|
||||
|
||||
class RaptorConfig(Base):
|
||||
use_raptor: Annotated[bool, Field(default=False)]
|
||||
prompt: Annotated[
|
||||
str,
|
||||
StringConstraints(strip_whitespace=True, min_length=1),
|
||||
Field(
|
||||
default="Please summarize the following paragraphs. Be careful with the numbers, do not make things up. Paragraphs as following:\n {cluster_content}\nThe above is the content you need to summarize."
|
||||
),
|
||||
]
|
||||
max_token: Annotated[int, Field(default=256, ge=1, le=2048)]
|
||||
threshold: Annotated[float, Field(default=0.1, ge=0.0, le=1.0)]
|
||||
max_cluster: Annotated[int, Field(default=64, ge=1, le=1024)]
|
||||
random_seed: Annotated[int, Field(default=0, ge=0)]
|
||||
|
||||
|
||||
class GraphragConfig(Base):
|
||||
use_graphrag: Annotated[bool, Field(default=False)]
|
||||
entity_types: Annotated[list[str], Field(default_factory=lambda: ["organization", "person", "geo", "event", "category"])]
|
||||
method: Annotated[Literal["light", "general"], Field(default="light")]
|
||||
community: Annotated[bool, Field(default=False)]
|
||||
resolution: Annotated[bool, Field(default=False)]
|
||||
|
||||
|
||||
class ParserConfig(Base):
|
||||
auto_keywords: Annotated[int, Field(default=0, ge=0, le=32)]
|
||||
auto_questions: Annotated[int, Field(default=0, ge=0, le=10)]
|
||||
chunk_token_num: Annotated[int, Field(default=512, ge=1, le=2048)]
|
||||
delimiter: Annotated[str, Field(default=r"\n", min_length=1)]
|
||||
graphrag: Annotated[GraphragConfig, Field(default_factory=lambda: GraphragConfig(use_graphrag=False))]
|
||||
html4excel: Annotated[bool, Field(default=False)]
|
||||
layout_recognize: Annotated[str, Field(default="DeepDOC")]
|
||||
raptor: Annotated[RaptorConfig, Field(default_factory=lambda: RaptorConfig(use_raptor=False))]
|
||||
tag_kb_ids: Annotated[list[str], Field(default_factory=list)]
|
||||
topn_tags: Annotated[int, Field(default=1, ge=1, le=10)]
|
||||
filename_embd_weight: Annotated[float | None, Field(default=0.1, ge=0.0, le=1.0)]
|
||||
task_page_size: Annotated[int | None, Field(default=None, ge=1)]
|
||||
pages: Annotated[list[list[int]] | None, Field(default=None)]
|
||||
|
||||
|
||||
class CreateDatasetReq(Base):
|
||||
name: Annotated[str, StringConstraints(strip_whitespace=True, min_length=1, max_length=DATASET_NAME_LIMIT), Field(...)]
|
||||
avatar: Annotated[str | None, Field(default=None, max_length=65535)]
|
||||
description: Annotated[str | None, Field(default=None, max_length=65535)]
|
||||
embedding_model: Annotated[str | None, Field(default=None, max_length=255, serialization_alias="embd_id")]
|
||||
permission: Annotated[Literal["me", "team"], Field(default="me", min_length=1, max_length=16)]
|
||||
chunk_method: Annotated[
|
||||
Literal["naive", "book", "email", "laws", "manual", "one", "paper", "picture", "presentation", "qa", "table", "tag"],
|
||||
Field(default="naive", min_length=1, max_length=32, serialization_alias="parser_id"),
|
||||
]
|
||||
parser_config: Annotated[ParserConfig | None, Field(default=None)]
|
||||
|
||||
@field_validator("avatar", mode="after")
|
||||
@classmethod
|
||||
def validate_avatar_base64(cls, v: str | None) -> str | None:
|
||||
"""
|
||||
Validates Base64-encoded avatar string format and MIME type compliance.
|
||||
|
||||
Implements a three-stage validation workflow:
|
||||
1. MIME prefix existence check
|
||||
2. MIME type format validation
|
||||
3. Supported type verification
|
||||
|
||||
Args:
|
||||
v (str): Raw avatar field value
|
||||
|
||||
Returns:
|
||||
str: Validated Base64 string
|
||||
|
||||
Raises:
|
||||
PydanticCustomError: For structural errors in these cases:
|
||||
- Missing MIME prefix header
|
||||
- Invalid MIME prefix format
|
||||
- Unsupported image MIME type
|
||||
|
||||
Example:
|
||||
```python
|
||||
# Valid case
|
||||
CreateDatasetReq(avatar="data:image/png;base64,iVBORw0KGg...")
|
||||
|
||||
# Invalid cases
|
||||
CreateDatasetReq(avatar="image/jpeg;base64,...") # Missing 'data:' prefix
|
||||
CreateDatasetReq(avatar="data:video/mp4;base64,...") # Unsupported MIME type
|
||||
```
|
||||
"""
|
||||
if v is None:
|
||||
return v
|
||||
|
||||
if "," in v:
|
||||
prefix, _ = v.split(",", 1)
|
||||
if not prefix.startswith("data:"):
|
||||
raise PydanticCustomError("format_invalid", "Invalid MIME prefix format. Must start with 'data:'")
|
||||
|
||||
mime_type = prefix[5:].split(";")[0]
|
||||
supported_mime_types = ["image/jpeg", "image/png"]
|
||||
if mime_type not in supported_mime_types:
|
||||
raise PydanticCustomError("format_invalid", "Unsupported MIME type. Allowed: {supported_mime_types}", {"supported_mime_types": supported_mime_types})
|
||||
|
||||
return v
|
||||
else:
|
||||
raise PydanticCustomError("format_invalid", "Missing MIME prefix. Expected format: data:<mime>;base64,<data>")
|
||||
|
||||
@field_validator("embedding_model", mode="before")
|
||||
@classmethod
|
||||
def normalize_embedding_model(cls, v: Any) -> Any:
|
||||
"""Normalize embedding model string by stripping whitespace"""
|
||||
if isinstance(v, str):
|
||||
return v.strip()
|
||||
return v
|
||||
|
||||
@field_validator("embedding_model", mode="after")
|
||||
@classmethod
|
||||
def validate_embedding_model(cls, v: str | None) -> str | None:
|
||||
"""
|
||||
Validates embedding model identifier format compliance.
|
||||
|
||||
Validation pipeline:
|
||||
1. Structural format verification
|
||||
2. Component non-empty check
|
||||
3. Value normalization
|
||||
|
||||
Args:
|
||||
v (str): Raw model identifier
|
||||
|
||||
Returns:
|
||||
str: Validated <model_name>@<provider> format
|
||||
|
||||
Raises:
|
||||
PydanticCustomError: For these violations:
|
||||
- Missing @ separator
|
||||
- Empty model_name/provider
|
||||
- Invalid component structure
|
||||
|
||||
Examples:
|
||||
Valid: "text-embedding-3-large@openai"
|
||||
Invalid: "invalid_model" (no @)
|
||||
Invalid: "@openai" (empty model_name)
|
||||
Invalid: "text-embedding-3-large@" (empty provider)
|
||||
"""
|
||||
if isinstance(v, str):
|
||||
if "@" not in v:
|
||||
raise PydanticCustomError("format_invalid", "Embedding model identifier must follow <model_name>@<provider> format")
|
||||
|
||||
components = v.split("@", 1)
|
||||
if len(components) != 2 or not all(components):
|
||||
raise PydanticCustomError("format_invalid", "Both model_name and provider must be non-empty strings")
|
||||
|
||||
model_name, provider = components
|
||||
if not model_name.strip() or not provider.strip():
|
||||
raise PydanticCustomError("format_invalid", "Model name and provider cannot be whitespace-only strings")
|
||||
return v
|
||||
|
||||
# @field_validator("permission", mode="before")
|
||||
# @classmethod
|
||||
# def normalize_permission(cls, v: Any) -> Any:
|
||||
# return normalize_str(v)
|
||||
|
||||
@field_validator("parser_config", mode="before")
|
||||
@classmethod
|
||||
def normalize_empty_parser_config(cls, v: Any) -> Any:
|
||||
"""
|
||||
Normalizes empty parser configuration by converting empty dictionaries to None.
|
||||
|
||||
This validator ensures consistent handling of empty parser configurations across
|
||||
the application by converting empty dicts to None values.
|
||||
|
||||
Args:
|
||||
v (Any): Raw input value for the parser config field
|
||||
|
||||
Returns:
|
||||
Any: Returns None if input is an empty dict, otherwise returns the original value
|
||||
|
||||
Example:
|
||||
>>> normalize_empty_parser_config({})
|
||||
None
|
||||
|
||||
>>> normalize_empty_parser_config({"key": "value"})
|
||||
{"key": "value"}
|
||||
"""
|
||||
if v == {}:
|
||||
return None
|
||||
return v
|
||||
|
||||
@field_validator("parser_config", mode="after")
|
||||
@classmethod
|
||||
def validate_parser_config_json_length(cls, v: ParserConfig | None) -> ParserConfig | None:
|
||||
"""
|
||||
Validates serialized JSON length constraints for parser configuration.
|
||||
|
||||
Implements a three-stage validation workflow:
|
||||
1. Null check - bypass validation for empty configurations
|
||||
2. Model serialization - convert Pydantic model to JSON string
|
||||
3. Size verification - enforce maximum allowed payload size
|
||||
|
||||
Args:
|
||||
v (ParserConfig | None): Raw parser configuration object
|
||||
|
||||
Returns:
|
||||
ParserConfig | None: Validated configuration object
|
||||
|
||||
Raises:
|
||||
PydanticCustomError: When serialized JSON exceeds 65,535 characters
|
||||
"""
|
||||
if v is None:
|
||||
return None
|
||||
|
||||
if (json_str := v.model_dump_json()) and len(json_str) > 65535:
|
||||
raise PydanticCustomError("string_too_long", "Parser config exceeds size limit (max 65,535 characters). Current size: {actual}", {"actual": len(json_str)})
|
||||
return v
|
||||
|
||||
|
||||
class UpdateDatasetReq(CreateDatasetReq):
|
||||
dataset_id: Annotated[str, Field(...)]
|
||||
name: Annotated[str, StringConstraints(strip_whitespace=True, min_length=1, max_length=DATASET_NAME_LIMIT), Field(default="")]
|
||||
pagerank: Annotated[int, Field(default=0, ge=0, le=100)]
|
||||
|
||||
@field_validator("dataset_id", mode="before")
|
||||
@classmethod
|
||||
def validate_dataset_id(cls, v: Any) -> str:
|
||||
return validate_uuid1_hex(v)
|
||||
|
||||
|
||||
class DeleteReq(Base):
|
||||
ids: Annotated[list[str] | None, Field(...)]
|
||||
|
||||
@field_validator("ids", mode="after")
|
||||
@classmethod
|
||||
def validate_ids(cls, v_list: list[str] | None) -> list[str] | None:
|
||||
"""
|
||||
Validates and normalizes a list of UUID strings with None handling.
|
||||
|
||||
This post-processing validator performs:
|
||||
1. None input handling (pass-through)
|
||||
2. UUID version 1 validation for each list item
|
||||
3. Duplicate value detection
|
||||
4. Returns normalized UUID hex strings or None
|
||||
|
||||
Args:
|
||||
v_list (list[str] | None): Input list that has passed initial validation.
|
||||
Either a list of UUID strings or None.
|
||||
|
||||
Returns:
|
||||
list[str] | None:
|
||||
- None if input was None
|
||||
- List of normalized UUID hex strings otherwise:
|
||||
* 32-character lowercase
|
||||
* Valid UUID version 1
|
||||
* Unique within list
|
||||
|
||||
Raises:
|
||||
PydanticCustomError: With structured error details when:
|
||||
- "invalid_UUID1_format": Any string fails UUIDv1 validation
|
||||
- "duplicate_uuids": If duplicate IDs are detected
|
||||
|
||||
Validation Rules:
|
||||
- None input returns None
|
||||
- Empty list returns empty list
|
||||
- All non-None items must be valid UUIDv1
|
||||
- No duplicates permitted
|
||||
- Original order preserved
|
||||
|
||||
Examples:
|
||||
Valid cases:
|
||||
>>> validate_ids(None)
|
||||
None
|
||||
>>> validate_ids([])
|
||||
[]
|
||||
>>> validate_ids(["550e8400-e29b-41d4-a716-446655440000"])
|
||||
["550e8400e29b41d4a716446655440000"]
|
||||
|
||||
Invalid cases:
|
||||
>>> validate_ids(["invalid"])
|
||||
# raises PydanticCustomError(invalid_UUID1_format)
|
||||
>>> validate_ids(["550e...", "550e..."])
|
||||
# raises PydanticCustomError(duplicate_uuids)
|
||||
|
||||
Security Notes:
|
||||
- Validates UUID version to prevent version spoofing
|
||||
- Duplicate check prevents data injection
|
||||
- None handling maintains pipeline integrity
|
||||
"""
|
||||
if v_list is None:
|
||||
return None
|
||||
|
||||
ids_list = []
|
||||
for v in v_list:
|
||||
ids_list.append(validate_uuid1_hex(v))
|
||||
|
||||
duplicates = [item for item, count in Counter(ids_list).items() if count > 1]
|
||||
if duplicates:
|
||||
duplicates_str = ", ".join(duplicates)
|
||||
raise PydanticCustomError("duplicate_uuids", "Duplicate ids: '{duplicate_ids}'", {"duplicate_ids": duplicates_str})
|
||||
|
||||
return ids_list
|
||||
|
||||
|
||||
class DeleteDatasetReq(DeleteReq): ...
|
||||
|
||||
|
||||
class BaseListReq(BaseModel):
|
||||
model_config = ConfigDict(extra="forbid")
|
||||
|
||||
id: Annotated[str | None, Field(default=None)]
|
||||
name: Annotated[str | None, Field(default=None)]
|
||||
page: Annotated[int, Field(default=1, ge=1)]
|
||||
page_size: Annotated[int, Field(default=30, ge=1)]
|
||||
orderby: Annotated[Literal["create_time", "update_time"], Field(default="create_time")]
|
||||
desc: Annotated[bool, Field(default=True)]
|
||||
|
||||
@field_validator("id", mode="before")
|
||||
@classmethod
|
||||
def validate_id(cls, v: Any) -> str:
|
||||
return validate_uuid1_hex(v)
|
||||
|
||||
|
||||
class ListDatasetReq(BaseListReq): ...
|
||||
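A hedged sketch of consuming these validators from a Flask view. The route, response shape and error code are assumptions; only CreateDatasetReq and validate_and_parse_json_request come from this file.

from flask import Flask, jsonify, request

from api.utils.validation_utils import CreateDatasetReq, validate_and_parse_json_request  # assumed path

app = Flask(__name__)

@app.post("/v1/datasets")
def create_dataset():
    payload, err = validate_and_parse_json_request(request, CreateDatasetReq)
    if err is not None:
        return jsonify({"code": 101, "message": err}), 400
    # payload carries serialization aliases, e.g. "embd_id" and "parser_id"
    return jsonify({"code": 0, "data": payload}), 200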
201
api/utils/web_utils.py
Normal file
201
api/utils/web_utils.py
Normal file
@@ -0,0 +1,201 @@
|
||||
#
|
||||
# Copyright 2025 The InfiniFlow Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import base64
|
||||
import ipaddress
|
||||
import json
|
||||
import re
|
||||
import socket
|
||||
from urllib.parse import urlparse
|
||||
|
||||
from api.apps import smtp_mail_server
|
||||
from flask_mail import Message
|
||||
from flask import render_template_string
|
||||
from selenium import webdriver
|
||||
from selenium.common.exceptions import TimeoutException
|
||||
from selenium.webdriver.chrome.options import Options
|
||||
from selenium.webdriver.chrome.service import Service
|
||||
from selenium.webdriver.common.by import By
|
||||
from selenium.webdriver.support.expected_conditions import staleness_of
|
||||
from selenium.webdriver.support.ui import WebDriverWait
|
||||
from webdriver_manager.chrome import ChromeDriverManager
|
||||
|
||||
|
||||
|
||||
CONTENT_TYPE_MAP = {
|
||||
# Office
|
||||
"docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
|
||||
"doc": "application/msword",
|
||||
"pdf": "application/pdf",
|
||||
"csv": "text/csv",
|
||||
"xls": "application/vnd.ms-excel",
|
||||
"xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
|
||||
# Text/code
|
||||
"txt": "text/plain",
|
||||
"py": "text/plain",
|
||||
"js": "text/plain",
|
||||
"java": "text/plain",
|
||||
"c": "text/plain",
|
||||
"cpp": "text/plain",
|
||||
"h": "text/plain",
|
||||
"php": "text/plain",
|
||||
"go": "text/plain",
|
||||
"ts": "text/plain",
|
||||
"sh": "text/plain",
|
||||
"cs": "text/plain",
|
||||
"kt": "text/plain",
|
||||
"sql": "text/plain",
|
||||
# Web
|
||||
"md": "text/markdown",
|
||||
"markdown": "text/markdown",
|
||||
"htm": "text/html",
|
||||
"html": "text/html",
|
||||
"json": "application/json",
|
||||
# Image formats
|
||||
"png": "image/png",
|
||||
"jpg": "image/jpeg",
|
||||
"jpeg": "image/jpeg",
|
||||
"gif": "image/gif",
|
||||
"bmp": "image/bmp",
|
||||
"tiff": "image/tiff",
|
||||
"tif": "image/tiff",
|
||||
"webp": "image/webp",
|
||||
"svg": "image/svg+xml",
|
||||
"ico": "image/x-icon",
|
||||
"avif": "image/avif",
|
||||
"heic": "image/heic",
|
||||
}
|
||||
|
||||
|
||||
def html2pdf(
|
||||
source: str,
|
||||
timeout: int = 2,
|
||||
install_driver: bool = True,
|
||||
print_options: dict = {},
|
||||
):
|
||||
result = __get_pdf_from_html(source, timeout, install_driver, print_options)
|
||||
return result
|
||||
|
||||
|
||||
def __send_devtools(driver, cmd, params={}):
|
||||
resource = "/session/%s/chromium/send_command_and_get_result" % driver.session_id
|
||||
url = driver.command_executor._url + resource
|
||||
body = json.dumps({"cmd": cmd, "params": params})
|
||||
response = driver.command_executor._request("POST", url, body)
|
||||
|
||||
if not response:
|
||||
raise Exception("DevTools command '%s' returned no response" % cmd)
|
||||
|
||||
return response.get("value")
|
||||
|
||||
|
||||
def __get_pdf_from_html(path: str, timeout: int, install_driver: bool, print_options: dict):
|
||||
webdriver_options = Options()
|
||||
webdriver_prefs = {}
|
||||
webdriver_options.add_argument("--headless")
|
||||
webdriver_options.add_argument("--disable-gpu")
|
||||
webdriver_options.add_argument("--no-sandbox")
|
||||
webdriver_options.add_argument("--disable-dev-shm-usage")
|
||||
webdriver_options.experimental_options["prefs"] = webdriver_prefs
|
||||
|
||||
webdriver_prefs["profile.default_content_settings"] = {"images": 2}
|
||||
|
||||
if install_driver:
|
||||
service = Service(ChromeDriverManager().install())
|
||||
driver = webdriver.Chrome(service=service, options=webdriver_options)
|
||||
else:
|
||||
driver = webdriver.Chrome(options=webdriver_options)
|
||||
|
||||
driver.get(path)
|
||||
|
||||
try:
|
||||
WebDriverWait(driver, timeout).until(staleness_of(driver.find_element(by=By.TAG_NAME, value="html")))
|
||||
except TimeoutException:
|
||||
calculated_print_options = {
|
||||
"landscape": False,
|
||||
"displayHeaderFooter": False,
|
||||
"printBackground": True,
|
||||
"preferCSSPageSize": True,
|
||||
}
|
||||
calculated_print_options.update(print_options)
|
||||
result = __send_devtools(driver, "Page.printToPDF", calculated_print_options)
|
||||
driver.quit()
|
||||
return base64.b64decode(result["data"])
|
||||
|
||||
|
||||
def is_private_ip(ip: str) -> bool:
|
||||
try:
|
||||
ip_obj = ipaddress.ip_address(ip)
|
||||
return ip_obj.is_private
|
||||
except ValueError:
|
||||
return False
|
||||
|
||||
|
||||
def is_valid_url(url: str) -> bool:
|
||||
if not re.match(r"(https?)://[-A-Za-z0-9+&@#/%?=~_|!:,.;]+[-A-Za-z0-9+&@#/%=~_|]", url):
|
||||
return False
|
||||
parsed_url = urlparse(url)
|
||||
hostname = parsed_url.hostname
|
||||
|
||||
if not hostname:
|
||||
return False
|
||||
try:
|
||||
ip = socket.gethostbyname(hostname)
|
||||
if is_private_ip(ip):
|
||||
return False
|
||||
except socket.gaierror:
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
def safe_json_parse(data: str | dict) -> dict:
    if isinstance(data, dict):
        return data
    try:
        return json.loads(data) if data else {}
    except (json.JSONDecodeError, TypeError):
        return {}


def get_float(req: dict, key: str, default: float | int = 10.0) -> float:
    try:
        parsed = float(req.get(key, default))
        return parsed if parsed > 0 else default
    except (TypeError, ValueError):
        return default


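# Behaviour sketch for the two request helpers above (values are illustrative):
#
#     safe_json_parse('{"a": 1}')                 # -> {"a": 1}
#     safe_json_parse("not json")                 # -> {}
#     get_float({"top_k": "5"}, "top_k", 10.0)    # -> 5.0
#     get_float({"top_k": -1}, "top_k", 10.0)     # -> 10.0 (non-positive falls back to default)

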
INVITE_EMAIL_TMPL = """
|
||||
<p>Hi {{email}},</p>
|
||||
<p>{{inviter}} has invited you to join their team (ID: {{tenant_id}}).</p>
|
||||
<p>Click the link below to complete your registration:<br>
|
||||
<a href="{{invite_url}}">{{invite_url}}</a></p>
|
||||
<p>If you did not request this, please ignore this email.</p>
|
||||
"""
|
||||
|
||||
def send_invite_email(to_email, invite_url, tenant_id, inviter):
    from api.apps import app
    with app.app_context():
        msg = Message(subject="RAGFlow Invitation",
                      recipients=[to_email])
        msg.html = render_template_string(
            INVITE_EMAIL_TMPL,
            email=to_email,
            invite_url=invite_url,
            tenant_id=tenant_id,
            inviter=inviter,
        )
        smtp_mail_server.send(msg)
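A minimal usage sketch for the invite mail helper (the addresses, URL, and tenant ID below are illustrative assumptions; the Flask app context and smtp_mail_server come from api.apps as in the code above):

    send_invite_email(
        to_email="newcomer@example.com",
        invite_url="https://ragflow.example.com/invite?token=...",
        tenant_id="tenant-123",
        inviter="admin@example.com",
    )

Because render_template_string is used, the {{...}} placeholders in INVITE_EMAIL_TMPL are filled by Jinja2 at send time.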
49
api/validation.py
Normal file
@@ -0,0 +1,49 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import logging
import sys


def python_version_validation():
    # Check the Python version; 3.10 or newer is required.
    required_python_version = (3, 10)
    if sys.version_info < required_python_version:
        logging.info(
            f"Required Python: >= {required_python_version[0]}.{required_python_version[1]}. Current Python version: {sys.version_info[0]}.{sys.version_info[1]}."
        )
        sys.exit(1)
    else:
        logging.info(f"Python version: {sys.version_info[0]}.{sys.version_info[1]}")


python_version_validation()


# Download NLTK data
def download_nltk_data():
    import nltk
    nltk.download('wordnet', halt_on_error=False, quiet=True)
    nltk.download('punkt_tab', halt_on_error=False, quiet=True)


# Run the download in a one-process pool so a hung network call cannot block
# module import for more than 60 seconds.
try:
    from multiprocessing import Pool
    pool = Pool(processes=1)
    thread = pool.apply_async(download_nltk_data)
    binary = thread.get(timeout=60)
except Exception:
    print('\x1b[6;37;41m WARNING \x1b[0m' + "Failed to download NLTK data.", flush=True)
52
api/versions.py
Normal file
@@ -0,0 +1,52 @@
#
# Copyright 2024 The InfiniFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import os
import subprocess

RAGFLOW_VERSION_INFO = "unknown"


def get_ragflow_version() -> str:
    global RAGFLOW_VERSION_INFO
    if RAGFLOW_VERSION_INFO != "unknown":
        return RAGFLOW_VERSION_INFO
    version_path = os.path.abspath(
        os.path.join(
            os.path.dirname(os.path.realpath(__file__)), os.pardir, "VERSION"
        )
    )
    if os.path.exists(version_path):
        with open(version_path, "r") as f:
            RAGFLOW_VERSION_INFO = f.read().strip()
    else:
        RAGFLOW_VERSION_INFO = get_closest_tag_and_count()
    LIGHTEN = int(os.environ.get("LIGHTEN", "0"))
    RAGFLOW_VERSION_INFO += " slim" if LIGHTEN == 1 else " full"
    return RAGFLOW_VERSION_INFO


def get_closest_tag_and_count():
    try:
        # Describe the closest v* tag reachable from the current commit
        # (falls back to the commit hash because of --always).
        version_info = (
            subprocess.check_output(["git", "describe", "--tags", "--match=v*", "--first-parent", "--always"])
            .strip()
            .decode("utf-8")
        )
        return version_info
    except Exception:
        return "unknown"
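A quick sketch of what get_ragflow_version returns (the tag string below is an assumed example): a VERSION file at the repository root takes precedence, otherwise git describe is used, and the LIGHTEN flag picks the edition suffix. The result is cached in the module-level global on first call.

    os.environ["LIGHTEN"] = "1"
    print(get_ragflow_version())   # e.g. "v0.17.2 slim" ("unknown slim" outside a git checkout)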