diff --git a/.env b/.env
new file mode 100644
index 0000000..c074e5b
--- /dev/null
+++ b/.env
@@ -0,0 +1 @@
+VITE_API_BASE_URL=http://150.158.121.95
diff --git a/package.json b/package.json
index 2b48f15..04856e9 100644
--- a/package.json
+++ b/package.json
@@ -19,6 +19,7 @@
"dayjs": "^1.11.18",
"i18next": "^25.5.3",
"i18next-browser-languagedetector": "^8.2.0",
+ "lodash": "^4.17.21",
"react": "^18.3.1",
"react-dom": "^18.3.1",
"react-hook-form": "^7.64.0",
@@ -30,6 +31,7 @@
},
"devDependencies": {
"@eslint/js": "^9.36.0",
+ "@types/lodash": "^4.17.20",
"@types/node": "^24.6.0",
"@types/react": "^19.1.16",
"@types/react-dom": "^19.1.9",
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index 38ea68f..c6d77df 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -35,6 +35,9 @@ importers:
i18next-browser-languagedetector:
specifier: ^8.2.0
version: 8.2.0
+ lodash:
+ specifier: ^4.17.21
+ version: 4.17.21
react:
specifier: ^18.3.1
version: 18.3.1
@@ -63,6 +66,9 @@ importers:
'@eslint/js':
specifier: ^9.36.0
version: 9.37.0
+ '@types/lodash':
+ specifier: ^4.17.20
+ version: 4.17.20
'@types/node':
specifier: ^24.6.0
version: 24.7.1
@@ -717,6 +723,9 @@ packages:
'@types/json-schema@7.0.15':
resolution: {integrity: sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==}
+ '@types/lodash@4.17.20':
+ resolution: {integrity: sha512-H3MHACvFUEiujabxhaI/ImO6gUrd8oOurg7LQtS7mbwIXA/cUqWrvBsaeJ23aZEPk1TAYkurjfMbSELfoCXlGA==}
+
'@types/node@24.7.1':
resolution: {integrity: sha512-CmyhGZanP88uuC5GpWU9q+fI61j2SkhO3UGMUdfYRE6Bcy0ccyzn1Rqj9YAB/ZY4kOXmNf0ocah5GtphmLMP6Q==}
@@ -2204,6 +2213,8 @@ snapshots:
'@types/json-schema@7.0.15': {}
+ '@types/lodash@4.17.20': {}
+
'@types/node@24.7.1':
dependencies:
undici-types: 7.14.0
diff --git a/src/App.tsx b/src/App.tsx
index 9feddd1..28506d3 100644
--- a/src/App.tsx
+++ b/src/App.tsx
@@ -2,17 +2,30 @@ import { BrowserRouter } from 'react-router-dom';
import { CssBaseline, ThemeProvider } from '@mui/material';
import { theme } from './theme';
import AppRoutes from './routes';
+import SnackbarProvider from './components/Provider/SnackbarProvider';
+
import './locales';
-function App() {
+/**
+ * Wraps the app with Material UI concerns: theme, CSS baseline, snackbar provider and routes.
+ */
+function MaterialUIApp() {
return (
-
-
-
+
+
+
+
+
);
}
+function App() {
+ return (
+
+ );
+}
+
export default App;
diff --git a/src/components/LanguageSwitcher.tsx b/src/components/LanguageSwitcher.tsx
index 58e89f1..f6e7d4d 100644
--- a/src/components/LanguageSwitcher.tsx
+++ b/src/components/LanguageSwitcher.tsx
@@ -2,6 +2,7 @@ import React from 'react';
import { useTranslation } from 'react-i18next';
import { Button, Menu, MenuItem } from '@mui/material';
import { LanguageAbbreviation } from '../locales';
+import type { LanguageAbbreviationType } from '../locales';
interface LanguageSwitcherProps {
textColor?: string;
@@ -20,7 +21,7 @@ const LanguageSwitcher: React.FC = ({ textColor = '#fff'
setAnchorEl(null);
};
- const handleLanguageChange = (language: "en" | "zh") => {
+ const handleLanguageChange = (language: LanguageAbbreviationType) => {
i18n.changeLanguage(language);
handleClose();
};
diff --git a/src/components/Provider/SnackbarProvider.tsx b/src/components/Provider/SnackbarProvider.tsx
new file mode 100644
index 0000000..51a6e66
--- /dev/null
+++ b/src/components/Provider/SnackbarProvider.tsx
@@ -0,0 +1,115 @@
+import React, { createContext, useContext, useState, useCallback, useEffect, type PropsWithChildren } from 'react';
+import { Snackbar, Alert, type AlertColor } from '@mui/material';
+
+interface SnackbarItem {
+ id: string;
+ message: string;
+ severity: AlertColor;
+ duration?: number;
+ title?: string;
+}
+
+interface SnackbarContextType {
+ showSnackbar: (snackbar: Omit) => void;
+ showMessage: {
+ success: (message: string, duration?: number) => void;
+ error: (message: string, duration?: number) => void;
+ warning: (message: string, duration?: number) => void;
+ info: (message: string, duration?: number) => void;
+ };
+ showNotification: {
+ success: (title: string, message?: string, duration?: number) => void;
+ error: (title: string, message?: string, duration?: number) => void;
+ warning: (title: string, message?: string, duration?: number) => void;
+ info: (title: string, message?: string, duration?: number) => void;
+ };
+}
+
+const SnackbarContext = createContext(null);
+
+export const useSnackbar = () => {
+ const context = useContext(SnackbarContext);
+ if (!context) {
+ throw new Error('useSnackbar must be used within SnackbarProvider');
+ }
+ return context;
+};
+
+export default function SnackbarProvider({ children }: PropsWithChildren) {
+ const [snackbars, setSnackbars] = useState([]);
+
+ const showSnackbar = useCallback((snackbar: Omit) => {
+ const id = Date.now().toString();
+ const newSnackbar = { ...snackbar, id };
+ setSnackbars(prev => [...prev, newSnackbar]);
+ }, []);
+
+ const removeSnackbar = useCallback((id: string) => {
+ setSnackbars(prev => prev.filter(snackbar => snackbar.id !== id));
+ }, []);
+
+ const showMessage = {
+ success: (message: string, duration = 2000) =>
+ showSnackbar({ message, severity: 'success', duration }),
+ error: (message: string, duration = 4000) =>
+ showSnackbar({ message, severity: 'error', duration }),
+ warning: (message: string, duration = 3000) =>
+ showSnackbar({ message, severity: 'warning', duration }),
+ info: (message: string, duration = 2000) =>
+ showSnackbar({ message, severity: 'info', duration }),
+ };
+
+ const showNotification = {
+ success: (title: string, message?: string, duration = 5000) =>
+ showSnackbar({ title, message: message || title, severity: 'success', duration }),
+ error: (title: string, message?: string, duration = 6000) =>
+ showSnackbar({ title, message: message || title, severity: 'error', duration }),
+ warning: (title: string, message?: string, duration = 5000) =>
+ showSnackbar({ title, message: message || title, severity: 'warning', duration }),
+ info: (title: string, message?: string, duration = 4000) =>
+ showSnackbar({ title, message: message || title, severity: 'info', duration }),
+ };
+
+ // Expose a global instance on window so non-component modules can show snackbars
+ useEffect(() => {
+ if (typeof window !== 'undefined') {
+ (window as any).__snackbarInstance = { showSnackbar, showMessage, showNotification };
+ }
+ }, [showSnackbar, showMessage, showNotification]);
+
+ return (
+
+ {children}
+ {snackbars.map((snackbar, index) => (
+ removeSnackbar(snackbar.id)}
+ anchorOrigin={{ vertical: 'top', horizontal: 'center' }}
+ sx={{
+ top: `${80 + index * 70}px !important`,
+ zIndex: 9999 - index
+ }}
+ >
+ removeSnackbar(snackbar.id)}
+ severity={snackbar.severity}
+ variant="filled"
+ sx={{
+ minWidth: 300,
+ alignItems: snackbar.title ? 'flex-start' : 'center'
+ }}
+ >
+ {snackbar.title && (
+
+ {snackbar.title}
+
+ )}
+ {snackbar.message}
+
+
+ ))}
+
+ );
+}
diff --git a/src/constants/authorization.ts b/src/constants/authorization.ts
new file mode 100644
index 0000000..85b0930
--- /dev/null
+++ b/src/constants/authorization.ts
@@ -0,0 +1,3 @@
+export const Authorization = 'Authorization';
+export const Token = 'token';
+export const UserInfo = 'userInfo';
diff --git a/src/constants/knowledge.ts b/src/constants/knowledge.ts
new file mode 100644
index 0000000..78e3a7b
--- /dev/null
+++ b/src/constants/knowledge.ts
@@ -0,0 +1,107 @@
+// Frozen constant maps — Object.freeze prevents accidental runtime mutation
+export const KNOWLEDGE_ROUTE_KEYS = Object.freeze({
+ Dataset: 'dataset',
+ Testing: 'testing',
+ Configuration: 'configuration',
+ KnowledgeGraph: 'knowledgeGraph',
+} as const)
+
+export type KnowledgeRouteKey = (typeof KNOWLEDGE_ROUTE_KEYS)[keyof typeof KNOWLEDGE_ROUTE_KEYS]
+
+export const RUNNING_STATUS_KEYS = Object.freeze({
+ UNSTART: '0', // need to run
+ RUNNING: '1', // need to cancel
+ CANCEL: '2', // need to refresh
+ DONE: '3', // need to refresh
+ FAIL: '4', // need to refresh
+} as const)
+
+export type RunningStatus = (typeof RUNNING_STATUS_KEYS)[keyof typeof RUNNING_STATUS_KEYS]
+
+export const RunningStatusMap = {
+ [RUNNING_STATUS_KEYS.UNSTART]: 'Pending',
+ [RUNNING_STATUS_KEYS.RUNNING]: 'Running',
+ [RUNNING_STATUS_KEYS.CANCEL]: 'Cancel',
+ [RUNNING_STATUS_KEYS.DONE]: 'Success',
+ [RUNNING_STATUS_KEYS.FAIL]: 'Failed',
+} as const;
+export const MODEL_VARIABLE_TYPES = Object.freeze({
+ Improvise: 'Improvise',
+ Precise: 'Precise',
+ Balance: 'Balance',
+} as const)
+
+export type ModelVariableType = (typeof MODEL_VARIABLE_TYPES)[keyof typeof MODEL_VARIABLE_TYPES]
+
+
+export const settledModelVariableMap = {
+ [MODEL_VARIABLE_TYPES.Improvise]: {
+ temperature: 0.8,
+ top_p: 0.9,
+ frequency_penalty: 0.1,
+ presence_penalty: 0.1,
+ max_tokens: 4096,
+ },
+ [MODEL_VARIABLE_TYPES.Precise]: {
+ temperature: 0.2,
+ top_p: 0.75,
+ frequency_penalty: 0.5,
+ presence_penalty: 0.5,
+ max_tokens: 4096,
+ },
+ [MODEL_VARIABLE_TYPES.Balance]: {
+ temperature: 0.5,
+ top_p: 0.85,
+ frequency_penalty: 0.3,
+ presence_penalty: 0.2,
+ max_tokens: 4096,
+ },
+} as const;
+
+export const LLM_MODEL_TYPES = Object.freeze({
+ Embedding: 'embedding',
+ Chat: 'chat',
+ Image2text: 'image2text',
+ Speech2text: 'speech2text',
+ Rerank: 'rerank',
+ TTS: 'tts',
+} as const)
+
+export type LlmModelType = (typeof LLM_MODEL_TYPES)[keyof typeof LLM_MODEL_TYPES]
+
+export const DOCUMENT_TYPES = Object.freeze({
+ Virtual: 'virtual',
+ Visual: 'visual',
+} as const)
+
+export type DocumentType = (typeof DOCUMENT_TYPES)[keyof typeof DOCUMENT_TYPES]
+
+export const DOCUMENT_PARSER_TYPES = Object.freeze({
+ Naive: 'naive',
+ Qa: 'qa',
+ Resume: 'resume',
+ Manual: 'manual',
+ Table: 'table',
+ Paper: 'paper',
+ Book: 'book',
+ Laws: 'laws',
+ Presentation: 'presentation',
+ Picture: 'picture',
+ One: 'one',
+ Audio: 'audio',
+ Email: 'email',
+ Tag: 'tag',
+ KnowledgeGraph: 'knowledge_graph',
+} as const)
+
+export type DocumentParserType = (typeof DOCUMENT_PARSER_TYPES)[keyof typeof DOCUMENT_PARSER_TYPES]
+
+export const KNOWLEDGE_SEARCH_PARAMS_KEYS = Object.freeze({
+ DocumentId: 'doc_id',
+ KnowledgeId: 'id',
+ Type: 'type',
+} as const)
+
+export type KnowledgeSearchParams = (typeof KNOWLEDGE_SEARCH_PARAMS_KEYS)[keyof typeof KNOWLEDGE_SEARCH_PARAMS_KEYS]
+
+export const DATABASE_BASE_KEY = 'dataset';
diff --git a/src/hooks/useSnackbar.ts b/src/hooks/useSnackbar.ts
new file mode 100644
index 0000000..017d847
--- /dev/null
+++ b/src/hooks/useSnackbar.ts
@@ -0,0 +1,12 @@
+import { useSnackbar } from '@/components/Provider/SnackbarProvider';
+
+// Convenience hooks exposing slices of the snackbar context
+export const useMessage = () => {
+ const { showMessage } = useSnackbar();
+ return showMessage;
+};
+
+export const useNotification = () => {
+ const { showNotification } = useSnackbar();
+ return showNotification;
+};
\ No newline at end of file
diff --git a/src/interfaces/common.ts b/src/interfaces/common.ts
new file mode 100644
index 0000000..e0840bb
--- /dev/null
+++ b/src/interfaces/common.ts
@@ -0,0 +1,25 @@
+export interface Pagination {
+ current: number;
+ pageSize: number;
+ total: number;
+}
+
+export interface BaseState {
+ pagination: Pagination;
+ searchString: string;
+}
+
+export interface IModalProps {
+ showModal?(): void;
+ hideModal?(): void;
+ switchVisible?(visible: boolean): void;
+ visible?: boolean;
+ loading?: boolean;
+ onOk?(payload?: T): Promise | void;
+}
+
+export interface ResponseType {
+ code: number;
+ message?: string;
+ data?: any;
+}
diff --git a/src/interfaces/database/agent.ts b/src/interfaces/database/agent.ts
new file mode 100644
index 0000000..8bd33e8
--- /dev/null
+++ b/src/interfaces/database/agent.ts
@@ -0,0 +1,278 @@
+export interface ICategorizeItem {
+ name: string;
+ description?: string;
+ examples?: { value: string }[];
+ index: number;
+ to: string[];
+ uuid: string;
+}
+
+export type ICategorizeItemResult = Record<
+ string,
+ Omit & { examples: string[] }
+>;
+
+export interface ISwitchCondition {
+ items: ISwitchItem[];
+ logical_operator: string;
+ to: string[];
+}
+
+export interface ISwitchItem {
+ cpn_id: string;
+ operator: string;
+ value: string;
+}
+
+export interface ISwitchForm {
+ conditions: ISwitchCondition[];
+ end_cpn_ids: string[];
+ no: string;
+}
+
+import { AgentCategory } from '@/constants/agent';
+import { Edge, Node } from '@xyflow/react';
+import { IReference, Message } from './chat';
+
+export type DSLComponents = Record;
+
+export interface DSL {
+ components: DSLComponents;
+ history: any[];
+ path?: string[];
+ answer?: any[];
+ graph?: IGraph;
+ messages?: Message[];
+ reference?: IReference[];
+ globals: Record;
+ retrieval: IReference[];
+}
+
+export interface IOperator {
+ obj: IOperatorNode;
+ downstream: string[];
+ upstream: string[];
+ parent_id?: string;
+}
+
+export interface IOperatorNode {
+ component_name: string;
+ params: Record;
+}
+
+export declare interface IFlow {
+ avatar?: string;
+ canvas_type: null;
+ create_date: string;
+ create_time: number;
+ description: null;
+ dsl: DSL;
+ id: string;
+ title: string;
+ update_date: string;
+ update_time: number;
+ user_id: string;
+ permission: string;
+ nickname: string;
+ operator_permission: number;
+ canvas_category: string;
+}
+
+export interface IFlowTemplate {
+ avatar: string;
+ canvas_type: string;
+ create_date: string;
+ create_time: number;
+ description: string;
+ dsl: DSL;
+ id: string;
+ title: string;
+ update_date: string;
+ update_time: number;
+}
+
+export interface IGenerateForm {
+ max_tokens?: number;
+ temperature?: number;
+ top_p?: number;
+ presence_penalty?: number;
+ frequency_penalty?: number;
+ cite?: boolean;
+ prompt: number;
+ llm_id: string;
+ parameters: { key: string; component_id: string };
+}
+
+export interface ICategorizeForm extends IGenerateForm {
+ category_description: ICategorizeItemResult;
+ items: ICategorizeItem[];
+}
+
+export interface IRelevantForm extends IGenerateForm {
+ yes: string;
+ no: string;
+}
+
+export interface ISwitchItem {
+ cpn_id: string;
+ operator: string;
+ value: string;
+}
+
+export interface ISwitchForm {
+ conditions: ISwitchCondition[];
+ end_cpn_id: string;
+ no: string;
+}
+
+export interface IBeginForm {
+ prologue?: string;
+}
+
+export interface IRetrievalForm {
+ similarity_threshold?: number;
+ keywords_similarity_weight?: number;
+ top_n?: number;
+ top_k?: number;
+ rerank_id?: string;
+ empty_response?: string;
+ kb_ids: string[];
+}
+
+export interface ICodeForm {
+ arguments: Record;
+ lang: string;
+ script?: string;
+ outputs: Record;
+}
+
+export interface IAgentForm {
+ sys_prompt: string;
+ prompts: Array<{
+ role: string;
+ content: string;
+ }>;
+ max_retries: number;
+ delay_after_error: number;
+ visual_files_var: string;
+ max_rounds: number;
+ exception_method: Nullable<'comment' | 'go'>;
+ exception_comment: any;
+ exception_goto: any;
+ tools: Array<{
+ name: string;
+ component_name: string;
+ params: Record;
+ }>;
+ mcp: Array<{
+ mcp_id: string;
+ tools: Record>;
+ }>;
+ outputs: {
+ structured_output: Record>;
+ content: Record;
+ };
+}
+
+export type BaseNodeData = {
+ label: string; // operator type
+ name: string; // operator name
+ color?: string;
+ form?: TForm;
+};
+
+export type BaseNode = Node>;
+
+export type IBeginNode = BaseNode;
+export type IRetrievalNode = BaseNode;
+export type IGenerateNode = BaseNode;
+export type ICategorizeNode = BaseNode;
+export type ISwitchNode = BaseNode;
+export type IRagNode = BaseNode;
+export type IRelevantNode = BaseNode;
+export type ILogicNode = BaseNode;
+export type INoteNode = BaseNode;
+export type IMessageNode = BaseNode;
+export type IRewriteNode = BaseNode;
+export type IInvokeNode = BaseNode;
+export type ITemplateNode = BaseNode;
+export type IEmailNode = BaseNode;
+export type IIterationNode = BaseNode;
+export type IIterationStartNode = BaseNode;
+export type IKeywordNode = BaseNode;
+export type ICodeNode = BaseNode;
+export type IAgentNode = BaseNode;
+export type IToolNode = BaseNode;
+
+export type RAGFlowNodeType =
+ | IBeginNode
+ | IRetrievalNode
+ | IGenerateNode
+ | ICategorizeNode
+ | ISwitchNode
+ | IRagNode
+ | IRelevantNode
+ | ILogicNode
+ | INoteNode
+ | IMessageNode
+ | IRewriteNode
+ | IInvokeNode
+ | ITemplateNode
+ | IEmailNode
+ | IIterationNode
+ | IIterationStartNode
+ | IKeywordNode;
+
+export interface IGraph {
+ nodes: RAGFlowNodeType[];
+ edges: Edge[];
+}
+
+export interface ITraceData {
+ component_id: string;
+ trace: Array>;
+}
+
+export interface IAgentLogResponse {
+ id: string;
+ message: IAgentLogMessage[];
+ update_date: string;
+ create_date: string;
+ update_time: number;
+ create_time: number;
+ round: number;
+ thumb_up: number;
+ errors: string;
+ source: string;
+ user_id: string;
+ dsl: string;
+ reference: IReference;
+}
+export interface IAgentLogsResponse {
+ total: number;
+ sessions: IAgentLogResponse[];
+}
+export interface IAgentLogsRequest {
+ keywords?: string;
+ to_date?: string | Date;
+ from_date?: string | Date;
+ orderby?: string;
+ desc?: boolean;
+ page?: number;
+ page_size?: number;
+}
+
+export interface IAgentLogMessage {
+ content: string;
+ role: 'user' | 'assistant';
+ id: string;
+}
+
+export interface IPipeLineListRequest {
+ page?: number;
+ page_size?: number;
+ keywords?: string;
+ orderby?: string;
+ desc?: boolean;
+ canvas_category?: AgentCategory;
+}
diff --git a/src/interfaces/database/base.ts b/src/interfaces/database/base.ts
new file mode 100644
index 0000000..da02afc
--- /dev/null
+++ b/src/interfaces/database/base.ts
@@ -0,0 +1,17 @@
+export interface ResponseType {
+ code: number;
+ data: T;
+ message: string;
+ status: number;
+}
+
+export interface ResponseGetType {
+ data: T;
+ loading?: boolean;
+}
+
+export interface ResponsePostType {
+ data: T;
+ loading?: boolean;
+ [key: string]: unknown;
+}
diff --git a/src/interfaces/database/chat.ts b/src/interfaces/database/chat.ts
new file mode 100644
index 0000000..62bcb46
--- /dev/null
+++ b/src/interfaces/database/chat.ts
@@ -0,0 +1,182 @@
+import { MessageType } from '@/constants/chat';
+
+export interface PromptConfig {
+ empty_response: string;
+ parameters: Parameter[];
+ prologue: string;
+ system: string;
+ tts?: boolean;
+ quote: boolean;
+ keyword: boolean;
+ refine_multiturn: boolean;
+ use_kg: boolean;
+ reasoning?: boolean;
+ cross_languages?: Array;
+}
+
+export interface Parameter {
+ key: string;
+ optional: boolean;
+}
+
+export interface LlmSetting {
+ Creative: Variable;
+ Custom: Variable;
+ Evenly: Variable;
+ Precise: Variable;
+}
+
+export interface Variable {
+ frequency_penalty?: number;
+ max_tokens?: number;
+ presence_penalty?: number;
+ temperature?: number;
+ top_p?: number;
+ llm_id?: string;
+}
+
+export interface IDialog {
+ create_date: string;
+ create_time: number;
+ description: string;
+ icon: string;
+ id: string;
+ dialog_id: string;
+ kb_ids: string[];
+ kb_names: string[];
+ language: string;
+ llm_id: string;
+ llm_setting: Variable;
+ llm_setting_type: string;
+ name: string;
+ prompt_config: PromptConfig;
+ prompt_type: string;
+ status: string;
+ tenant_id: string;
+ update_date: string;
+ update_time: number;
+ vector_similarity_weight: number;
+ similarity_threshold: number;
+ top_k: number;
+ top_n: number;
+ meta_data_filter: MetaDataFilter;
+}
+
+interface MetaDataFilter {
+ manual: Manual[];
+ method: string;
+}
+
+interface Manual {
+ key: string;
+ op: string;
+ value: string;
+}
+
+export interface IConversation {
+ create_date: string;
+ create_time: number;
+ dialog_id: string;
+ id: string;
+ avatar: string;
+ message: Message[];
+ reference: IReference[];
+ name: string;
+ update_date: string;
+ update_time: number;
+ is_new: true;
+}
+
+export interface Message {
+ content: string;
+ role: MessageType;
+ doc_ids?: string[];
+ prompt?: string;
+ id?: string;
+ audio_binary?: string;
+ data?: any;
+ files?: File[];
+ chatBoxId?: string;
+}
+
+export interface IReferenceChunk {
+ id: string;
+ content: null;
+ document_id: string;
+ document_name: string;
+ dataset_id: string;
+ image_id: string;
+ similarity: number;
+ vector_similarity: number;
+ term_similarity: number;
+ positions: number[];
+ doc_type?: string;
+}
+
+export interface IReference {
+ chunks: IReferenceChunk[];
+ doc_aggs: Docagg[];
+ total: number;
+}
+
+export interface IReferenceObject {
+ chunks: Record;
+ doc_aggs: Record;
+}
+
+export interface IAnswer {
+ answer: string;
+ reference?: IReference;
+ conversationId?: string;
+ prompt?: string;
+ id?: string;
+ audio_binary?: string;
+ data?: any;
+ chatBoxId?: string;
+}
+
+export interface Docagg {
+ count: number;
+ doc_id: string;
+ doc_name: string;
+ url?: string;
+}
+
+// interface Chunk {
+// chunk_id: string;
+// content_ltks: string;
+// content_with_weight: string;
+// doc_id: string;
+// docnm_kwd: string;
+// img_id: string;
+// important_kwd: any[];
+// kb_id: string;
+// similarity: number;
+// term_similarity: number;
+// vector_similarity: number;
+// }
+
+export interface IToken {
+ create_date: string;
+ create_time: number;
+ tenant_id: string;
+ token: string;
+ update_date?: any;
+ update_time?: any;
+ beta: string;
+}
+
+export interface IStats {
+ pv: [string, number][];
+ uv: [string, number][];
+ speed: [string, number][];
+ tokens: [string, number][];
+ round: [string, number][];
+ thumb_up: [string, number][];
+}
+
+export interface IExternalChatInfo {
+ avatar?: string;
+ title: string;
+ prologue?: string;
+}
diff --git a/src/interfaces/database/document.ts b/src/interfaces/database/document.ts
new file mode 100644
index 0000000..5edbad1
--- /dev/null
+++ b/src/interfaces/database/document.ts
@@ -0,0 +1,58 @@
+import { RunningStatus } from '@/constants/knowledge';
+
+export interface IDocumentInfo {
+ chunk_num: number;
+ create_date: string;
+ create_time: number;
+ created_by: string;
+ nickname: string;
+ id: string;
+ kb_id: string;
+ location: string;
+ name: string;
+ parser_config: IParserConfig;
+ parser_id: string;
+ pipeline_id: string;
+ pipeline_name: string;
+ process_begin_at?: string;
+ process_duration: number;
+ progress: number;
+ progress_msg: string;
+ run: RunningStatus;
+ size: number;
+ source_type: string;
+ status: string;
+ suffix: string;
+ thumbnail: string;
+ token_num: number;
+ type: string;
+ update_date: string;
+ update_time: number;
+ meta_fields?: Record;
+}
+
+export interface IParserConfig {
+ delimiter?: string;
+ html4excel?: boolean;
+ layout_recognize?: boolean;
+ pages: any[];
+ raptor?: Raptor;
+ graphrag?: GraphRag;
+}
+
+interface Raptor {
+ use_raptor: boolean;
+}
+
+interface GraphRag {
+ community?: boolean;
+ entity_types?: string[];
+ method?: string;
+ resolution?: boolean;
+ use_graphrag?: boolean;
+}
+
+export type IDocumentInfoFilter = {
+ run_status: Record;
+ suffix: Record;
+};
diff --git a/src/interfaces/database/file-manager.ts b/src/interfaces/database/file-manager.ts
new file mode 100644
index 0000000..1a58e98
--- /dev/null
+++ b/src/interfaces/database/file-manager.ts
@@ -0,0 +1,39 @@
+export interface IFile {
+ create_date: string;
+ create_time: number;
+ created_by: string;
+ id: string;
+ kbs_info: { kb_id: string; kb_name: string }[];
+ location: string;
+ name: string;
+ parent_id: string;
+ size: number;
+ tenant_id: string;
+ type: string;
+ update_date: string;
+ update_time: number;
+ source_type: string;
+ has_child_folder?: boolean;
+}
+
+export interface IFolder {
+ create_date: string;
+ create_time: number;
+ created_by: string;
+ id: string;
+ location: string;
+ name: string;
+ parent_id: string;
+ size: number;
+ tenant_id: string;
+ type: string;
+ update_date: string;
+ update_time: number;
+ source_type: string;
+}
+
+export type IFetchFileListResult = {
+ files: IFile[];
+ parent_folder: IFolder;
+ total: number;
+};
diff --git a/src/interfaces/database/flow.ts b/src/interfaces/database/flow.ts
new file mode 100644
index 0000000..a17d215
--- /dev/null
+++ b/src/interfaces/database/flow.ts
@@ -0,0 +1,187 @@
+import { Edge, Node } from '@xyflow/react';
+import { IReference, Message } from './chat';
+
+export type DSLComponents = Record;
+
+export interface DSL {
+ components: DSLComponents;
+ history: any[];
+ path?: string[][];
+ answer?: any[];
+ graph?: IGraph;
+ messages: Message[];
+ reference: IReference[];
+ globals: Record;
+ retrieval: IReference[];
+}
+
+export interface IOperator {
+ obj: IOperatorNode;
+ downstream: string[];
+ upstream: string[];
+ parent_id?: string;
+}
+
+export interface IOperatorNode {
+ component_name: string;
+ params: Record;
+}
+
+export declare interface IFlow {
+ avatar?: string;
+ canvas_type: null;
+ create_date: string;
+ create_time: number;
+ description: string;
+ dsl: DSL;
+ id: string;
+ title: string;
+ update_date: string;
+ update_time: number;
+ user_id: string;
+ permission: string;
+ nickname: string;
+}
+
+export interface IFlowTemplate {
+ avatar: string;
+ canvas_type: string;
+ create_date: string;
+ create_time: number;
+ description: {
+ en: string;
+ zh: string;
+ };
+ dsl: DSL;
+ id: string;
+ title: {
+ en: string;
+ zh: string;
+ };
+ update_date: string;
+ update_time: number;
+}
+
+export type ICategorizeItemResult = Record<
+ string,
+ Omit
+>;
+
+export interface IGenerateForm {
+ max_tokens?: number;
+ temperature?: number;
+ top_p?: number;
+ presence_penalty?: number;
+ frequency_penalty?: number;
+ cite?: boolean;
+ prompt: number;
+ llm_id: string;
+ parameters: { key: string; component_id: string };
+}
+export interface ICategorizeItem {
+ name: string;
+ description?: string;
+ examples?: string;
+ to?: string;
+ index: number;
+}
+
+export interface ICategorizeForm extends IGenerateForm {
+ category_description: ICategorizeItemResult;
+}
+
+export interface IRelevantForm extends IGenerateForm {
+ yes: string;
+ no: string;
+}
+
+export interface ISwitchCondition {
+ items: ISwitchItem[];
+ logical_operator: string;
+ to: string[] | string;
+}
+
+export interface ISwitchItem {
+ cpn_id: string;
+ operator: string;
+ value: string;
+}
+
+export interface ISwitchForm {
+ conditions: ISwitchCondition[];
+ end_cpn_id: string;
+ no: string;
+}
+
+export interface IBeginForm {
+ prologue?: string;
+}
+
+export interface IRetrievalForm {
+ similarity_threshold?: number;
+ keywords_similarity_weight?: number;
+ top_n?: number;
+ top_k?: number;
+ rerank_id?: string;
+ empty_response?: string;
+ kb_ids: string[];
+}
+
+export interface ICodeForm {
+ inputs?: Array<{ name?: string; component_id?: string }>;
+ lang: string;
+ script?: string;
+}
+
+export type BaseNodeData = {
+ label: string; // operator type
+ name: string; // operator name
+ color?: string;
+ form?: TForm;
+};
+
+export type BaseNode = Node>;
+
+export type IBeginNode = BaseNode;
+export type IRetrievalNode = BaseNode;
+export type IGenerateNode = BaseNode;
+export type ICategorizeNode = BaseNode;
+export type ISwitchNode = BaseNode;
+export type IRagNode = BaseNode;
+export type IRelevantNode = BaseNode;
+export type ILogicNode = BaseNode;
+export type INoteNode = BaseNode;
+export type IMessageNode = BaseNode;
+export type IRewriteNode = BaseNode;
+export type IInvokeNode = BaseNode;
+export type ITemplateNode = BaseNode;
+export type IEmailNode = BaseNode;
+export type IIterationNode = BaseNode;
+export type IIterationStartNode = BaseNode;
+export type IKeywordNode = BaseNode;
+export type ICodeNode = BaseNode;
+export type IAgentNode = BaseNode;
+
+export type RAGFlowNodeType =
+ | IBeginNode
+ | IRetrievalNode
+ | IGenerateNode
+ | ICategorizeNode
+ | ISwitchNode
+ | IRagNode
+ | IRelevantNode
+ | ILogicNode
+ | INoteNode
+ | IMessageNode
+ | IRewriteNode
+ | IInvokeNode
+ | ITemplateNode
+ | IEmailNode
+ | IIterationNode
+ | IIterationStartNode
+ | IKeywordNode;
+
+export interface IGraph {
+ nodes: RAGFlowNodeType[];
+ edges: Edge[];
+}
diff --git a/src/interfaces/database/knowledge.ts b/src/interfaces/database/knowledge.ts
new file mode 100644
index 0000000..d768d8e
--- /dev/null
+++ b/src/interfaces/database/knowledge.ts
@@ -0,0 +1,168 @@
+import { RunningStatus } from '@/constants/knowledge';
+import { TreeData } from '@antv/g6/lib/types';
+
+// knowledge base
+export interface IKnowledge {
+ avatar?: any;
+ chunk_num: number;
+ create_date: string;
+ create_time: number;
+ created_by: string;
+ description: string;
+ doc_num: number;
+ id: string;
+ name: string;
+ parser_config: ParserConfig;
+ parser_id: string;
+ pipeline_id: string;
+ pipeline_name: string;
+ pipeline_avatar: string;
+ permission: string;
+ similarity_threshold: number;
+ status: string;
+ tenant_id: string;
+ token_num: number;
+ update_date: string;
+ update_time: number;
+ vector_similarity_weight: number;
+ embd_id: string;
+ nickname: string;
+ operator_permission: number;
+ size: number;
+ raptor_task_finish_at?: string;
+ raptor_task_id?: string;
+ mindmap_task_finish_at?: string;
+ mindmap_task_id?: string;
+}
+
+export interface IKnowledgeResult {
+ kbs: IKnowledge[];
+ total: number;
+}
+
+export interface Raptor {
+ use_raptor: boolean;
+}
+
+export interface ParserConfig {
+ from_page?: number;
+ to_page?: number;
+ auto_keywords?: number;
+ auto_questions?: number;
+ chunk_token_num?: number;
+ delimiter?: string;
+ html4excel?: boolean;
+ layout_recognize?: boolean;
+ raptor?: Raptor;
+ tag_kb_ids?: string[];
+ topn_tags?: number;
+ graphrag?: { use_graphrag?: boolean };
+}
+
+export interface IKnowledgeFileParserConfig {
+ chunk_token_num: number;
+ layout_recognize: boolean;
+ pages: number[][];
+ task_page_size: number;
+}
+export interface IKnowledgeFile {
+ chunk_num: number;
+ create_date: string;
+ create_time: number;
+ created_by: string;
+ id: string;
+ kb_id: string;
+ location: string;
+ name: string;
+ parser_id: string;
+ process_begin_at?: any;
+ process_duration: number;
+ progress: number; // parsing process
+ progress_msg: string; // parsing log
+ run: RunningStatus; // parsing status
+ size: number;
+ source_type: string;
+ status: string; // enabled
+ thumbnail?: any; // base64
+ token_num: number;
+ type: string;
+ update_date: string;
+ update_time: number;
+ parser_config: IKnowledgeFileParserConfig;
+}
+
+export interface ITenantInfo {
+ asr_id: string;
+ embd_id: string;
+ img2txt_id: string;
+ llm_id: string;
+ name: string;
+ parser_ids: string;
+ role: string;
+ tenant_id: string;
+ chat_id: string;
+ speech2text_id: string;
+ tts_id: string;
+}
+
+export interface IChunk {
+ available_int: number; // Whether to enable, 0: not enabled, 1: enabled
+ chunk_id: string;
+ content_with_weight: string;
+ doc_id: string;
+ doc_name: string;
+ image_id: string;
+ important_kwd?: string[];
+ question_kwd?: string[]; // keywords
+ tag_kwd?: string[];
+ positions: number[][];
+ tag_feas?: Record;
+}
+
+export interface ITestingChunk {
+ chunk_id: string;
+ content_ltks: string;
+ content_with_weight: string;
+ doc_id: string;
+ doc_name: string;
+ img_id: string;
+ image_id: string;
+ important_kwd: any[];
+ kb_id: string;
+ similarity: number;
+ term_similarity: number;
+ vector: number[];
+ vector_similarity: number;
+ highlight: string;
+ positions: number[][];
+ docnm_kwd: string;
+ doc_type_kwd: string;
+}
+
+export interface ITestingDocument {
+ count: number;
+ doc_id: string;
+ doc_name: string;
+}
+
+export interface ITestingResult {
+ chunks: ITestingChunk[];
+ documents: ITestingDocument[];
+ total: number;
+ labels?: Record;
+}
+
+export interface INextTestingResult {
+ chunks: ITestingChunk[];
+ doc_aggs: ITestingDocument[];
+ total: number;
+ labels?: Record;
+ isRuned?: boolean;
+}
+
+export type IRenameTag = { fromTag: string; toTag: string };
+
+export interface IKnowledgeGraph {
+ graph: Record;
+ mind_map: TreeData;
+}
diff --git a/src/interfaces/database/llm.ts b/src/interfaces/database/llm.ts
new file mode 100644
index 0000000..2608e5a
--- /dev/null
+++ b/src/interfaces/database/llm.ts
@@ -0,0 +1,41 @@
+export interface IThirdOAIModel {
+ available: boolean;
+ create_date: string;
+ create_time: number;
+ fid: string;
+ id: number;
+ llm_name: string;
+ max_tokens: number;
+ model_type: string;
+ status: string;
+ tags: string;
+ update_date: string;
+ update_time: number;
+ tenant_id?: string;
+ tenant_name?: string;
+ is_tools: boolean;
+}
+
+export type IThirdOAIModelCollection = Record<string, IThirdOAIModel[]>;
+
+export interface IFactory {
+ create_date: string;
+ create_time: number;
+ logo: string;
+ name: string;
+ status: string;
+ tags: string;
+ update_date: string;
+ update_time: number;
+}
+
+export interface IMyLlmValue {
+ llm: Llm[];
+ tags: string;
+}
+
+export interface Llm {
+ name: string;
+ type: string;
+ used_token: number;
+}
diff --git a/src/interfaces/database/mcp-server.ts b/src/interfaces/database/mcp-server.ts
new file mode 100644
index 0000000..34ed7e4
--- /dev/null
+++ b/src/interfaces/database/mcp-server.ts
@@ -0,0 +1,19 @@
+export enum McpServerType {
+ Sse = 'sse',
+ StreamableHttp = 'streamable-http',
+}
+
+export interface IMcpServerVariable {
+ key: string;
+ name: string;
+}
+
+export interface IMcpServerInfo {
+ id: string;
+ name: string;
+ url: string;
+ server_type: McpServerType;
+ description?: string;
+ variables?: IMcpServerVariable[];
+ headers: Map<string, string>;
+}
diff --git a/src/interfaces/database/mcp.ts b/src/interfaces/database/mcp.ts
new file mode 100644
index 0000000..143cf8c
--- /dev/null
+++ b/src/interfaces/database/mcp.ts
@@ -0,0 +1,60 @@
+export interface IMcpServer {
+ create_date: string;
+ description: null;
+ id: string;
+ name: string;
+ server_type: string;
+ update_date: string;
+ url: string;
+ variables: Record<string, any> & { tools?: IMCPToolObject };
+}
+
+export type IMCPToolObject = Record<string, Record<string, IMCPTool>>;
+
+export type IMCPToolRecord = Record<string, IMCPTool[]>;
+
+export interface IMcpServerListResponse {
+ mcp_servers: IMcpServer[];
+ total: number;
+}
+
+export interface IMCPTool {
+ annotations: null;
+ description: string;
+ enabled: boolean;
+ inputSchema: InputSchema;
+ name: string;
+}
+
+interface InputSchema {
+ properties: Properties;
+ required: string[];
+ title: string;
+ type: string;
+}
+
+interface Properties {
+ symbol: ISymbol;
+}
+
+interface ISymbol {
+ title: string;
+ type: string;
+}
+
+export interface IExportedMcpServers {
+ mcpServers: McpServers;
+}
+
+interface McpServers {
+ fetch_2: IExportedMcpServer;
+ github_1: IExportedMcpServer;
+}
+
+export interface IExportedMcpServer {
+ authorization_token: string;
+ name: string;
+ tool_configuration: Record<string, any>;
+ type: string;
+ url: string;
+}
diff --git a/src/interfaces/database/plugin.ts b/src/interfaces/database/plugin.ts
new file mode 100644
index 0000000..0f28494
--- /dev/null
+++ b/src/interfaces/database/plugin.ts
@@ -0,0 +1,13 @@
+export type ILLMTools = ILLMToolMetadata[];
+
+export interface ILLMToolMetadata {
+ name: string;
+ displayName: string;
+ displayDescription: string;
+ parameters: Map<string, ILLMToolParameter>;
+}
+
+export interface ILLMToolParameter {
+ type: string;
+ displayDescription: string;
+}
diff --git a/src/interfaces/database/system.ts b/src/interfaces/database/system.ts
new file mode 100644
index 0000000..3d8621f
--- /dev/null
+++ b/src/interfaces/database/system.ts
@@ -0,0 +1,7 @@
+export interface ILangfuseConfig {
+ secret_key: string;
+ public_key: string;
+ host: string;
+ project_id: string;
+ project_name: string;
+}
diff --git a/src/interfaces/database/user-setting.ts b/src/interfaces/database/user-setting.ts
new file mode 100644
index 0000000..ff4094d
--- /dev/null
+++ b/src/interfaces/database/user-setting.ts
@@ -0,0 +1,96 @@
+export interface IUserInfo {
+ access_token: string;
+ avatar?: any;
+ color_schema: string;
+ create_date: string;
+ create_time: number;
+ email: string;
+ id: string;
+ is_active: string;
+ is_anonymous: string;
+ is_authenticated: string;
+ is_superuser: boolean;
+ language: string;
+ last_login_time: string;
+ login_channel: string;
+ nickname: string;
+ password: string;
+ status: string;
+ timezone: string;
+ update_date: string;
+ update_time: number;
+}
+
+export type TaskExecutorElapsed = Record<string, number[]>;
+
+export interface TaskExecutorHeartbeatItem {
+ boot_at: string;
+ current: null;
+ done: number;
+ failed: number;
+ lag: number;
+ name: string;
+ now: string;
+ pending: number;
+}
+
+export interface ISystemStatus {
+ es: Es;
+ storage: Storage;
+ database: Database;
+ redis: Redis;
+ task_executor_heartbeat: Record<string, TaskExecutorHeartbeatItem[]>;
+}
+
+interface Redis {
+ status: string;
+ elapsed: number;
+ error: string;
+ pending: number;
+}
+
+export interface Storage {
+ status: string;
+ elapsed: number;
+ error: string;
+}
+
+export interface Database {
+ status: string;
+ elapsed: number;
+ error: string;
+}
+
+interface Es {
+ status: string;
+ elapsed: number;
+ error: string;
+ number_of_nodes: number;
+ active_shards: number;
+}
+
+export interface ITenantUser {
+ id: string;
+ avatar: string;
+ delta_seconds: number;
+ email: string;
+ is_active: string;
+ is_anonymous: string;
+ is_authenticated: string;
+ is_superuser: boolean;
+ nickname: string;
+ role: string;
+ status: string;
+ update_date: string;
+ user_id: string;
+}
+
+export interface ITenant {
+ avatar: string;
+ delta_seconds: number;
+ email: string;
+ nickname: string;
+ role: string;
+ tenant_id: string;
+ update_date: string;
+}
diff --git a/src/interfaces/request/agent.ts b/src/interfaces/request/agent.ts
new file mode 100644
index 0000000..1c6ee8b
--- /dev/null
+++ b/src/interfaces/request/agent.ts
@@ -0,0 +1,4 @@
+export interface IDebugSingleRequestBody {
+ component_id: string;
+ params: Record<string, any>;
+}
diff --git a/src/interfaces/request/base.ts b/src/interfaces/request/base.ts
new file mode 100644
index 0000000..789be81
--- /dev/null
+++ b/src/interfaces/request/base.ts
@@ -0,0 +1,7 @@
+export interface IPaginationRequestBody {
+ keywords?: string;
+ page?: number;
+ page_size?: number;
+ orderby?: string; // name|create|doc_num|create_time|update_time, default:create_time
+ desc?: string;
+}
diff --git a/src/interfaces/request/chat.ts b/src/interfaces/request/chat.ts
new file mode 100644
index 0000000..6ae1e1c
--- /dev/null
+++ b/src/interfaces/request/chat.ts
@@ -0,0 +1,11 @@
+export interface IFeedbackRequestBody {
+ messageId?: string;
+ thumbup?: boolean;
+ feedback?: string;
+}
+
+export interface IAskRequestBody {
+ question: string;
+ kb_ids: string[];
+ search_id?: string;
+}
diff --git a/src/interfaces/request/document.ts b/src/interfaces/request/document.ts
new file mode 100644
index 0000000..88bb449
--- /dev/null
+++ b/src/interfaces/request/document.ts
@@ -0,0 +1,18 @@
+export interface IChangeParserConfigRequestBody {
+ pages: number[][];
+ chunk_token_num: number;
+ layout_recognize: boolean;
+ task_page_size: number;
+}
+
+export interface IChangeParserRequestBody {
+ parser_id: string;
+ pipeline_id: string;
+ doc_id: string;
+ parser_config: IChangeParserConfigRequestBody;
+}
+
+export interface IDocumentMetaRequestBody {
+ documentId: string;
+ meta: string; // json format string
+}
diff --git a/src/interfaces/request/file-manager.ts b/src/interfaces/request/file-manager.ts
new file mode 100644
index 0000000..b355bf3
--- /dev/null
+++ b/src/interfaces/request/file-manager.ts
@@ -0,0 +1,14 @@
+import { IPaginationRequestBody } from './base';
+
+export interface IFileListRequestBody extends IPaginationRequestBody {
+ parent_id?: string; // folder id
+}
+
+interface BaseRequestBody {
+ parentId: string;
+}
+
+export interface IConnectRequestBody {
+ fileIds: string[];
+ kbIds: string[];
+}
diff --git a/src/interfaces/request/flow.ts b/src/interfaces/request/flow.ts
new file mode 100644
index 0000000..0ee8bcd
--- /dev/null
+++ b/src/interfaces/request/flow.ts
@@ -0,0 +1,4 @@
+export interface IDebugSingleRequestBody {
+ component_id: string;
+ params: any[];
+}
diff --git a/src/interfaces/request/knowledge.ts b/src/interfaces/request/knowledge.ts
new file mode 100644
index 0000000..de1b00b
--- /dev/null
+++ b/src/interfaces/request/knowledge.ts
@@ -0,0 +1,26 @@
+export interface ITestRetrievalRequestBody {
+ question: string;
+ similarity_threshold: number;
+ vector_similarity_weight: number;
+ rerank_id?: string;
+ top_k?: number;
+ use_kg?: boolean;
+ highlight?: boolean;
+ kb_id?: string[];
+}
+
+export interface IFetchKnowledgeListRequestBody {
+ owner_ids?: string[];
+}
+
+export interface IFetchKnowledgeListRequestParams {
+ kb_id?: string;
+ keywords?: string;
+ page?: number;
+ page_size?: number;
+}
+
+export interface IFetchDocumentListRequestBody {
+ suffix?: string[];
+ run_status?: string[];
+}
diff --git a/src/interfaces/request/llm.ts b/src/interfaces/request/llm.ts
new file mode 100644
index 0000000..05f8f47
--- /dev/null
+++ b/src/interfaces/request/llm.ts
@@ -0,0 +1,13 @@
+export interface IAddLlmRequestBody {
+ llm_factory: string; // Ollama
+ llm_name: string;
+ model_type: string; // chat|embedding|speech2text|image2text
+ api_base?: string;
+ api_key: string;
+ max_tokens: number;
+}
+
+export interface IDeleteLlmRequestBody {
+ llm_factory: string; // Ollama
+ llm_name?: string;
+}
diff --git a/src/interfaces/request/mcp.ts b/src/interfaces/request/mcp.ts
new file mode 100644
index 0000000..96891ad
--- /dev/null
+++ b/src/interfaces/request/mcp.ts
@@ -0,0 +1,16 @@
+import { IExportedMcpServer } from '@/interfaces/database/mcp';
+
+export interface ITestMcpRequestBody {
+ server_type: string;
+ url: string;
+ headers?: Record<string, string>;
+ variables?: Record<string, string>;
+ timeout?: number;
+}
+
+export interface IImportMcpServersRequestBody {
+ mcpServers: Record<
+ string,
+ Pick<IExportedMcpServer, 'type' | 'url' | 'authorization_token'>
+ >;
+}
diff --git a/src/interfaces/request/system.ts b/src/interfaces/request/system.ts
new file mode 100644
index 0000000..3be5664
--- /dev/null
+++ b/src/interfaces/request/system.ts
@@ -0,0 +1,5 @@
+export interface ISetLangfuseConfigRequestBody {
+ secret_key: string;
+ public_key: string;
+ host: string;
+}
diff --git a/src/locales/en.ts b/src/locales/en.ts
index 99f2dee..e160af5 100644
--- a/src/locales/en.ts
+++ b/src/locales/en.ts
@@ -2,12 +2,13 @@ export default {
translation: {
common: {
noResults: 'No results.',
- selectPlaceholder: 'Please select',
- selectAll: 'Select all',
+ selectPlaceholder: 'select value',
+ selectAll: 'Select All',
delete: 'Delete',
- deleteModalTitle: 'Are you sure to delete?',
+ deleteModalTitle: 'Are you sure to delete this item?',
ok: 'Yes',
cancel: 'No',
+ no: 'No',
total: 'Total',
rename: 'Rename',
name: 'Name',
@@ -22,10 +23,10 @@ export default {
chinese: 'Simplified Chinese',
traditionalChinese: 'Traditional Chinese',
language: 'Language',
- languageMessage: 'Please input language',
- languagePlaceholder: 'Please select language',
+ languageMessage: 'Please input your language!',
+ languagePlaceholder: 'select your language',
copy: 'Copy',
- copied: 'Copied successfully',
+ copied: 'Copied',
comingSoon: 'Coming soon',
download: 'Download',
close: 'Close',
@@ -33,51 +34,58 @@ export default {
move: 'Move',
warn: 'Warn',
action: 'Action',
- s: 's',
+ s: 'S',
pleaseSelect: 'Please select',
pleaseInput: 'Please input',
submit: 'Submit',
clear: 'Clear',
- embedIntoSite: 'Embed into site',
- previousPage: 'Previous page',
- nextPage: 'Next page',
+ embedIntoSite: 'Embed into webpage',
+ previousPage: 'Previous',
+ nextPage: 'Next',
add: 'Add',
remove: 'Remove',
search: 'Search',
noDataFound: 'No data found.',
noData: 'No data',
- promptPlaceholder: 'Please input or use / to quickly insert variables.',
+ promptPlaceholder: `Please input or use / to quickly insert variables.`,
+ mcp: {
+ namePlaceholder: 'My MCP Server',
+ nameRequired:
+ 'It must be 1–64 characters long and can only contain letters, numbers, hyphens, and underscores.',
+ urlPlaceholder: 'https://api.example.com/v1/mcp',
+ tokenPlaceholder: 'e.g. eyJhbGciOiJIUzI1Ni...',
+ },
},
login: {
- login: 'Login',
- signUp: 'Sign Up',
- loginDescription: 'Nice to see you again!',
- registerDescription: 'Nice to have you join!',
+ login: 'Sign in',
+ signUp: 'Sign up',
+ loginDescription: 'We’re so excited to see you again!',
+ registerDescription: 'Glad to have you on board!',
emailLabel: 'Email',
- emailPlaceholder: 'Please enter email address',
+ emailPlaceholder: 'Please input email',
passwordLabel: 'Password',
- passwordPlaceholder: 'Please enter password',
+ passwordPlaceholder: 'Please input password',
rememberMe: 'Remember me',
- signInTip: "Don't have an account?",
+ signInTip: 'Don’t have an account?',
signUpTip: 'Already have an account?',
- nicknameLabel: 'Name',
- nicknamePlaceholder: 'Please enter name',
- register: 'Create account',
+ nicknameLabel: 'Nickname',
+ nicknamePlaceholder: 'Please input nickname',
+ register: 'Create an account',
continue: 'Continue',
- title: 'Start building your intelligent assistant',
+ title: 'Start building your smart assistants.',
description:
- 'Sign up for free to explore top-tier RAG technology. Create knowledge bases and AI to enhance your business',
- review: 'From 500+ reviews',
+ 'Sign up for free to explore top RAG technology. Create knowledge bases and AIs to empower your business.',
+ review: 'from 500+ reviews',
},
header: {
- knowledgeBase: 'Knowledge Base',
+ knowledgeBase: 'Dataset',
chat: 'Chat',
register: 'Register',
- signin: 'Sign In',
+ signin: 'Sign in',
home: 'Home',
- setting: 'User Settings',
- logout: 'Logout',
- fileManager: 'File Manager',
+ setting: 'User settings',
+ logout: 'Log out',
+ fileManager: 'File Management',
flow: 'Agent',
search: 'Search',
welcome: 'Welcome to',
@@ -85,92 +93,99 @@ export default {
},
knowledgeList: {
welcome: 'Welcome back',
- description: 'Which knowledge base are we going to use today?',
- createKnowledgeBase: 'Create knowledge base',
+ description: 'Which knowledge bases will you use today?',
+ createKnowledgeBase: 'Create Dataset',
name: 'Name',
- namePlaceholder: 'Please input name',
- doc: 'Doc',
+ namePlaceholder: 'Please input name!',
+ doc: 'Docs',
searchKnowledgePlaceholder: 'Search',
- noMoreData: 'No more data',
+ noMoreData: `That's all. Nothing more.`,
},
knowledgeDetails: {
- fileSize: 'File size',
- fileType: 'File type',
- uploadedBy: 'Created by',
+ fileSize: 'File Size',
+ fileType: 'File Type',
+ uploadedBy: 'Uploaded by',
notGenerated: 'Not generated',
generatedOn: 'Generated on',
subbarFiles: 'Files',
+ generateKnowledgeGraph:
+ 'This will extract entities and relationships from all your documents in this dataset. The process may take a while to complete.',
+ generateRaptor:
+ 'This will extract entities and relationships from all your documents in this dataset. The process may take a while to complete.',
generate: 'Generate',
raptor: 'Raptor',
- processingType: 'Processing type',
- dataPipeline: 'Data pipeline',
+ processingType: 'Processing Type',
+ dataPipeline: 'Data Pipeline',
operations: 'Operations',
taskId: 'Task ID',
duration: 'Duration',
details: 'Details',
status: 'Status',
task: 'Task',
- startDate: 'Start date',
+ startDate: 'Start Date',
source: 'Source',
- fileName: 'File name',
- datasetLogs: 'Dataset logs',
- fileLogs: 'File logs',
+ fileName: 'File Name',
+ datasetLogs: 'Dataset Logs',
+ fileLogs: 'File Logs',
overview: 'Overview',
success: 'Success',
failed: 'Failed',
completed: 'Completed',
- datasetLog: 'Dataset log',
+ datasetLog: 'Dataset Log',
created: 'Created',
- learnMore: 'Learn more',
+ learnMore: 'Learn More',
general: 'General',
- chunkMethodTab: 'Chunk method',
- testResults: 'Test results',
- testSetting: 'Test setting',
- retrievalTesting: 'Retrieval testing',
+ chunkMethodTab: 'Chunk Method',
+ testResults: 'Test Results',
+ testSetting: 'Test Setting',
+ retrievalTesting: 'Retrieval Testing',
retrievalTestingDescription:
- 'Conduct retrieval testing to check if RAGFlow can retrieve the expected content for the large language model (LLM).',
+ 'Conduct a retrieval test to check if RAGFlow can recover the intended content for the LLM.',
Parse: 'Parse',
dataset: 'Dataset',
- testing: 'Testing',
- configuration: 'Configuration',
- knowledgeGraph: 'Knowledge graph',
+ testing: 'Retrieval testing',
files: 'files',
+ configuration: 'Configuration',
+ knowledgeGraph: 'Knowledge Graph',
name: 'Name',
- namePlaceholder: 'Please input name',
- doc: 'Doc',
- datasetDescription: 'You can only chat after parsing is successful.',
+ namePlaceholder: 'Please input name!',
+ doc: 'Docs',
+ datasetDescription:
+ 'Please wait for your files to finish parsing before starting an AI-powered chat.',
addFile: 'Add file',
- searchFiles: 'Search files',
+ searchFiles: 'Search your files',
localFiles: 'Local files',
emptyFiles: 'Create empty file',
- webCrawl: 'Web crawl',
- chunkNumber: 'Chunk number',
- uploadDate: 'Upload date',
- chunkMethod: 'Chunk method',
- enabled: 'Enabled',
- disabled: 'Disabled',
+ webCrawl: 'Web Crawl',
+ chunkNumber: 'Chunk Number',
+ uploadDate: 'Upload Date',
+ chunkMethod: 'Chunking method',
+ enabled: 'Enable',
+ disabled: 'Disable',
action: 'Action',
- parsingStatus: 'Parsing status',
+ parsingStatus: 'Parsing Status',
parsingStatusTip:
- 'The time for text parsing depends on many factors. If knowledge graph, RAPTOR, automatic question extraction, automatic keyword extraction and other functions are enabled, the time will be longer. If the parsing progress bar does not update for a long time, you can also refer to these two FAQs: https://ragflow.io/docs/dev/faq#why-does-my-document-parsing-stall-at-under-one-percent.',
+ 'Document parsing time varies based on several factors. Enabling features like Knowledge Graph, RAPTOR, Auto Question Extraction, or Auto Keyword Extraction will significantly increase processing time. If the progress bar stalls, please consult these two FAQs: https://ragflow.io/docs/dev/faq#why-does-my-document-parsing-stall-at-under-one-percent.',
processBeginAt: 'Begin at',
processDuration: 'Duration',
progressMsg: 'Progress',
- noTestResultsForRuned: 'No relevant results found, please try adjusting the query statement or parameters',
- noTestResultsForNotRuned: 'No test has been run yet, results will be displayed here',
+ noTestResultsForRuned:
+ 'No relevant results found. Try adjusting your query or parameters.',
+ noTestResultsForNotRuned:
+ 'No test has been run yet. Results will appear here.',
testingDescription:
- 'Please complete the recall test: ensure that your configuration can recall the correct text blocks from the database. If you adjust the default settings here, such as keyword similarity weight, please note that the changes here will not be automatically saved. Please be sure to synchronize and update related settings in the chat assistant settings or recall operator settings.',
+ 'Conduct a retrieval test to check if RAGFlow can recover the intended content for the LLM. If you have adjusted the default settings, such as keyword similarity weight or similarity threshold, to achieve the optimal results, be aware that these changes will not be automatically saved. You must apply them to your chat assistant settings or the Retrieval agent component settings.',
similarityThreshold: 'Similarity threshold',
similarityThresholdTip:
- 'We use a hybrid similarity score to evaluate the distance between two lines of text. It is a weighted keyword similarity and vector cosine similarity. If the similarity between the query and the chunk is less than this threshold, the chunk will be filtered out. The default setting is 0.2, which means that the hybrid similarity score of the text chunk must be at least 20 to be recalled.',
+ 'RAGFlow employs either a combination of weighted keyword similarity and weighted vector cosine similarity, or a combination of weighted keyword similarity and weighted reranking score during retrieval. This parameter sets the threshold for similarities between the user query and chunks. Any chunk with a similarity score below this threshold will be excluded from the results. By default, the threshold is set to 0.2. This means that only chunks with hybrid similarity score of 20 or higher will be retrieved.',
vectorSimilarityWeight: 'Vector similarity weight',
vectorSimilarityWeightTip:
- 'We use a hybrid similarity score to evaluate the distance between two lines of text. It is a weighted keyword similarity and vector cosine similarity or rerank score (0~1). The sum of the two weights is 1.0.',
+ 'This sets the weight of keyword similarity in the combined similarity score, either used with vector cosine similarity or with reranking score. The total of the two weights must equal 1.0.',
keywordSimilarityWeight: 'Keyword similarity weight',
keywordSimilarityWeightTip:
- 'We use a hybrid similarity score to evaluate the distance between two lines of text. It is a weighted keyword similarity and vector cosine similarity or rerank score (0~1). The sum of the two weights is 1.0.',
+ 'This sets the weight of keyword similarity in the combined similarity score, either used with vector cosine similarity or with reranking score. The total of the two weights must equal 1.0.',
testText: 'Test text',
- testTextPlaceholder: 'Please input your question!',
+ testTextPlaceholder: 'Input your question here!',
testingLabel: 'Testing',
similarity: 'Hybrid similarity',
termSimilarity: 'Term similarity',
@@ -180,247 +195,1622 @@ export default {
filesSelected: 'Files selected',
upload: 'Upload',
run: 'Parse',
- runningStatus0: 'Unparsed',
- runningStatus1: 'Parsing',
- runningStatus2: 'Cancel',
- runningStatus3: 'Success',
- runningStatus4: 'Failed',
- pageRanges: 'Page ranges',
+ runningStatus0: 'PENDING',
+ runningStatus1: 'PARSING',
+ runningStatus2: 'CANCELED',
+ runningStatus3: 'SUCCESS',
+ runningStatus4: 'FAIL',
+ pageRanges: 'Page Ranges',
pageRangesTip:
- 'Page ranges: Define the page ranges that need to be parsed. Pages not included in these ranges will be ignored.',
- fromPlaceholder: 'From',
+ 'Range of pages to be parsed; pages outside this range will not be processed.',
+ fromPlaceholder: 'from',
fromMessage: 'Missing start page number',
- toPlaceholder: 'To',
- toMessage: 'Missing end page number (exclusive)',
+ toPlaceholder: 'to',
+ toMessage: 'Missing end page number (excluded)',
layoutRecognize: 'PDF parser',
layoutRecognizeTip:
- 'Use visual models for PDF layout analysis to better identify document structure, find the location of titles, text blocks, images and tables. If you choose the Naive option, you can only get the plain text of the PDF. Please note that this function only applies to PDF documents and does not work for other documents. For more information, please refer to https://ragflow.io/docs/dev/select_pdf_parser.',
+ 'Use a visual model for PDF layout analysis to effectively locate document titles, text blocks, images, and tables. If the naive option is chosen, only the plain text in the PDF will be retrieved. Please note that this option currently works ONLY for PDF documents.',
taskPageSize: 'Task page size',
taskPageSizeMessage: 'Please input your task page size!',
- taskPageSizeTip: `If layout recognition is used, PDF files will be divided into consecutive groups. Layout analysis will be performed in parallel between groups to improve processing speed. "Task page size" determines the size of the group. The larger the page size, the lower the chance of splitting continuous text between pages into different chunks.`,
+ taskPageSizeTip: `During layout recognition, a PDF file is split into chunks and processed in parallel to increase processing speed. This parameter sets the size of each chunk. A larger chunk size reduces the likelihood of splitting continuous text between pages.`,
addPage: 'Add page',
- greaterThan: 'The current value must be greater than the starting value!',
- greaterThanPrevious: 'The current value must be greater than the previous value!',
+ greaterThan: 'The current value must be greater than the "from" value!',
+ greaterThanPrevious:
+ 'The current value must be greater than the previous "to" value!',
selectFiles: 'Select files',
changeSpecificCategory: 'Change specific category',
- uploadTitle: 'Click or drag files to this area to upload',
+ uploadTitle: 'Drag and drop your file here to upload',
uploadDescription:
- 'Support single or batch upload. For local deployment, the total file size limit for a single upload is 1GB, the number of files for a single batch upload does not exceed 32, and there is no limit on the number of files for a single account. For demo.ragflow.io: the total file size limit for each upload is 10MB, each file must not exceed 10MB, and each account can upload up to 128 files. It is strictly prohibited to upload prohibited files.',
+ 'Supports single or batch file upload. For a locally deployed RAGFlow: the total file size limit per upload is 1GB, with a batch upload limit of 32 files. There is no cap on the total number of files per account. For demo.ragflow.io, the total file size limit per upload is 10MB, with each file not exceeding 10MB and a maximum of 128 files per account.',
chunk: 'Chunk',
bulk: 'Bulk',
cancel: 'Cancel',
close: 'Close',
rerankModel: 'Rerank model',
rerankPlaceholder: 'Please select',
- rerankTip: `Optional: If no rerank model is selected, the system will default to a hybrid query method that combines keyword similarity and vector cosine similarity; if a rerank model is set, the vector similarity part in the hybrid query will be replaced by rerank scoring. Please note: using a rerank model will be very time-consuming. If you need to use a rerank model, it is recommended to use a SaaS rerank model service; if you prefer to use a locally deployed rerank model, please make sure you start RAGFlow using docker-compose-gpu.yml.`,
+ rerankTip: `Optional. If left empty, RAGFlow will use a combination of weighted keyword similarity and weighted vector cosine similarity; if a rerank model is selected, a weighted reranking score will replace the weighted vector cosine similarity. Please be aware that using a rerank model will significantly increase the system's response time. If you wish to use a rerank model, ensure you use a SaaS reranker; if you prefer a locally deployed rerank model, ensure you start RAGFlow with docker-compose-gpu.yml.`,
topK: 'Top-K',
- topKTip: `Used in conjunction with the Rerank model to set the number of text blocks passed to the Rerank model.`,
- delimiter: `Text segmentation delimiter`,
+ topKTip: `Used together with the Rerank model, this setting defines the number of text chunks to be sent to the specified reranking model.`,
+ delimiter: `Delimiter for text`,
delimiterTip:
- 'Support multiple characters as delimiters, multiple characters are wrapped with two backticks \\`\\`. If configured as: \\n`##`; the system will first use line breaks, two # signs and semicolons to split the text, and then assemble the small text blocks according to the size set by "Suggested text block size". Please make sure you understand the above text segmentation and chunking mechanism before setting text segmentation delimiters.',
- html4excel: 'Table to HTML',
- html4excelTip: `Used in conjunction with the General chunking method. When not enabled, table files (XLSX, XLS (Excel 97-2003)) will be parsed as key-value pairs by row. When enabled, table files will be parsed as HTML tables. If the original table exceeds 12 rows, the system will automatically split it into multiple HTML tables with 12 rows each. For more details, please refer to https://ragflow.io/docs/dev/enable_excel2html.`,
- autoKeywords: 'Auto keyword extraction',
- autoKeywordsTip: `Automatically extract N keywords from each text block to improve query accuracy. Please note: This function uses the default chat model set in "System Model Settings" to extract keywords, so it will also consume more tokens. In addition, you can also manually update the generated keywords. For details, please see https://ragflow.io/docs/dev/autokeyword_autoquestion.`,
- autoQuestions: 'Auto question extraction',
- autoQuestionsTip: `Use the chat model set in "System Model Settings" to extract N questions from each text block in the knowledge base to improve its ranking score. Please note that enabling it will consume additional tokens. You can view and edit the results in the chunk list. If automatic question extraction fails, it will not hinder the entire chunking process, and only empty results will be added to the original text block. For details, please see https://ragflow.io/docs/dev/autokeyword_autoquestion.`,
- redo: 'Clear existing {{chunkNum}} chunks?',
- setMetaData: 'Set metadata',
- pleaseInputJson: 'Please input JSON',
- documentMetaTips: `Metadata is in JSON format (not searchable). If any chunk of this document is included in the prompt, it will be added to the LLM's prompt.
-Example:
-Metadata is:
+ 'A delimiter or separator can consist of one or multiple special characters. If it is multiple characters, ensure they are enclosed in backticks (``). For example, if you configure your delimiters like this: \\n`##`;, then your texts will be separated at line breaks, double hash symbols (##), and semicolons.',
+ html4excel: 'Excel to HTML',
+ html4excelTip: `Use with the General chunking method. When disabled, spreadsheets (XLSX or XLS(Excel 97-2003)) in the knowledge base will be parsed into key-value pairs. When enabled, they will be parsed into HTML tables, splitting every 12 rows if the original table has more than 12 rows. See https://ragflow.io/docs/dev/enable_excel2html for details.`,
+ autoKeywords: 'Auto-keyword',
+ autoKeywordsTip: `Automatically extract N keywords for each chunk to increase their ranking for queries containing those keywords. Be aware that extra tokens will be consumed by the chat model specified in 'System model settings'. You can check or update the added keywords for a chunk from the chunk list. For details, see https://ragflow.io/docs/dev/autokeyword_autoquestion.`,
+ autoQuestions: 'Auto-question',
+ autoQuestionsTip: `Automatically extract N questions for each chunk to increase their ranking for queries containing those questions. You can check or update the added questions for a chunk from the chunk list. This feature will not disrupt the chunking process if an error occurs, except that it may add an empty result to the original chunk. Be aware that extra tokens will be consumed by the LLM specified in 'System model settings'. For details, see https://ragflow.io/docs/dev/autokeyword_autoquestion.`,
+ redo: 'Do you want to clear the existing {{chunkNum}} chunks?',
+ setMetaData: 'Set Metadata',
+ pleaseInputJson: 'Please enter JSON',
+ documentMetaTips: `The metadata is in JSON format (it's not searchable). It will be added into the prompt for the LLM if any chunks of this document are included in the prompt.
+Examples:
+The metadata is:
-{
- "Author": "Alex Dowson",
- "Date": "2024-11-12"
-}
+ {
+ "Author": "Alex Dowson",
+ "Date": "2024-11-12"
+ }
The prompt will be:
Document: the_name_of_document
Author: Alex Dowson
Date: 2024-11-12
-Related segments are as follows:
+Relevant fragments are as follows:
-- This is chunk content....
-- This is chunk content....
+- Here is the chunk content....
+- Here is the chunk content....
`,
- metaData: 'Metadata',
+ metaData: 'Meta data',
deleteDocumentConfirmContent:
- 'This document is associated with a knowledge graph. After deletion, related node and relationship information will be deleted, but the graph will not be updated immediately. The update graph action is performed during the process of parsing new documents that carry knowledge graph extraction tasks.',
+ 'The document is associated with the knowledge graph. After deletion, the related node and relationship information will be deleted, but the graph will not be updated immediately. The update graph action is performed during the process of parsing the new document that carries the knowledge graph extraction task.',
plainText: 'Naive',
- reRankModelWaring: 'Rerank model is very time-consuming.',
- theDocumentBeingParsedCannotBeDeleted: 'The document being parsed cannot be deleted',
+ reRankModelWaring: 'Re-rank model is very time-consuming.',
},
knowledgeConfiguration: {
deleteGenerateModalContent: `
- Deleting the generated {{type}} results
- will remove all derived entities and relationships from this dataset.
- Your original files will remain unchanged.
-
- Do you want to continue?
+
Deleting the generated {{type}} results
+ will remove all derived entities and relationships from this dataset.
+ Your original files will remain intact.
+
+ Do you want to continue?
`,
- extractRaptor: 'Extract Raptor from documents',
- extractKnowledgeGraph: 'Extract knowledge graph from documents',
- filterPlaceholder: 'Please input',
+ extractRaptor: 'Extract Raptor',
+ extractKnowledgeGraph: 'Extract Knowledge Graph',
+ filterPlaceholder: 'Please input filter',
fileFilterTip: '',
- fileFilter: 'Regex matching expression',
+ fileFilter: 'File Filter',
setDefaultTip: '',
- setDefault: 'Set default',
- eidtLinkDataPipeline: 'Edit data pipeline',
- linkPipelineSetTip: 'Manage data pipeline links with this dataset',
+ setDefault: 'Set as Default',
+ eidtLinkDataPipeline: 'Edit Data Pipeline',
+ linkPipelineSetTip: 'Manage data pipeline linkage with this dataset',
default: 'Default',
- dataPipeline: 'Data pipeline',
- linkDataPipeline: 'Link data pipeline',
- enableAutoGenerate: 'Enable auto generate',
- teamPlaceholder: 'Please select team',
- dataFlowPlaceholder: 'Please select data flow',
+ dataPipeline: 'Data Pipeline',
+ linkDataPipeline: 'Link Data Pipeline',
+ enableAutoGenerate: 'Enable Auto Generate',
+ teamPlaceholder: 'Please select a team.',
+ dataFlowPlaceholder: 'Please select a pipeline.',
buildItFromScratch: 'Build it from scratch',
- dataFlow: 'Data flow',
- parseType: 'Chunk method',
- manualSetup: 'Manual setup',
+ dataFlow: 'Pipeline',
+ parseType: 'Parse Type',
+ manualSetup: 'Manual Setup',
builtIn: 'Built-in',
- titleDescription: 'Update your knowledge base details here, especially the chunk method.',
+ titleDescription:
+ 'Update your knowledge base configuration here, particularly the chunking method.',
name: 'Knowledge base name',
photo: 'Knowledge base photo',
- photoTip: 'You can upload files up to 4MB',
+ photoTip: 'You can upload a file of up to 4 MB',
description: 'Description',
language: 'Document language',
- languageMessage: 'Please input language',
- languagePlaceholder: 'Please input language',
+ languageMessage: 'Please input your language!',
+ languagePlaceholder: 'Please input your language!',
permissions: 'Permissions',
embeddingModel: 'Embedding model',
- chunkTokenNumber: 'Suggested chunk size',
- chunkTokenNumberMessage: 'Chunk token number is required',
+ chunkTokenNumber: 'Recommended chunk size',
+ chunkTokenNumberMessage: 'Chunk token number for text is required',
embeddingModelTip:
- 'The default embedding model used by the knowledge base. Once text chunks have been generated in the knowledge base, you will not be able to change the default embedding model unless you delete all text chunks in the knowledge base.',
+ 'The default embedding model for the knowledge base. It cannot be changed once the knowledge base has chunks. To switch to a different default embedding model, you must delete all existing chunks in the knowledge base.',
permissionsTip:
- 'If the knowledge base permission is set to "Team", all team members can operate the knowledge base.',
+ "If it is set to 'Team', all your team members will be able to manage the knowledge base.",
chunkTokenNumberTip:
- 'The recommended token number threshold for generating text chunks. If the token number of the small text segment obtained by segmentation does not reach this threshold, it will continue to merge with subsequent text segments until merging the next text segment will exceed this threshold, and then a final text chunk will be generated. If the system never encounters a text segmentation delimiter when segmenting text segments, even if the token number of the text segment has exceeded this threshold, the system will not generate new text chunks.',
- chunkMethod: 'Chunk method',
- chunkMethodTip: 'The description is on the right.',
+ 'This sets the token threshold for creating a chunk. A segment with fewer tokens than this threshold will be combined with the following segments until the token count exceeds the threshold, at which point a chunk is created. No new chunk is created unless a delimiter is encountered, even if the threshold is exceeded.',
+ chunkMethod: 'Chunking method',
+ chunkMethodTip: 'View the tips on the right.',
upload: 'Upload',
english: 'English',
chinese: 'Chinese',
- embeddingModelPlaceholder: 'Please select embedding model',
- chunkMethodPlaceholder: 'Please select chunk method',
+ portugueseBr: 'Portuguese (Brazil)',
+ embeddingModelPlaceholder: 'Please select a embedding model.',
+ chunkMethodPlaceholder: 'Please select a chunking method.',
save: 'Save',
me: 'Only me',
team: 'Team',
cancel: 'Cancel',
- methodTitle: 'Chunk method description',
+ methodTitle: 'Chunking method description',
methodExamples: 'Examples',
methodExamplesDescription:
- 'To help you understand better, we provide relevant screenshots for your reference.',
- dialogueExamplesTitle: 'Dialogue examples',
- methodEmpty: 'This will show a visual explanation of the knowledge base category',
+ 'The following screenshots are provided for clarification.',
+ dialogueExamplesTitle: 'view',
+ methodEmpty:
+ 'This will display a visual explanation of the knowledge base categories',
book: `
Supported file formats are DOCX, PDF, TXT.
- Since a book is very long, not all parts are useful. If it is a PDF,
- please set page ranges for each book to eliminate negative effects and save analysis computing time.
`,
+ For each book in PDF, please set the page ranges to remove unwanted information and reduce analysis time.
`,
laws: `Supported file formats are DOCX, PDF, TXT.
- Legal documents have very strict writing formats. We use text features to detect split points.
+ Legal documents typically follow a rigorous writing format. We use text features to identify split points.
- The granularity of chunks is consistent with 'ARTICLE', and all upper-level text will be included in the chunk.
+ The chunk has a granularity consistent with 'ARTICLE', ensuring all upper level text is included in the chunk.
`,
manual: `Only PDF is supported.
- We assume that the manual has a hierarchical section structure. We use the lowest section title as the pivot for slicing the document.
- Therefore, figures and tables in the same section will not be split, and the chunk size may be large.
+ We assume that the manual has a hierarchical section structure, using the lowest section titles as basic unit for chunking documents. Therefore, figures and tables in the same section will not be separated, which may result in larger chunk sizes.
`,
naive: `Supported file formats are MD, MDX, DOCX, XLSX, XLS (Excel 97-2003), PPT, PDF, TXT, JPEG, JPG, PNG, TIF, GIF, CSV, JSON, EML, HTML.
- This method applies a simple approach to chunk files:
+ This method chunks files using a 'naive' method:
-
The system will use a visual detection model to split continuous text into multiple segments.
- Next, these continuous segments are merged into chunks with no more than "Token number" tokens.`,
- paper: `Only PDF files are supported.
- If our model works well, the paper will be sliced by its sections, such as Abstract, 1.1, 1.2, etc.
- The advantage of this is that the LLM can better summarize the content of relevant sections in the paper,
- produce more comprehensive answers, and help readers better understand the paper.
- The disadvantage is that it increases the context of LLM conversations and increases computational costs,
- so during the conversation, you can consider reducing the 'topN' setting.
`,
+ Use vision detection model to split the texts into smaller segments.
+ Then, combine adjacent segments until the token count exceeds the threshold specified by 'Chunk token number for text', at which point a chunk is created.`,
+ paper: `Only PDF files are supported.
+ Papers will be split by section, such as abstract, 1.1, 1.2.
+ This approach enables the LLM to summarize the paper more effectively and to provide more comprehensive, understandable responses.
+ However, it also increases the context for AI conversations and adds to the computational cost for the LLM. So during a conversation, consider reducing the value of ‘topN’.
`,
presentation: `Supported file formats are PDF, PPTX.
- Each page will be treated as a chunk. And thumbnails of each page will be stored.
- All PPT files you upload will be automatically chunked using this method, without the need to set it for each PPT file.
`,
- qa: `
- This chunk method supports excel and csv/txt file formats.
+ Every page in the slides is treated as a chunk, with its thumbnail image stored.
+ This chunking method is automatically applied to all uploaded PPT files, so you do not need to specify it manually.
`,
+ qa: `
+
+ This chunking method supports XLSX and CSV/TXT file formats.
- If the file is in excel format, it should consist of two columns
- without headers: one for questions and another for answers,
- with the question column before the answer column. Multiple sheets are
- acceptable as long as the columns are correctly structured.
+ If a file is in XLSX or XLS (Excel 97-2003) format, it should contain two columns without headers: one for questions and the other for answers, with the question column preceding the answer column. Multiple sheets are
+ acceptable, provided the columns are properly structured.
- If the file is in csv/txt format,
- it should be UTF-8 encoded and use TAB as the delimiter to separate questions and answers.
+ If a file is in CSV/TXT format, it must be UTF-8 encoded with TAB as the delimiter to separate questions and answers.
- Text lines that fail to follow the above rules will be ignored, and
- each Q&A pair will be considered a unique chunk.
+ Lines of texts that fail to follow the above rules will be ignored, and
+ each Q&A pair will be considered a distinct chunk.
-
`,
+
+ `,
resume: `Supported file formats are DOCX, PDF, TXT.
- Resumes come in various formats, just like a person's personality, but we often have to organize them into structured data for easy searching.
-
- Instead of chunking resumes, we parse resumes into structured data. As an HR, you can throw away all resumes,
- and you only need to talk to 'RAGFlow' to list all qualified candidates.
+ Résumés of various forms are parsed and organized into structured data to facilitate candidate search for recruiters.
- `,
- table: `Supports XLSX and CSV/TXT format files.
- Here are some tips:
+ `,
+ table: `
Supported file formats are XLSX and CSV/TXT.
+ Here are some prerequisites and tips:
`,
picture: `
- Supports image files. Video coming soon.
- If there is text in the image, OCR is applied to extract the text as its text description.
-
- If the text extracted by OCR is not enough, you can use visual LLM to get the description.
-
`,
+ Image files are supported, with video support coming soon.
+ This method employs an OCR model to extract texts from images.
+
+ If the text extracted by the OCR model is deemed insufficient, a specified visual LLM will be used to provide a description of the image.
+
`,
one: `
- Supported file formats are MD, MDX, DOCX, XLSX, XLS (Excel 97-2003), PPT, PDF, TXT, JPEG, JPG, PNG, TIF, GIF, CSV, JSON, EML, HTML.
- This method treats the entire document as one chunk.
`,
- email: `Supported file formats are EML.
- Each email will be treated as a chunk.
-
`,
- knowledgeGraph: `Supported file formats are MD, MDX, DOCX, XLSX, XLS (Excel 97-2003), PPT, PDF, TXT, JPEG, JPG, PNG, TIF, GIF, CSV, JSON, EML, HTML.
- This method will extract entities and relationships from documents and store them in a knowledge graph.
`,
+ Supported file formats are DOCX, XLSX, XLS (Excel 97-2003), PDF, TXT.
+
+ This method treats each document in its entirety as a chunk.
+
+ Applicable when you require the LLM to summarize the entire document, provided it can handle that amount of context length.
+
`,
+ knowledgeGraph: `Supported file formats are DOCX, EXCEL, PPT, IMAGE, PDF, TXT, MD, JSON, EML
+
+
This approach chunks files using the 'naive'/'General' method. It splits a document into segments and then combines adjacent segments until the token count exceeds the threshold specified by 'Chunk token number for text', at which point a chunk is created.
+The chunks are then fed to the LLM to extract entities and relationships for a knowledge graph and a mind map.
+Ensure that you set the Entity types.
`,
+ tag: `A knowledge base using the 'Tag' chunking method functions as a tag set. Other knowledge bases use it to tag their chunks, and queries to these knowledge bases are also tagged using this tag set.
+A tag set will NOT be directly involved in a Retrieval-Augmented Generation (RAG) process.
+Each chunk in this knowledge base is an independent description-tag pair.
+Supported file formats include XLSX and CSV/TXT:
+If a file is in XLSX format, it should contain two columns without headers: one for tag descriptions and the other for tag names, with the Description column preceding the Tag column. Multiple sheets are acceptable, provided the columns are properly structured.
+If a file is in CSV/TXT format, it must be UTF-8 encoded with TAB as the delimiter to separate descriptions and tags.
+In a Tag column, comma is used to separate tags.
+Lines of texts that fail to follow the above rules will be ignored.
+`,
+ useRaptor: 'RAPTOR',
+ useRaptorTip:
+ 'Enable RAPTOR for multi-hop question-answering tasks. See https://ragflow.io/docs/dev/enable_raptor for details.',
+ prompt: 'Prompt',
+ promptTip:
+ 'Use the system prompt to describe the task for the LLM, specify how it should respond, and outline other miscellaneous requirements. The system prompt is often used in conjunction with keys (variables), which serve as various data inputs for the LLM. Use a forward slash `/` or the (x) button to show the keys to use.',
+ promptMessage: 'Prompt is required',
+ promptText: `Please summarize the following paragraphs. Be careful with the numbers, do not make things up. Paragraphs are as follows:
+ {cluster_content}
+The above is the content you need to summarize.`,
+ maxToken: 'Max token',
+ maxTokenTip: 'The maximum number of tokens per generated summary chunk.',
+ maxTokenMessage: 'Max token is required',
+ threshold: 'Threshold',
+ thresholdTip:
+ 'In RAPTOR, chunks are clustered by their semantic similarity. The Threshold parameter sets the minimum similarity required for chunks to be grouped together. A higher Threshold means fewer chunks in each cluster, while a lower one means more.',
+ thresholdMessage: 'Threshold is required',
+ maxCluster: 'Max cluster',
+ maxClusterTip: 'The maximum number of clusters to create.',
+ maxClusterMessage: 'Max cluster is required',
+ randomSeed: 'Random seed',
+ randomSeedMessage: 'Random seed is required',
+ entityTypes: 'Entity types',
+ vietnamese: 'Vietnamese',
+ pageRank: 'Page rank',
+ pageRankTip: `You can assign a higher PageRank score to specific knowledge bases during retrieval. The corresponding score is added to the hybrid similarity scores of retrieved chunks from these knowledge bases, increasing their ranking. See https://ragflow.io/docs/dev/set_page_rank for details.`,
+ tagName: 'Tag',
+ frequency: 'Frequency',
+ searchTags: 'Search tags',
+ tagCloud: 'Cloud',
+ tagTable: 'Table',
+ tagSet: 'Tag sets',
+ tagSetTip: `
+ Select one or multiple tag knowledge bases to auto-tag chunks in your knowledge base. See https://ragflow.io/docs/dev/use_tag_sets for details.
+The user query will also be auto-tagged.
+This auto-tagging feature enhances retrieval by adding another layer of domain-specific knowledge to the existing dataset.
+Difference between auto-tag and auto-keyword:
+
+ - A tag knowledge base is a user-defined close set, whereas keywords extracted by the LLM can be regarded as an open set.
+ - You must upload tag sets in specified formats before running the auto-tag feature.
+ - The auto-keyword feature is dependent on the LLM and consumes a significant number of tokens.
+
+ `,
+ topnTags: 'Top-N Tags',
+ tags: 'Tags',
+ addTag: 'Add tag',
+ useGraphRag: 'Knowledge graph',
+ useGraphRagTip:
+ 'Construct a knowledge graph over file chunks of the current knowledge base to enhance multi-hop question-answering involving nested logic. See https://ragflow.io/docs/dev/construct_knowledge_graph for details.',
+ graphRagMethod: 'Method',
+ graphRagMethodTip: `Light: (Default) Use prompts provided by github.com/HKUDS/LightRAG to extract entities and relationships. This option consumes fewer tokens, less memory, and fewer computational resources.
+ General: Use prompts provided by github.com/microsoft/graphrag to extract entities and relationships`,
+ resolution: 'Entity resolution',
+ resolutionTip: `An entity deduplication switch. When enabled, the LLM will combine similar entities - e.g., '2025' and 'the year of 2025', or 'IT' and 'Information Technology' - to construct a more accurate graph`,
+ community: 'Community reports',
+ communityTip:
+ 'In a knowledge graph, a community is a cluster of entities linked by relationships. You can have the LLM generate an abstract for each community, known as a community report. See here for more information: https://www.microsoft.com/en-us/research/blog/graphrag-improving-global-search-via-dynamic-community-selection/',
+ theDocumentBeingParsedCannotBeDeleted:
+ 'The document being parsed cannot be deleted',
},
- dashboard: {
- title: 'Dashboard',
- knowledgeBaseStatus: 'Knowledge Base Status',
- documents: 'Documents',
- sources: 'Sources',
- vectors: 'Vectors',
- recentActivity: 'Recent Activity',
- noActivity: 'No activity',
- systemHealth: 'System Health',
- healthy: 'Healthy',
- warning: 'Warning',
- error: 'Error',
+ chunk: {
+ chunk: 'Chunk',
+ bulk: 'Bulk',
+ selectAll: 'Select All',
+ enabledSelected: 'Enable selected',
+ disabledSelected: 'Disable selected',
+ deleteSelected: 'Delete selected',
+ search: 'Search',
+ all: 'All',
+ enabled: 'Enabled',
+ disabled: 'Disabled',
+ keyword: 'Keyword',
+ function: 'Function',
+ chunkMessage: 'Please input value!',
+ full: 'Full text',
+ ellipse: 'Ellipse',
+ graph: 'Knowledge graph',
+ mind: 'Mind map',
+ question: 'Question',
+ questionTip: `If there are given questions, the embedding of the chunk will be based on them.`,
+ chunkResult: 'Chunk Result',
+ chunkResultTip: `View the chunked segments used for embedding and retrieval.`,
+ enable: 'Enable',
+ disable: 'Disable',
+ delete: 'Delete',
},
- time: {
- justNow: 'Just now',
- minutesAgo: '{{count}} minutes ago',
- hoursAgo: '{{count}} hours ago',
- daysAgo: '{{count}} days ago',
- weeksAgo: '{{count}} weeks ago',
- monthsAgo: '{{count}} months ago',
- yearsAgo: '{{count}} years ago',
+ chat: {
+ messagePlaceholder: 'Type your message here...',
+ exit: 'Exit',
+ multipleModels: 'Multiple Models',
+ applyModelConfigs: 'Apply model configs',
+ conversations: 'Conversations',
+ chatApps: 'Chat Apps',
+ newConversation: 'New conversation',
+ createAssistant: 'Create an Assistant',
+ assistantSetting: 'Assistant settings',
+ promptEngine: 'Prompt engine',
+ modelSetting: 'Model settings',
+ chat: 'Chat',
+ newChat: 'New chat',
+ send: 'Send',
+ sendPlaceholder: 'Message the assistant...',
+ chatConfiguration: 'Chat Configuration',
+ chatConfigurationDescription:
+ ' Set up a chat assistant for your selected datasets (knowledge bases) here! 💕',
+ assistantName: 'Assistant name',
+ assistantNameMessage: 'Assistant name is required',
+ namePlaceholder: 'e.g. Resume Jarvis',
+ assistantAvatar: 'Assistant avatar',
+ language: 'Language',
+ emptyResponse: 'Empty response',
+ emptyResponseTip: `Set this as a response if no results are retrieved from the knowledge bases for your query, or leave this field blank to allow the LLM to improvise when nothing is found.`,
+ emptyResponseMessage: `Empty response will be triggered when nothing relevant is retrieved from knowledge bases. You must clear the 'Empty response' field if no knowledge base is selected.`,
+ setAnOpener: 'Opening greeting',
+ setAnOpenerInitial: `Hi! I'm your assistant. What can I do for you?`,
+ setAnOpenerTip: 'Set an opening greeting for users.',
+ knowledgeBases: 'Knowledge bases',
+ knowledgeBasesMessage: 'Please select',
+ knowledgeBasesTip:
+ 'Select the knowledge bases to associate with this chat assistant. An empty knowledge base will not appear in the dropdown list.',
+ system: 'System prompt',
+ systemInitialValue: `You are an intelligent assistant. Please summarize the content of the knowledge base to answer the question. Please list the data in the knowledge base and answer in detail. When all knowledge base content is irrelevant to the question, your answer must include the sentence "The answer you are looking for is not found in the knowledge base!" Answers need to consider chat history.
+ Here is the knowledge base:
+ {knowledge}
+ The above is the knowledge base.`,
+ systemMessage: 'Please input!',
+ systemTip:
+ 'Your prompts or instructions for the LLM, including but not limited to its role, the desired length, tone, and language of its answers. If your model has native support for reasoning, you can add //no_thinking to the prompt to stop reasoning.',
+ topN: 'Top N',
+ topNTip: `Not all chunks with similarity score above the 'similarity threshold' will be sent to the LLM. This selects 'Top N' chunks from the retrieved ones.`,
+ variable: 'Variable',
+ variableTip: `Used together with RAGFlow's chat assistant management APIs, variables can help develop more flexible system prompt strategies. The defined variables will be used by 'System prompt' as part of the prompts for the LLM. {knowledge} is a reserved special variable representing chunks retrieved from specified knowledge base(s), and all variables should be enclosed in curly braces {} in the 'System prompt'. See https://ragflow.io/docs/dev/set_chat_variables for details.`,
+ add: 'Add',
+ key: 'Key',
+ optional: 'Optional',
+ operation: 'Operation',
+ model: 'Model',
+ modelTip: 'Large language chat model',
+ modelMessage: 'Please select!',
+ modelEnabledTools: 'Enabled tools',
+ modelEnabledToolsTip:
+ 'Please select one or more tools for the chat model to use. It takes no effect for models not supporting tool call.',
+ freedom: 'Creativity',
+ improvise: 'Improvise',
+ precise: 'Precise',
+ balance: 'Balance',
+ custom: 'Custom',
+ freedomTip: `A shortcut to 'Temperature', 'Top P', 'Presence penalty', and 'Frequency penalty' settings, indicating the freedom level of the model. This parameter has three options: Select 'Improvise' to produce more creative responses; select 'Precise' (default) to produce more conservative responses; 'Balance' is a middle ground between 'Improvise' and 'Precise'.`,
+ temperature: 'Temperature',
+ temperatureMessage: 'Temperature is required',
+ temperatureTip: `This parameter controls the randomness of the model's predictions. A lower temperature results in more conservative responses, while a higher temperature yields more creative and diverse responses.`,
+ topP: 'Top P',
+ topPMessage: 'Top P is required',
+ topPTip:
+ 'Also known as "nucleus sampling", this parameter sets a threshold for selecting a smaller set of the most likely words to sample from, cutting off the less probable ones.',
+ presencePenalty: 'Presence penalty',
+ presencePenaltyMessage: 'Presence penalty is required',
+ presencePenaltyTip:
+ 'This discourages the model from repeating the same information by penalizing words that have already appeared in the conversation.',
+ frequencyPenalty: 'Frequency penalty',
+ frequencyPenaltyMessage: 'Frequency penalty is required',
+ frequencyPenaltyTip:
+ 'Similar to the presence penalty, this reduces the model’s tendency to repeat the same words frequently.',
+ maxTokens: 'Max tokens',
+ maxTokensMessage: 'Max tokens is required',
+ maxTokensTip: `This sets the maximum length of the model's output, measured in the number of tokens (words or pieces of words). Defaults to 512. If disabled, you lift the maximum token limit, allowing the model to determine the number of tokens in its responses.`,
+ maxTokensInvalidMessage: 'Please enter a valid number for Max Tokens.',
+ maxTokensMinMessage: 'Max Tokens cannot be less than 0.',
+ quote: 'Show quote',
+ quoteTip: 'Whether to display the original text as a reference.',
+ selfRag: 'Self-RAG',
+ selfRagTip: 'Please refer to: https://huggingface.co/papers/2310.11511',
+ overview: 'Chat ID',
+ pv: 'Number of messages',
+ uv: 'Active user number',
+ speed: 'Token output speed',
+ tokens: 'Consume the token number',
+ round: 'Session Interaction Number',
+ thumbUp: 'customer satisfaction',
+ preview: 'Preview',
+ embedded: 'Embedded',
+ serviceApiEndpoint: 'Service API Endpoint',
+ apiKey: 'API KEY',
+ apiReference: 'API Documents',
+ dateRange: 'Date Range:',
+ backendServiceApi: 'API Server',
+ createNewKey: 'Create new key',
+ created: 'Created',
+ action: 'Action',
+ embedModalTitle: 'Embed into webpage',
+ comingSoon: 'Coming soon',
+ fullScreenTitle: 'Full Embed',
+ fullScreenDescription:
+ 'Embed the following iframe into your website at the desired location',
+ partialTitle: 'Partial Embed',
+ extensionTitle: 'Chrome Extension',
+ tokenError: 'Please create API key first.',
+ betaError:
+ 'Please acquire a RAGFlow API key from the System Settings page first.',
+ searching: 'Searching...',
+ parsing: 'Parsing',
+ uploading: 'Uploading',
+ uploadFailed: 'Upload failed',
+ regenerate: 'Regenerate',
+ read: 'Read content',
+ tts: 'Text to speech',
+ ttsTip:
+ 'Ensure you select a TTS model on the Settings page before enabling this toggle to play text as audio.',
+ relatedQuestion: 'Related question',
+ answerTitle: 'R',
+ multiTurn: 'Multi-turn optimization',
+ multiTurnTip:
+ 'This optimizes user queries using context in a multi-round conversation. When enabled, it will consume additional LLM tokens.',
+ howUseId: 'How to use chat ID?',
+ description: 'Description of assistant',
+ descriptionPlaceholder: 'e.g. A chat assistant for resume.',
+ useKnowledgeGraph: 'Use knowledge graph',
+ useKnowledgeGraphTip:
+ 'Whether to use knowledge graph(s) in the specified knowledge base(s) during retrieval for multi-hop question answering. When enabled, this would involve iterative searches across entity, relationship, and community report chunks, greatly increasing retrieval time.',
+ keyword: 'Keyword analysis',
+ keywordTip: `Use LLM to analyze user's questions, extract keywords which will be emphasize during the relevance computation. Works well with lengthy queries but will increase response time.`,
+ languageTip:
+ 'Allows sentence rewriting with the specified language or defaults to the latest question if not selected.',
+ avatarHidden: 'Hide avatar',
+ locale: 'Locale',
+ selectLanguage: 'Select a language',
+ reasoning: 'Reasoning',
+ reasoningTip: `Whether to enable a reasoning workflow during question answering, as seen in models like Deepseek-R1 or OpenAI o1. When enabled, this allows the model to access external knowledge and tackle complex questions in a step-by-step manner, leveraging techniques like chain-of-thought reasoning. This approach enhances the model's ability to provide accurate responses by breaking down problems into manageable steps, improving performance on tasks that require logical reasoning and multi-step thinking.`,
+ tavilyApiKeyTip:
+ 'If an API key is correctly set here, Tavily-based web searches will be used to supplement knowledge base retrieval.',
+ tavilyApiKeyMessage: 'Please enter your Tavily API Key',
+ tavilyApiKeyHelp: 'How to get it?',
+ crossLanguage: 'Cross-language search',
+ crossLanguageTip: `Select one or more languages for cross‑language search. If no language is selected, the system searches with the original query.`,
+ createChat: 'Create chat',
+ metadata: 'Meta Data',
+ metadataTip:
+ 'Metadata filtering is the process of using metadata attributes (such as tags, categories, or access permissions) to refine and control the retrieval of relevant information within a system.',
+ conditions: 'Conditions',
+ addCondition: 'Add Condition',
+ meta: {
+ disabled: 'Disabled',
+ automatic: 'Automatic',
+ manual: 'Manual',
+ },
+ cancel: 'Cancel',
+ chatSetting: 'Chat setting',
+ },
+ setting: {
+ profile: 'Profile',
+ avatar: 'Avatar',
+ avatarTip: 'This will be displayed on your profile.',
+ profileDescription: 'Update your photo and personal details here.',
+ maxTokens: 'Max Tokens',
+ maxTokensMessage: 'Max Tokens is required',
+ maxTokensTip: `This sets the maximum length of the model's output, measured in the number of tokens (words or pieces of words). Defaults to 512. If disabled, you lift the maximum token limit, allowing the model to determine the number of tokens in its responses.`,
+ maxTokensInvalidMessage: 'Please enter a valid number for Max Tokens.',
+ maxTokensMinMessage: 'Max Tokens cannot be less than 0.',
+ password: 'Password',
+ passwordDescription:
+ 'Please enter your current password to change your password.',
+ model: 'Model providers',
+ modelDescription: 'Configure model parameters and API KEY here.',
+ team: 'Team',
+ system: 'System',
+ logout: 'Log out',
+ api: 'API',
+ username: 'Username',
+ usernameMessage: 'Please input your username!',
+ photo: 'Your photo',
+ photoDescription: 'This will be displayed on your profile.',
+ colorSchema: 'Color schema',
+ colorSchemaMessage: 'Please select your color schema!',
+ colorSchemaPlaceholder: 'select your color schema',
+ bright: 'Bright',
+ dark: 'Dark',
+ timezone: 'Time zone',
+ timezoneMessage: 'Please input your timezone!',
+ timezonePlaceholder: 'select your timezone',
+ email: 'Email address',
+ emailDescription: 'Once registered, E-mail cannot be changed.',
+ currentPassword: 'Current password',
+ currentPasswordMessage: 'Please input your password!',
+ newPassword: 'New password',
+ changePassword: 'Change Password',
+ newPasswordMessage: 'Please input your password!',
+ newPasswordDescription:
+ 'Your new password must be more than 8 characters.',
+ confirmPassword: 'Confirm new password',
+ confirmPasswordMessage: 'Please confirm your password!',
+ confirmPasswordNonMatchMessage:
+ 'The two passwords that you entered do not match!',
+ cancel: 'Cancel',
+ addedModels: 'Added models',
+ modelsToBeAdded: 'Models to be added',
+ addTheModel: 'Add Model',
+ apiKey: 'API-Key',
+ apiKeyMessage:
+ 'Please enter the API key (for locally deployed model, ignore this).',
+ apiKeyTip:
+ 'The API key can be obtained by registering the corresponding LLM supplier.',
+ showMoreModels: 'View Models',
+ hideModels: 'Hide Models',
+ baseUrl: 'Base-Url',
+ baseUrlTip:
+ 'If your API key is from OpenAI, just ignore it. Any other intermediate providers will give this base url with the API key.',
+ tongyiBaseUrlTip:
+ 'For Chinese users, no need to fill in or use https://dashscope.aliyuncs.com/compatible-mode/v1. For international users, use https://dashscope-intl.aliyuncs.com/compatible-mode/v1',
+ tongyiBaseUrlPlaceholder: '(International users only, please see tip)',
+ modify: 'Modify',
+ systemModelSettings: 'Set default models',
+ chatModel: 'Chat model',
+ chatModelTip:
+ 'The default chat model for each newly created knowledge base.',
+ embeddingModel: 'Embedding model',
+ embeddingModelTip:
+ 'The default embedding model for each newly created knowledge base. If you cannot find an embedding model from the dropdown, check if you are using RAGFlow slim edition (which does not include embedding models) or check https://ragflow.io/docs/dev/supported_models to see if your model provider supports this model.',
+ img2txtModel: 'Img2txt model',
+ img2txtModelTip:
+ 'The default img2txt model for each newly created knowledge base. It describes a picture or video. If you cannot find a model from the dropdown, check https://ragflow.io/docs/dev/supported_models to see if your model provider supports this model.',
+ sequence2txtModel: 'Speech2txt model',
+ sequence2txtModelTip:
+ 'The default ASR model for each newly created knowledgebase. Use this model to translate voices to corresponding text.',
+ rerankModel: 'Rerank model',
+ rerankModelTip: `The default rerank model for reranking chunks. If you cannot find a model from the dropdown, check https://ragflow.io/docs/dev/supported_models to see if your model provider supports this model.`,
+ ttsModel: 'TTS Model',
+ ttsModelTip:
+ 'The default text-to-speech model. If you cannot find a model from the dropdown, check https://ragflow.io/docs/dev/supported_models to see if your model provider supports this model.',
+ workspace: 'Workspace',
+ upgrade: 'Upgrade',
+ addLlmTitle: 'Add LLM',
+ editLlmTitle: 'Edit {{name}} Model',
+ editModel: 'Edit Model',
+ modelName: 'Model name',
+ modelID: 'Model ID',
+ modelUid: 'Model UID',
+ modelNameMessage: 'Please input your model name!',
+ modelType: 'Model type',
+ modelTypeMessage: 'Please input your model type!',
+ addLlmBaseUrl: 'Base url',
+ baseUrlNameMessage: 'Please input your base url!',
+ vision: 'Does it support Vision?',
+ ollamaLink: 'How to integrate {{name}}',
+ FishAudioLink: 'How to use FishAudio',
+ TencentCloudLink: 'How to use TencentCloud ASR',
+ volcModelNameMessage: 'Please input your model name!',
+ addEndpointID: 'EndpointID of the model',
+ endpointIDMessage: 'Please input your EndpointID of the model',
+ addArkApiKey: 'VOLC ARK_API_KEY',
+ ArkApiKeyMessage: 'Please input your ARK_API_KEY',
+ bedrockModelNameMessage: 'Please input your model name!',
+ addBedrockEngineAK: 'ACCESS KEY',
+ bedrockAKMessage: 'Please input your ACCESS KEY',
+ addBedrockSK: 'SECRET KEY',
+ bedrockSKMessage: 'Please input your SECRET KEY',
+ bedrockRegion: 'AWS Region',
+ bedrockRegionMessage: 'Please select!',
+ 'us-east-2': 'US East (Ohio)',
+ 'us-east-1': 'US East (N. Virginia)',
+ 'us-west-1': 'US West (N. California)',
+ 'us-west-2': 'US West (Oregon)',
+ 'af-south-1': 'Africa (Cape Town)',
+ 'ap-east-1': 'Asia Pacific (Hong Kong)',
+ 'ap-south-2': 'Asia Pacific (Hyderabad)',
+ 'ap-southeast-3': 'Asia Pacific (Jakarta)',
+ 'ap-southeast-5': 'Asia Pacific (Malaysia)',
+ 'ap-southeast-4': 'Asia Pacific (Melbourne)',
+ 'ap-south-1': 'Asia Pacific (Mumbai)',
+ 'ap-northeast-3': 'Asia Pacific (Osaka)',
+ 'ap-northeast-2': 'Asia Pacific (Seoul)',
+ 'ap-southeast-1': 'Asia Pacific (Singapore)',
+ 'ap-southeast-2': 'Asia Pacific (Sydney)',
+ 'ap-east-2': 'Asia Pacific (Taipei)',
+ 'ap-southeast-7': 'Asia Pacific (Thailand)',
+ 'ap-northeast-1': 'Asia Pacific (Tokyo)',
+ 'ca-central-1': 'Canada (Central)',
+ 'ca-west-1': 'Canada West (Calgary)',
+ 'eu-central-1': 'Europe (Frankfurt)',
+ 'eu-west-1': 'Europe (Ireland)',
+ 'eu-west-2': 'Europe (London)',
+ 'eu-south-1': 'Europe (Milan)',
+ 'eu-west-3': 'Europe (Paris)',
+ 'eu-south-2': 'Europe (Spain)',
+ 'eu-north-1': 'Europe (Stockholm)',
+ 'eu-central-2': 'Europe (Zurich)',
+ 'il-central-1': 'Israel (Tel Aviv)',
+ 'mx-central-1': 'Mexico (Central)',
+ 'me-south-1': 'Middle East (Bahrain)',
+ 'me-central-1': 'Middle East (UAE)',
+ 'sa-east-1': 'South America (São Paulo)',
+ 'us-gov-east-1': 'AWS GovCloud (US-East)',
+ 'us-gov-west-1': 'AWS GovCloud (US-West)',
+ addHunyuanSID: 'Hunyuan Secret ID',
+ HunyuanSIDMessage: 'Please input your Secret ID',
+ addHunyuanSK: 'Hunyuan Secret Key',
+ HunyuanSKMessage: 'Please input your Secret Key',
+ addTencentCloudSID: 'TencentCloud Secret ID',
+ TencentCloudSIDMessage: 'Please input your Secret ID',
+ addTencentCloudSK: 'TencentCloud Secret Key',
+ TencentCloudSKMessage: 'Please input your Secret Key',
+ SparkModelNameMessage: 'Please select Spark model',
+ addSparkAPIPassword: 'Spark APIPassword',
+ SparkAPIPasswordMessage: 'please input your APIPassword',
+ addSparkAPPID: 'Spark APP ID',
+ SparkAPPIDMessage: 'please input your APP ID',
+ addSparkAPISecret: 'Spark APISecret',
+ SparkAPISecretMessage: 'please input your APISecret',
+ addSparkAPIKey: 'Spark APIKey',
+ SparkAPIKeyMessage: 'please input your APIKey',
+ yiyanModelNameMessage: 'Please input model name',
+ addyiyanAK: 'yiyan API KEY',
+ yiyanAKMessage: 'Please input your API KEY',
+ addyiyanSK: 'yiyan Secret KEY',
+ yiyanSKMessage: 'Please input your Secret KEY',
+ FishAudioModelNameMessage:
+ 'Please give your speech synthesis model a name',
+ addFishAudioAK: 'Fish Audio API KEY',
+ addFishAudioAKMessage: 'Please input your API KEY',
+ addFishAudioRefID: 'FishAudio Reference ID',
+ addFishAudioRefIDMessage:
+ 'Please input the Reference ID (leave blank to use the default model).',
+ GoogleModelIDMessage: 'Please input your model ID!',
+ addGoogleProjectID: 'Project ID',
+ GoogleProjectIDMessage: 'Please input your Project ID',
+ addGoogleServiceAccountKey:
+ 'Service Account Key(Leave blank if you use Application Default Credentials)',
+ GoogleServiceAccountKeyMessage:
+ 'Please input Google Cloud Service Account Key in base64 format',
+ addGoogleRegion: 'Google Cloud Region',
+ GoogleRegionMessage: 'Please input Google Cloud Region',
+ modelProvidersWarn: `Please add both embedding model and LLM in Settings > Model providers first. Then, set them in 'Set default models'.`,
+ apiVersion: 'API-Version',
+ apiVersionMessage: 'Please input API version',
+ add: 'Add',
+ updateDate: 'Update Date',
+ role: 'Role',
+ invite: 'Invite',
+ agree: 'Accept',
+ refuse: 'Decline',
+ teamMembers: 'Team Members',
+ joinedTeams: 'Joined Teams',
+ sureDelete: 'Are you sure to remove this member?',
+ quit: 'Quit',
+ sureQuit: 'Are you sure you want to quit the team you joined?',
+ secretKey: 'Secret key',
+ publicKey: 'Public key',
+ secretKeyMessage: 'Please enter the secret key',
+ publicKeyMessage: 'Please enter the public key',
+ hostMessage: 'Please enter the host',
+ configuration: 'Configuration',
+ langfuseDescription:
+ 'Traces, evals, prompt management and metrics to debug and improve your LLM application.',
+ viewLangfuseSDocumentation: "View Langfuse's documentation",
+ view: 'View',
+ modelsToBeAddedTooltip:
+ 'If your model provider is not listed but claims to be "OpenAI-compatible", select the OpenAI-API-compatible card to add the relevant model(s). ',
+ mcp: 'MCP',
+ },
+ message: {
+ registered: 'Registered!',
+ logout: 'logout',
+ logged: 'logged!',
+ pleaseSelectChunk: 'Please select chunk!',
+ registerDisabled: 'User registration is disabled',
+ modified: 'Modified',
+ created: 'Created',
+ deleted: 'Deleted',
+ renamed: 'Renamed',
+ operated: 'Operated',
+ updated: 'Updated',
+ uploaded: 'Uploaded',
+ 200: 'The server successfully returns the requested data.',
+ 201: 'Create or modify data successfully.',
+ 202: 'A request has been queued in the background (asynchronous task).',
+ 204: 'Data deleted successfully.',
+ 400: 'There was an error in the request issued, and the server did not create or modify data.',
+ 401: 'Please sign in again.',
+ 403: 'The user is authorized, but access is prohibited.',
+ 404: 'The request was made for a record that does not exist, and the server did not perform the operation.',
+ 406: 'The requested format is not available.',
+ 410: 'The requested resource has been permanently deleted and will not be available again.',
+ 413: 'The total size of the files uploaded at once is too large.',
+ 422: 'When creating an object, a validation error occurred.',
+ 500: 'A server error occurred, please check the server.',
+ 502: 'Gateway error.',
+ 503: 'The service is unavailable and the server is temporarily overloaded or undergoing maintenance.',
+ 504: 'Gateway timeout.',
+ requestError: 'Request error',
+ networkAnomalyDescription:
+ 'There is an abnormality in your network and you cannot connect to the server.',
+ networkAnomaly: 'network anomaly',
+ hint: 'hint',
+ },
+ fileManager: {
+ files: 'Files',
+ name: 'Name',
+ uploadDate: 'Upload Date',
+ knowledgeBase: 'Dataset',
+ size: 'Size',
+ action: 'Action',
+ addToKnowledge: 'Link to Knowledge Base',
+ pleaseSelect: 'Please select',
+ newFolder: 'New Folder',
+ file: 'File',
+ uploadFile: 'Upload File',
+ parseOnCreation: 'Parse on creation',
+ directory: 'Directory',
+ uploadTitle: 'Drag and drop your file here to upload',
+ uploadDescription:
+ 'Supports single or batch file upload. For a locally deployed RAGFlow: the total file size limit per upload is 1GB, with a batch upload limit of 32 files. There is no cap on the total number of files per account. For demo.ragflow.io, the total file size limit per upload is 10MB, with each file not exceeding 10MB and a maximum of 128 files per account.',
+ local: 'Local uploads',
+ s3: 'S3 uploads',
+ preview: 'Preview',
+ fileError: 'File error',
+ uploadLimit:
+ 'Each file must not exceed 10MB, and the total number of files must not exceed 128.',
+ destinationFolder: 'Destination folder',
+ pleaseUploadAtLeastOneFile: 'Please upload at least one file',
+ },
+ flow: {
+ recommended: 'Recommended',
+ customerSupport: 'Customer Support',
+ marketing: 'Marketing',
+ consumerApp: 'Consumer App',
+ other: 'Other',
+ agents: 'Agents',
+ days: 'Days',
+ beginInput: 'Begin Input',
+ ref: 'Variable',
+ stockCode: 'Stock Code',
+ apiKeyPlaceholder:
+ 'YOUR_API_KEY (obtained from https://serpapi.com/manage-api-key)',
+ flowStart: 'Start',
+ flowNum: 'Num',
+ test: 'Test',
+ extractDepth: 'Extract Depth',
+ format: 'Format',
+ basic: 'basic',
+ advanced: 'advanced',
+ general: 'general',
+ searchDepth: 'Search Depth',
+ tavilyTopic: 'Tavily Topic',
+ maxResults: 'Max Results',
+ includeAnswer: 'Include Answer',
+ includeRawContent: 'Include Raw Content',
+ includeImages: 'Include Images',
+ includeImageDescriptions: 'Include Image Descriptions',
+ includeDomains: 'Include Domains',
+ ExcludeDomains: 'Exclude Domains',
+ Days: 'Days',
+ comma: 'Comma',
+ semicolon: 'Semicolon',
+ period: 'Period',
+ lineBreak: 'Line Break',
+ tab: 'Tab',
+ space: 'Space',
+ delimiters: 'Delimiters',
+ merge: 'Merge',
+ split: 'Split',
+ script: 'Script',
+ iterationItemDescription:
+ 'It represents the current element in the iteration, which can be referenced and manipulated in subsequent steps.',
+ guidingQuestion: 'Guidance Question',
+ onFailure: 'On Failure',
+ userPromptDefaultValue:
+ 'This is the order you need to send to the agent.',
+ search: 'Search',
+ communication: 'Communication',
+ developer: 'Developer',
+ typeCommandOrsearch: 'Type a command or search...',
+ builtIn: 'Built-in',
+ ExceptionDefaultValue: 'Exception default value',
+ exceptionMethod: 'Exception method',
+ maxRounds: 'Max reflection rounds',
+ delayEfterError: 'Delay after error',
+ maxRetries: 'Max retries',
+ advancedSettings: 'Advanced Settings',
+ addTools: 'Add Tools',
+ sysPromptDefultValue: `
+
+ You are a helpful assistant, an AI assistant specialized in problem-solving for the user.
+ If a specific domain is provided, adapt your expertise to that domain; otherwise, operate as a generalist.
+
+
+ 1. Understand the user’s request.
+ 2. Decompose it into logical subtasks.
+ 3. Execute each subtask step by step, reasoning transparently.
+ 4. Validate accuracy and consistency.
+ 5. Summarize the final result clearly.
+ `,
+ singleLineText: 'Single-line text',
+ multimodalModels: 'Multimodal Models',
+ textOnlyModels: 'Text-only Models',
+ allModels: 'All Models',
+ codeExecDescription: 'Write your custom Python or Javascript logic.',
+ stringTransformDescription:
+ 'Modifies text content. Currently supports: Splitting or concatenating text.',
+ foundation: 'Foundation',
+ tools: 'Tools',
+ dataManipulation: 'Data Manipulation',
+ flow: 'Flow',
+ dialog: 'Dialogue',
+ cite: 'Cite',
+ citeTip: 'citeTip',
+ name: 'Name',
+ nameMessage: 'Please input name',
+ description: 'Description',
+ descriptionMessage: 'This is an agent for a specific task.',
+ examples: 'Examples',
+ to: 'To',
+ msg: 'Messages',
+ msgTip:
+ 'Output the variable content of the upstream component or the text entered by yourself.',
+ messagePlaceholder: `Please enter your message content, use '/' to quickly insert variables.`,
+ messageMsg: 'Please input message or delete this field.',
+ addField: 'Add option',
+ addMessage: 'Add message',
+ loop: 'Loop',
+ loopTip:
+ 'Loop is the upper limit of the number of loops of the current component, when the number of loops exceeds the value of loop, it means that the component can not complete the current task, please re-optimize agent',
+ yes: 'Yes',
+ no: 'No',
+ key: 'Key',
+ componentId: 'Component ID',
+ add: 'Add',
+ operation: 'operation',
+ run: 'Run',
+ save: 'Save',
+ title: 'ID:',
+ beginDescription: 'This is where the flow begins.',
+ answerDescription: `A component that serves as the interface between human and bot, receiving user inputs and displaying the agent's responses.`,
+ retrievalDescription: `A component that retrieves information from specified knowledge bases (datasets). Ensure that the knowledge bases you select use the same embedding model.`,
+ generateDescription: `A component that prompts the LLM to generate responses. Ensure the prompt is set correctly.`,
+ categorizeDescription: `A component that uses the LLM to classify user inputs into predefined categories. Ensure you specify the name, description, and examples for each category, along with the corresponding next component.`,
+ relevantDescription: `A component that uses the LLM to assess whether the upstream output is relevant to the user's latest query. Ensure you specify the next component for each judge result.`,
+ rewriteQuestionDescription: `A component that rewrites a user query from the Interact component, based on the context of previous dialogues.`,
+ messageDescription:
+ 'This component returns the final data output of the workflow along with predefined message content. ',
+ keywordDescription: `A component that retrieves top N search results from user's input. Ensure the TopN value is set properly before use.`,
+ switchDescription: `A component that evaluates conditions based on the output of previous components and directs the flow of execution accordingly. It allows for complex branching logic by defining cases and specifying actions for each case or default action if no conditions are met.`,
+ wikipediaDescription: `A component that searches from wikipedia.org, using TopN to specify the number of search results. It supplements the existing knowledge bases.`,
+ promptText: `Please summarize the following paragraphs. Be careful with the numbers, do not make things up. Paragraphs as following:
+ {input}
+ The above is the content you need to summarize.`,
+ createGraph: 'Create agent',
+ createFromTemplates: 'Create from template',
+ retrieval: 'Retrieval',
+ generate: 'Generate',
+ answer: 'Interact',
+ categorize: 'Categorize',
+ relevant: 'Relevant',
+ rewriteQuestion: 'Rewrite',
+ rewrite: 'Rewrite',
+ begin: 'Begin',
+ message: 'Message',
+ blank: 'Blank',
+ createFromNothing: 'Create your agent from scratch',
+ addItem: 'Add Item',
+ addSubItem: 'Add Sub Item',
+ nameRequiredMsg: 'Name is required',
+ nameRepeatedMsg: 'The name cannot be repeated',
+ keywordExtract: 'Keyword',
+ keywordExtractDescription: `A component that extracts keywords from a user query, with Top N specifying the number of keywords to extract.`,
+ baidu: 'Baidu',
+ baiduDescription: `A component that searches from baidu.com, using TopN to specify the number of search results. It supplements the existing knowledge bases.`,
+ duckDuckGo: 'DuckDuckGo',
+ duckDuckGoDescription:
+ 'A component that searches from duckduckgo.com, allowing you to specify the number of search results using TopN. It supplements the existing knowledge bases.',
+ searXNG: 'SearXNG',
+ searXNGDescription:
+ 'A component that searches via your provided SearXNG instance URL. Specify TopN and the instance URL.',
+ channel: 'Channel',
+ channelTip: `Perform text search or news search on the component's input`,
+ text: 'Text',
+ news: 'News',
+ messageHistoryWindowSize: 'Message window size',
+ messageHistoryWindowSizeTip:
+ 'The window size of conversation history visible to the LLM. Larger is better, but be mindful of the maximum token limit of LLM.',
+ wikipedia: 'Wikipedia',
+ pubMed: 'PubMed',
+ pubMedDescription:
+ 'A component that searches from https://pubmed.ncbi.nlm.nih.gov/, allowing you to specify the number of search results using TopN. It supplements the existing knowledge bases.',
+ email: 'Email',
+ emailTip:
+ 'E-mail is a required field. You must input an E-mail address here.',
+ arXiv: 'ArXiv',
+ arXivDescription:
+ 'A component that searches from https://arxiv.org/, allowing you to specify the number of search results using TopN. It supplements the existing knowledge bases.',
+ sortBy: 'Sort by',
+ submittedDate: 'Submitted date',
+ lastUpdatedDate: 'Last updated date',
+ relevance: 'Relevance',
+ google: 'Google',
+ googleDescription:
+ 'A component that searches from https://www.google.com/, allowing you to specify the number of search results using TopN. It supplements the existing knowledge bases. Please note that this requires an API key from serpapi.com.',
+ bing: 'Bing',
+ bingDescription:
+ 'A component that searches from https://www.bing.com/, allowing you to specify the number of search results using TopN. It supplements the existing knowledge bases. Please note that this requires an API key from microsoft.com.',
+ apiKey: 'API KEY',
+ country: 'Country & Region',
+ language: 'Language',
+ googleScholar: 'Google Scholar',
+ googleScholarDescription:
+ 'A component that searches https://scholar.google.com/. You can use Top N to specify the number of search results.',
+ yearLow: 'Year low',
+ yearHigh: 'Year high',
+ patents: 'Patents',
+ data: 'Data',
+ deepL: 'DeepL',
+ deepLDescription:
+ 'A component that gets more specialized translations from https://www.deepl.com/.',
+ authKey: 'Auth key',
+ sourceLang: 'Source language',
+ targetLang: 'Target language',
+ gitHub: 'GitHub',
+ gitHubDescription:
+ 'A component that searches for repositories from https://github.com/. You can use Top N to specify the number of search results.',
+ baiduFanyi: 'BaiduFanyi',
+ baiduFanyiDescription:
+ 'A component that gets specialized translations from https://fanyi.baidu.com/.',
+ appid: 'App ID',
+ secretKey: 'Secret key',
+ domain: 'Domain',
+ transType: 'Translation type',
+ baiduSecretKeyOptions: {
+ translate: 'General translation',
+ fieldtranslate: 'Field translation',
+ },
+ baiduDomainOptions: {
+ it: 'Information technology',
+ finance: 'Financial and economics',
+ machinery: 'Machinery manufacturing',
+ senimed: 'Biomedicine',
+ novel: 'Online literature',
+ academic: 'Academic paper',
+ aerospace: 'Aerospace',
+ wiki: 'Humanities and social sciences',
+ news: 'News and information',
+ law: 'Laws and regulations',
+ contract: 'Contract',
+ },
+ baiduSourceLangOptions: {
+ auto: 'Auto detect',
+ zh: 'Chinese',
+ en: 'English',
+ yue: 'Cantonese',
+ wyw: 'Classical Chinese',
+ jp: 'Japanese',
+ kor: 'Korean',
+ fra: 'French',
+ spa: 'Spanish',
+ th: 'Thai',
+ ara: 'Arabic',
+ ru: 'Russian',
+ pt: 'Portuguese',
+ de: 'German',
+ it: 'Italian',
+ el: 'Greek',
+ nl: 'Dutch',
+ pl: 'Polish',
+ bul: 'Bulgarian',
+ est: 'Estonian',
+ dan: 'Danish',
+ fin: 'Finnish',
+ cs: 'Czech',
+ rom: 'Romanian',
+ slo: 'Slovenian',
+ swe: 'Swedish',
+ hu: 'Hungarian',
+ cht: 'Traditional Chinese',
+ vie: 'Vietnamese',
+ },
+ qWeather: 'QWeather',
+ qWeatherDescription:
+ 'A component that retrieves weather information, such as temperature and air quality, from https://www.qweather.com/.',
+ lang: 'Language',
+ type: 'Type',
+ webApiKey: 'Web API key',
+ userType: 'User type',
+ timePeriod: 'Time period',
+ qWeatherLangOptions: {
+ zh: 'Simplified Chinese',
+ 'zh-hant': 'Traditional Chinese',
+ en: 'English',
+ de: 'German',
+ es: 'Spanish',
+ fr: 'French',
+ it: 'Italian',
+ ja: 'Japanese',
+ ko: 'Korean',
+ ru: 'Russian',
+ hi: 'Hindi',
+ th: 'Thai',
+ ar: 'Arabic',
+ pt: 'Portuguese',
+ bn: 'Bengali',
+ ms: 'Malay',
+ nl: 'Dutch',
+ el: 'Greek',
+ la: 'Latin',
+ sv: 'Swedish',
+ id: 'Indonesian',
+ pl: 'Polish',
+ tr: 'Turkish',
+ cs: 'Czech',
+ et: 'Estonian',
+ vi: 'Vietnamese',
+ fil: 'Filipino',
+ fi: 'Finnish',
+ he: 'Hebrew',
+ is: 'Icelandic',
+ nb: 'Norwegian',
+ },
+ qWeatherTypeOptions: {
+ weather: 'Weather forecast',
+ indices: 'Weather life index',
+ airquality: 'Air quality',
+ },
+ qWeatherUserTypeOptions: {
+ free: 'Free subscriber',
+ paid: 'Paid subscriber',
+ },
+ qWeatherTimePeriodOptions: {
+ now: 'Now',
+ '3d': '3 days',
+ '7d': '7 days',
+ '10d': '10 days',
+ '15d': '15 days',
+ '30d': '30 days',
+ },
+ publish: 'API',
+ exeSQL: 'Execute SQL',
+ exeSQLDescription:
+ 'A component that performs SQL queries on a relational database, supporting querying from MySQL, PostgreSQL, or MariaDB.',
+ dbType: 'Database Type',
+ database: 'Database',
+ username: 'Username',
+ host: 'Host',
+ port: 'Port',
+ password: 'Password',
+ switch: 'Switch',
+ logicalOperator: 'Logical operator',
+ switchOperatorOptions: {
+ equal: 'Equals',
+ notEqual: 'Not equal',
+ gt: 'Greater than',
+ ge: 'Greater equal',
+ lt: 'Less than',
+ le: 'Less equal',
+ contains: 'Contains',
+ notContains: 'Not contains',
+ startWith: 'Starts with',
+ endWith: 'Ends with',
+ empty: 'Is empty',
+ notEmpty: 'Not empty',
+ },
+ switchLogicOperatorOptions: {
+ and: 'AND',
+ or: 'OR',
+ },
+ operator: 'Operator',
+ value: 'Value',
+ useTemplate: 'Use',
+ wenCai: 'WenCai',
+ queryType: 'Query type',
+ wenCaiDescription:
+ 'A component that obtains financial information, including stock prices and funding news, from a wide range of financial websites.',
+ wenCaiQueryTypeOptions: {
+ stock: 'stock',
+ zhishu: 'index',
+ fund: 'fund',
+ hkstock: 'Hong Kong shares',
+ usstock: 'US stock market',
+ threeboard: 'New OTC Market',
+ conbond: 'Convertible Bond',
+ insurance: 'insurance',
+ futures: 'futures',
+ lccp: 'Financing',
+ foreign_exchange: 'Foreign currency',
+ },
+ akShare: 'AkShare',
+ akShareDescription:
+ 'A component that obtains news about stocks from https://www.eastmoney.com/.',
+ yahooFinance: 'YahooFinance',
+ yahooFinanceDescription:
+ 'A component that queries information about a publicly traded company using its ticker symbol.',
+ crawler: 'Web Crawler',
+ crawlerDescription:
+ 'A component that crawls HTML source code from a specified URL.',
+ proxy: 'Proxy',
+ crawlerResultOptions: {
+ html: 'Html',
+ markdown: 'Markdown',
+ content: 'Content',
+ },
+ extractType: 'Extract type',
+ info: 'Info',
+ history: 'History',
+ financials: 'Financials',
+ balanceSheet: 'Balance sheet',
+ cashFlowStatement: 'Cash flow statement',
+ jin10: 'Jin10',
+ jin10Description:
+ 'A component that retrieves financial information from the Jin10 Open Platform, including news updates, calendars, quotes, and references.',
+ flashType: 'Flash type',
+ filter: 'Filter',
+ contain: 'Contain',
+ calendarType: 'Calendar type',
+ calendarDatashape: 'Calendar datashape',
+ symbolsDatatype: 'Symbols datatype',
+ symbolsType: 'Symbols type',
+ jin10TypeOptions: {
+ flash: 'Quick News',
+ calendar: 'Calendar',
+ symbols: 'quotes',
+ news: 'reference',
+ },
+ jin10FlashTypeOptions: {
+ '1': 'Market News',
+ '2': 'Futures News',
+ '3': 'US-Hong Kong News',
+ '4': 'A-Share News',
+ '5': 'Commodities & Forex News',
+ },
+ jin10CalendarTypeOptions: {
+ cj: 'Macroeconomic Data Calendar',
+ qh: 'Futures Calendar',
+ hk: 'Hong Kong Stock Market Calendar',
+ us: 'US Stock Market Calendar',
+ },
+ jin10CalendarDatashapeOptions: {
+ data: 'Data',
+ event: 'Event',
+ holiday: 'Holiday',
+ },
+ jin10SymbolsTypeOptions: {
+ GOODS: 'Commodity Quotes',
+ FOREX: 'Forex Quotes',
+ FUTURE: 'International Market Quotes',
+ CRYPTO: 'Cryptocurrency Quotes',
+ },
+ jin10SymbolsDatatypeOptions: {
+ symbols: 'Commodity List',
+ quotes: 'Latest Market Quotes',
+ },
+ concentrator: 'Concentrator',
+ concentratorDescription:
+ 'A component that receives the output from the upstream component and passes it on as input to the downstream components.',
+ tuShare: 'TuShare',
+ tuShareDescription:
+ 'A component that obtains financial news briefs from mainstream financial websites, aiding industry and quantitative research.',
+ tuShareSrcOptions: {
+ sina: 'Sina',
+ wallstreetcn: 'wallstreetcn',
+ '10jqka': 'Straight flush',
+ eastmoney: 'Eastmoney',
+ yuncaijing: 'YUNCAIJING',
+ fenghuang: 'FENGHUANG',
+ jinrongjie: 'JRJ',
+ },
+ token: 'Token',
+ src: 'Source',
+ startDate: 'Start date',
+ endDate: 'End date',
+ keyword: 'Keyword',
+ note: 'Note',
+ noteDescription: 'Note',
+ notePlaceholder: 'Please enter a note',
+ invoke: 'HTTP Request',
+ invokeDescription: `A component capable of calling remote services, using other components' outputs or constants as inputs.`,
+ url: 'Url',
+ method: 'Method',
+ timeout: 'Timeout',
+ headers: 'Headers',
+ cleanHtml: 'Clean HTML',
+ cleanHtmlTip:
+ 'If the response is HTML formatted and only the primary content is wanted, please toggle it on.',
+ reference: 'Reference',
+ input: 'Input',
+ output: 'Output',
+ parameter: 'Parameter',
+ howUseId: 'How to use agent ID?',
+ content: 'Content',
+ operationResults: 'Operation Results',
+ autosaved: 'Autosaved',
+ optional: 'Optional',
+ pasteFileLink: 'Paste file link',
+ testRun: 'Test Run',
+ template: 'Template',
+ templateDescription:
+ 'A component that formats the output of other components. 1. Supports Jinja2 templates: the input is first converted to an object, then the template is rendered. 2. Also retains the original {parameter} string-replacement method.',
+ emailComponent: 'Email',
+ emailDescription: 'Send an email to a specified address.',
+ smtpServer: 'SMTP Server',
+ smtpPort: 'SMTP Port',
+ senderEmail: 'Sender Email',
+ authCode: 'Authorization Code',
+ senderName: 'Sender Name',
+ toEmail: 'Recipient Email',
+ ccEmail: 'CC Email',
+ emailSubject: 'Subject',
+ emailContent: 'Content',
+ smtpServerRequired: 'Please input SMTP server address',
+ senderEmailRequired: 'Please input sender email',
+ authCodeRequired: 'Please input authorization code',
+ toEmailRequired: 'Please input recipient email',
+ emailContentRequired: 'Please input email content',
+ emailSentSuccess: 'Email sent successfully',
+ emailSentFailed: 'Failed to send email',
+ dynamicParameters: 'Dynamic Parameters',
+ jsonFormatTip:
+ 'Upstream component should provide JSON string in following format:',
+ toEmailTip: 'to_email: Recipient email (Required)',
+ ccEmailTip: 'cc_email: CC email (Optional)',
+ subjectTip: 'subject: Email subject (Optional)',
+ contentTip: 'content: Email content (Optional)',
+ jsonUploadTypeErrorMessage: 'Please upload json file',
+ jsonUploadContentErrorMessage: 'json file error',
+ iteration: 'Iteration',
+ iterationDescription: `A looping component that iterates over an input array and executes a defined logic for each item.`,
+ delimiterTip: `
+This delimiter is used to split the input text into several text pieces, each of which will be used as the input item of each iteration.`,
+ delimiterOptions: {
+ comma: 'Comma',
+ lineBreak: 'Line break',
+ tab: 'Tab',
+ underline: 'Underline',
+ diagonal: 'Forward slash',
+ minus: 'Dash',
+ semicolon: 'Semicolon',
+ },
+ addVariable: 'Add variable',
+ variableSettings: 'Variable settings',
+ globalVariables: 'Global variables',
+ systemPrompt: 'System prompt',
+ userPrompt: 'User prompt',
+ addCategory: 'Add category',
+ categoryName: 'Category name',
+ nextStep: 'Next step',
+ variableExtractDescription:
+ 'Extract user information into global variable throughout the conversation',
+ variableExtract: 'Variables',
+ variables: 'Variables',
+ variablesTip: `Set the clear json key variable with a value of empty. e.g.
+ {
+ "UserCode":"",
+ "NumberPhone":""
+ }`,
+ datatype: 'MIME type of the HTTP request',
+ insertVariableTip: `Enter / Insert variables`,
+ historyversion: 'Version history',
+ filename: 'File name',
+ version: {
+ created: 'Created',
+ details: 'Version details',
+ dsl: 'DSL',
+ download: 'Download',
+ version: 'Version',
+ select: 'No version selected',
+ },
+ setting: 'Settings',
+ settings: {
+ agentSetting: 'Agent settings',
+ title: 'title',
+ description: 'description',
+ upload: 'Upload',
+ photo: 'Photo',
+ permissions: 'Permissions',
+ permissionsTip: 'You can set the permissions of the team members here.',
+ me: 'me',
+ team: 'Team',
+ },
+ noMoreData: 'No more data',
+ searchAgentPlaceholder: 'Search agent',
+ footer: {
+ profile: 'All rights reserved @ React',
+ },
+ layout: {
+ file: 'file',
+ knowledge: 'knowledge',
+ chat: 'chat',
+ },
+ prompt: 'Prompt',
+ promptTip:
+ 'Use the system prompt to describe the task for the LLM, specify how it should respond, and outline other miscellaneous requirements. The system prompt is often used in conjunction with keys (variables), which serve as various data inputs for the LLM. Use a forward slash `/` or the (x) button to show the keys to use.',
+ promptMessage: 'Prompt is required',
+ infor: 'Information run',
+ knowledgeBasesTip:
+ 'Select the knowledge bases to associate with this chat assistant, or choose variables containing knowledge base IDs below.',
+ knowledgeBaseVars: 'Knowledge base variables',
+ code: 'Code',
+ codeDescription: 'It allows developers to write custom Python logic.',
+ inputVariables: 'Input variables',
+ runningHintText: 'is running...🕞',
+ openingSwitch: 'Opening switch',
+ openingCopy: 'Opening greeting',
+ openingSwitchTip:
+ 'Your users will see this welcome message at the beginning.',
+ modeTip: 'The mode defines how the workflow is initiated.',
+ mode: 'Mode',
+ conversational: 'conversational',
+ task: 'task',
+ beginInputTip:
+ 'By defining input parameters, this content can be accessed by other components in subsequent processes.',
+ query: 'Query variables',
+ queryTip: 'Select the variable you want to use',
+ agent: 'Agent',
+ addAgent: 'Add Agent',
+ agentDescription:
+ 'Builds agent components equipped with reasoning, tool usage, and multi-agent collaboration. ',
+ maxRecords: 'Max records',
+ createAgent: 'Agent flow',
+ stringTransform: 'Text Processing',
+ userFillUp: 'Await Response',
+ userFillUpDescription: `Pauses the workflow and waits for the user's message before continuing.`,
+ codeExec: 'Code',
+ tavilySearch: 'Tavily Search',
+ tavilySearchDescription: 'Search results via Tavily service.',
+ tavilyExtract: 'Tavily Extract',
+ tavilyExtractDescription: 'Tavily Extract',
+ log: 'Log',
+ management: 'Management',
+ import: 'Import',
+ export: 'Export',
+ seconds: 'Seconds',
+ subject: 'Subject',
+ tag: 'Tag',
+ tagPlaceholder: 'Please enter tag',
+ descriptionPlaceholder: 'Please enter description',
+ line: 'Single-line text',
+ paragraph: 'Paragraph text',
+ options: 'Dropdown options',
+ file: 'File upload',
+ integer: 'Number',
+ boolean: 'Boolean',
+
+ logTimeline: {
+ begin: 'Ready to begin',
+ agent: 'Agent is thinking',
+ userFillUp: 'Waiting for you',
+ retrieval: 'Looking up knowledge',
+ message: 'Agent says',
+ awaitResponse: 'Waiting for you',
+ switch: 'Choosing the best path',
+ iteration: 'Batch processing',
+ categorize: 'Categorising info',
+ code: 'Running a quick script',
+ textProcessing: 'Tidying up text',
+ tavilySearch: 'Searching the web',
+ tavilyExtract: 'Reading the page',
+ exeSQL: 'Querying database',
+ google: 'Searching the web',
+ wikipedia: 'Searching Wikipedia',
+ googleScholar: 'Academic search',
+ gitHub: 'Searching GitHub',
+ email: 'Sending email',
+ httpRequest: 'Calling an API',
+ wenCai: 'Querying financial data',
+ },
+ goto: 'Fail Branch',
+ comment: 'Default Value',
+ sqlStatement: 'SQL Statement',
+ sqlStatementTip:
+ 'Write your SQL query here. You can use variables, raw SQL, or mix both using variable syntax.',
+ frameworkPrompts: 'Framework',
+ release: 'Publish',
+ createFromBlank: 'Create from blank',
+ createFromTemplate: 'Create from template',
+ importJsonFile: 'Import JSON file',
+ ceateAgent: 'Agent flow',
+ createPipeline: 'Data pipeline',
+ chooseAgentType: 'Choose Agent Type',
+ },
+ llmTools: {
+ bad_calculator: {
+ name: 'Calculator',
+ description:
+ 'A tool to calculate the sum of two numbers (will give wrong answer)',
+ params: {
+ a: 'The first number',
+ b: 'The second number',
+ },
+ },
+ },
+ modal: {
+ okText: 'Confirm',
+ cancelText: 'Cancel',
+ },
+ mcp: {
+ export: 'Export',
+ import: 'Import',
+ url: 'URL',
+ serverType: 'Server Type',
+ addMCP: 'Add MCP',
+ editMCP: 'Edit MCP',
+ toolsAvailable: 'tools available',
+ mcpServers: 'MCP Servers',
+ customizeTheListOfMcpServers: 'Customize the list of MCP servers',
+ },
+ search: {
+ searchApps: 'Search Apps',
+ createSearch: 'Create Search',
+ searchGreeting: 'How can I help you today?',
+ profile: 'Hide Profile',
+ locale: 'Locale',
+ embedCode: 'Embed code',
+ id: 'ID',
+ copySuccess: 'Copy Success',
+ welcomeBack: 'Welcome back',
+ searchSettings: 'Search Settings',
+ name: 'Name',
+ avatar: 'Avatar',
+ description: 'Description',
+ datasets: 'Datasets',
+ rerankModel: 'Rerank Model',
+ AISummary: 'AI Summary',
+ enableWebSearch: 'Enable Web Search',
+ enableRelatedSearch: 'Enable Related Search',
+ showQueryMindmap: 'Show Query Mindmap',
+ embedApp: 'Embed App',
+ relatedSearch: 'Related Search',
+ descriptionValue: 'You are an intelligent assistant.',
+ okText: 'Save',
+ cancelText: 'Cancel',
+ chooseDataset: 'Please select a dataset first',
+ },
+ language: {
+ english: 'English',
+ chinese: 'Chinese',
+ spanish: 'Spanish',
+ french: 'French',
+ german: 'German',
+ japanese: 'Japanese',
+ korean: 'Korean',
+ vietnamese: 'Vietnamese',
+ },
+ pagination: {
+ total: 'Total {{total}}',
+ page: '{{page}} /Page',
+ },
+ dataflowParser: {
+ parseSummary: 'Parse Summary',
+ parseSummaryTip: 'Parser: deepdoc',
+ rerunFromCurrentStep: 'Rerun From Current Step',
+ rerunFromCurrentStepTip: 'Changes detected. Click to re-run.',
+ confirmRerun: 'Confirm Rerun Process',
+ confirmRerunModalContent: `
+
+ You are about to rerun the process starting from the {{step}} step.
+
+ This will:
+
+ - Overwrite existing results from the current step onwards
+ - Create a new log entry for tracking
+ - Previous steps will remain unchanged
+
`,
+ changeStepModalTitle: 'Step Switch Warning',
+ changeStepModalContent: `
+ You are currently editing the results of this stage.
+ If you switch to a later stage, your changes will be lost.
+ To keep them, please click Rerun to re-run the current stage.
`,
+ changeStepModalConfirmText: 'Switch Anyway',
+ changeStepModalCancelText: 'Cancel',
+ unlinkPipelineModalTitle: 'Unlink data pipeline',
+ unlinkPipelineModalContent: `
+ Once unlinked, this Dataset will no longer be connected to the current Data Pipeline.
+ Files that are already being parsed will continue until completion
+ Files that are not yet parsed will no longer be processed
+ Are you sure you want to proceed?
`,
+ unlinkPipelineModalConfirmText: 'Unlink',
+ },
+ dataflow: {
+ parser: 'Parser',
+ parserDescription:
+ 'Extracts raw text and structure from files for downstream processing.',
+ tokenizer: 'Tokenizer',
+ tokenizerRequired: 'Please add the Tokenizer node first',
+ tokenizerDescription:
+ 'Transforms text into the required data structure (e.g., vector embeddings for Embedding Search) depending on the chosen search method.',
+ splitter: 'Token Splitter',
+ splitterDescription:
+ 'Split text into chunks by token length with optional delimiters and overlap.',
+ hierarchicalMergerDescription:
+ 'Split documents into sections by title hierarchy with regex rules for finer control.',
+ hierarchicalMerger: 'Title Splitter',
+ extractor: 'Context Generator',
+ extractorDescription:
+ 'Use an LLM to extract structured insights from document chunks—such as summaries, classifications, etc.',
+ outputFormat: 'Output format',
+ lang: 'Language',
+ fileFormats: 'File format',
+ fileFormatOptions: {
+ pdf: 'PDF',
+ spreadsheet: 'Spreadsheet',
+ image: 'Image',
+ email: 'Email',
+ 'text&markdown': 'Text & Markup',
+ word: 'Word',
+ slides: 'PPT',
+ audio: 'Audio',
+ },
+ fields: 'Field',
+ addParser: 'Add Parser',
+ hierarchy: 'Hierarchy',
+ regularExpressions: 'Regular Expressions',
+ overlappedPercent: 'Overlapped percent',
+ searchMethod: 'Search method',
+ begin: 'File',
+ parserMethod: 'Parsing method',
+ systemPrompt: 'System Prompt',
+ systemPromptPlaceholder:
+ 'Enter system prompt for image analysis, if empty the system default value will be used',
+ exportJson: 'Export JSON',
+ viewResult: 'View Result',
+ running: 'Running',
+ summary: 'Augmented Context',
+ keywords: 'Keywords',
+ questions: 'Questions',
+ metadata: 'Metadata',
+ fieldName: 'Result Destination',
+ prompts: {
+ system: {
+ keywords: `Role
+You are a text analyzer.
+
+Task
+Extract the most important keywords/phrases of a given piece of text content.
+
+Requirements
+- Summarize the text content, and give the top 5 important keywords/phrases.
+- The keywords MUST be in the same language as the given piece of text content.
+- The keywords are delimited by ENGLISH COMMA.
+- Output keywords ONLY.`,
+ questions: `Role
+You are a text analyzer.
+
+Task
+Propose 3 questions about a given piece of text content.
+
+Requirements
+- Understand and summarize the text content, and propose the top 3 important questions.
+- The questions SHOULD NOT have overlapping meanings.
+- The questions SHOULD cover the main content of the text as much as possible.
+- The questions MUST be in the same language as the given piece of text content.
+- One question per line.
+- Output questions ONLY.`,
+ summary: `Act as a precise summarizer. Your task is to create a summary of the provided content that is both concise and faithful to the original.
+
+Key Instructions:
+1. Accuracy: Strictly base the summary on the information given. Do not introduce any new facts, conclusions, or interpretations that are not explicitly stated.
+2. Language: Write the summary in the same language as the source text.
+3. Objectivity: Present the key points without bias, preserving the original intent and tone of the content. Do not editorialize.
+4. Conciseness: Focus on the most important ideas, omitting minor details and fluff.`,
+ metadata: `Extract important structured information from the given content. Output ONLY a valid JSON string with no additional text. If no important structured information is found, output an empty JSON object: {}.
+
+Important structured information may include: names, dates, locations, events, key facts, numerical data, or other extractable entities.`,
+ },
+ user: {
+ keywords: `Text Content
+[Insert text here]`,
+ questions: `Text Content
+[Insert text here]`,
+ summary: `Text to Summarize:
+[Insert text here]`,
+ metadata: `Content: [INSERT CONTENT HERE]`,
+ },
+ },
+ cancel: 'Cancel',
+ swicthPromptMessage:
+ 'The prompt word will change. Please confirm whether to abandon the existing prompt word?',
+ tokenizerSearchMethodOptions: {
+ full_text: 'Full-text',
+ embedding: 'Embedding',
+ },
+ filenameEmbeddingWeight: 'Filename embedding weight',
+ tokenizerFieldsOptions: {
+ text: 'Processed Text',
+ keywords: 'Keywords',
+ questions: 'Questions',
+ summary: 'Augmented Context',
+ },
+ imageParseMethodOptions: {
+ ocr: 'OCR',
+ },
+ },
+ datasetOverview: {
+ downloadTip: 'Files being downloaded from data sources. ',
+ processingTip: 'Files being processed by data flows.',
+ totalFiles: 'Total Files',
+ downloading: 'Downloading',
+ processing: 'Processing',
},
},
-};
\ No newline at end of file
+};
diff --git a/src/locales/index.ts b/src/locales/index.ts
index 3cfcce7..63cd076 100644
--- a/src/locales/index.ts
+++ b/src/locales/index.ts
@@ -11,7 +11,9 @@ import zh from './zh';
export const LanguageAbbreviation = Object.freeze({
En: 'en',
Zh: 'zh',
-})
+} as const)
+
+export type LanguageAbbreviationType = (typeof LanguageAbbreviation)[keyof typeof LanguageAbbreviation]
const resources = {
[LanguageAbbreviation.En]: en,
diff --git a/src/locales/zh.ts b/src/locales/zh.ts
index 5e7ef46..a572831 100644
--- a/src/locales/zh.ts
+++ b/src/locales/zh.ts
@@ -197,7 +197,7 @@ export default {
'使用视觉模型进行 PDF 布局分析,以更好地识别文档结构,找到标题、文本块、图像和表格的位置。 如果选择 Naive 选项,则只能获取 PDF 的纯文本。请注意该功能只适用于 PDF 文档,对其他文档不生效。欲了解更多信息,请参阅 https://ragflow.io/docs/dev/select_pdf_parser。',
taskPageSize: '任务页面大小',
taskPageSizeMessage: '请输入您的任务页面大小!',
- taskPageSizeTip: `如果使用布局识别,PDF 文件将被分成连续的组。 布局分析将在组之间并行执行,以提高处理速度。 "任务页面大小"决定组的大小。 页面大小越大,将页面之间的连续文本分割成不同块的机会就越低。`,
+ taskPageSizeTip: `如果使用布局识别,PDF 文件将被分成连续的组。 布局分析将在组之间并行执行,以提高处理速度。 “任务页面大小”决定组的大小。 页面大小越大,将页面之间的连续文本分割成不同块的机会就越低。`,
addPage: '新增页面',
greaterThan: '当前值必须大于起始值!',
greaterThanPrevious: '当前值必须大于之前的值!',
@@ -221,9 +221,9 @@ export default {
html4excel: '表格转HTML',
html4excelTip: `与 General 切片方法配合使用。未开启状态下,表格文件(XLSX、XLS(Excel 97-2003))会按行解析为键值对。开启后,表格文件会被解析为 HTML 表格。若原始表格超过 12 行,系统会自动按每 12 行拆分为多个 HTML 表格。欲了解更多详情,请参阅 https://ragflow.io/docs/dev/enable_excel2html。`,
autoKeywords: '自动关键词提取',
- autoKeywordsTip: `自动为每个文本块中提取 N 个关键词,用以提升查询精度。请注意:该功能采用"系统模型设置"中设置的默认聊天模型提取关键词,因此也会产生更多 Token 消耗。另外,你也可以手动更新生成的关键词。详情请见 https://ragflow.io/docs/dev/autokeyword_autoquestion。`,
+ autoKeywordsTip: `自动为每个文本块中提取 N 个关键词,用以提升查询精度。请注意:该功能采用“系统模型设置”中设置的默认聊天模型提取关键词,因此也会产生更多 Token 消耗。另外,你也可以手动更新生成的关键词。详情请见 https://ragflow.io/docs/dev/autokeyword_autoquestion。`,
autoQuestions: '自动问题提取',
- autoQuestionsTip: `利用"系统模型设置"中设置的 chat model 对知识库的每个文本块提取 N 个问题以提高其排名得分。请注意,开启后将消耗额外的 token。您可以在块列表中查看、编辑结果。如果自动问题提取发生错误,不会妨碍整个分块过程,只会将空结果添加到原始文本块。详情请见 https://ragflow.io/docs/dev/autokeyword_autoquestion。`,
+ autoQuestionsTip: `利用“系统模型设置”中设置的 chat model 对知识库的每个文本块提取 N 个问题以提高其排名得分。请注意,开启后将消耗额外的 token。您可以在块列表中查看、编辑结果。如果自动问题提取发生错误,不会妨碍整个分块过程,只会将空结果添加到原始文本块。详情请见 https://ragflow.io/docs/dev/autokeyword_autoquestion。`,
redo: '是否清空已有 {{chunkNum}}个 chunk?',
setMetaData: '设置元数据',
pleaseInputJson: '请输入JSON',
@@ -296,7 +296,7 @@ export default {
embeddingModelTip:
'知识库采用的默认嵌入模型。 一旦知识库内已经产生了文本块后,你将无法更改默认的嵌入模型,除非删除知识库内的所有文本块。',
permissionsTip:
- '如果把知识库权限设为"团队",则所有团队成员都可以操作该知识库。',
+ '如果把知识库权限设为“团队”,则所有团队成员都可以操作该知识库。',
chunkTokenNumberTip:
'建议的生成文本块的 token 数阈值。如果切分得到的小文本段 token 数达不到这一阈值就会不断与之后的文本段合并,直至再合并下一个文本段会超过这一阈值为止,此时产生一个最终文本块。如果系统在切分文本段时始终没有遇到文本分段标识符,即便文本段 token 数已经超过这一阈值,系统也不会生成新文本块。',
chunkMethod: '切片方法',
@@ -332,13 +332,13 @@ export default {
此方法将简单的方法应用于块文件:
系统将使用视觉检测模型将连续文本分割成多个片段。
- 接下来,这些连续的片段被合并成Token数不超过"Token数"的块。`,
+ 接下来,这些连续的片段被合并成Token数不超过“Token数”的块。`,
paper: `仅支持PDF文件。
如果我们的模型运行良好,论文将按其部分进行切片,例如摘要、1.1、1.2等。
这样做的好处是LLM可以更好的概括论文中相关章节的内容,
产生更全面的答案,帮助读者更好地理解论文。
缺点是它增加了 LLM 对话的背景并增加了计算成本,
- 所以在对话过程中,你可以考虑减少'topN'的设置。
`,
+ 所以在对话过程中,你可以考虑减少‘topN’的设置。`,
presentation: `支持的文件格式为PDF、PPTX。
每个页面都将被视为一个块。 并且每个页面的缩略图都会被存储。
您上传的所有PPT文件都会使用此方法自动分块,无需为每个PPT文件进行设置。
`,
@@ -392,35 +392,1317 @@ export default {
如果OCR提取的文本不够,可以使用视觉LLM来获取描述。
`,
one: `
- 支持的文件格式为MD、MDX、DOCX、XLSX、XLS (Excel 97-2003)、PPT、PDF、TXT、JPEG、JPG、PNG、TIF、GIF、CSV、JSON、EML、HTML。
- 此方法将整个文档视为一个块。
`,
- email: `支持的文件格式为EML。
- 每封电子邮件都将被视为一个块。
+
支持的文件格式为DOCX、EXCEL、PDF、TXT。
+
+ 对于一个文档,它将被视为一个完整的块,根本不会被分割。
+
+ 如果你要总结的东西需要一篇文章的全部上下文,并且所选LLM的上下文长度覆盖了文档长度,你可以尝试这种方法。
`,
- knowledgeGraph: `支持的文件格式为MD、MDX、DOCX、XLSX、XLS (Excel 97-2003)、PPT、PDF、TXT、JPEG、JPG、PNG、TIF、GIF、CSV、JSON、EML、HTML。
- 此方法将从文档中提取实体和关系,并将它们存储在知识图谱中。
`,
+ knowledgeGraph: `支持的文件格式为DOCX、EXCEL、PPT、IMAGE、PDF、TXT、MD、JSON、EML
+
+
文件分块后,使用分块提取整个文档的知识图谱和思维导图。此方法将简单的方法应用于分块文件:
+连续的文本将被切成大约 512 个 token 数的块。
+接下来,将分块传输到 LLM 以提取知识图谱和思维导图的节点和关系。
+
+注意您需要指定的条目类型。`,
+ tag: `使用“Tag”分块方法的知识库用作标签集.其他知识库可以把标签集当中的标签按照相似度匹配到自己对应的文本块中,对这些知识库的查询也将根据此标签集对自己进行标记。
+标签集不会直接参与 RAG 检索过程。
+标签集中的每个文本分块是都是相互独立的标签和标签描述的文本对。
+
+Tag 分块方法支持XLSX和CSV/TXT文件格式。
+如果文件为XLSX格式,则它应该包含两列无标题:一列用于标签描述,另一列用于标签,标签描述列位于标签列之前。支持多个工作表,只要列结构正确即可。
+如果文件为 CSV/TXT 格式,则必须使用 UTF-8 编码并以 TAB 作为分隔符来分隔内容和标签。
+在标签列中,标签之间使用英文逗号分隔。
+不符合上述规则的文本行将被忽略。
+`,
+ useRaptor: '使用召回增强 RAPTOR 策略',
+ useRaptorTip:
+ '为多跳问答任务启用 RAPTOR,详情请见 : https://ragflow.io/docs/dev/enable_raptor。',
+ prompt: '提示词',
+ promptMessage: '提示词是必填项',
+ promptText: `请总结以下段落。 小心数字,不要编造。 段落如下:
+ {cluster_content}
+以上就是你需要总结的内容。`,
+ maxToken: '最大token数',
+ maxTokenMessage: '最大token数是必填项',
+ threshold: '阈值',
+ thresholdMessage: '阈值是必填项',
+ maxCluster: '最大聚类数',
+ maxClusterMessage: '最大聚类数是必填项',
+ randomSeed: '随机种子',
+ randomSeedMessage: '随机种子是必填项',
+ promptTip:
+ '系统提示为大模型提供任务描述、规定回复方式,以及设置其他各种要求。系统提示通常与 key (变量)合用,通过变量设置大模型的输入数据。你可以通过斜杠或者 (x) 按钮显示可用的 key。',
+ maxTokenTip: '用于设定每个被总结的文本块的最大 token 数。',
+ thresholdTip:
+ '在 RAPTOR 中,数据块会根据它们的语义相似性进行聚类。阈值设定了数据块被分到同一组所需的最小相似度。阈值越高,每个聚类中的数据块越少;阈值越低,则每个聚类中的数据块越多。',
+ maxClusterTip: '最多可创建的聚类数。',
+ entityTypes: '实体类型',
+ pageRank: '页面排名',
+ pageRankTip: `知识库检索时,你可以为特定知识库设置较高的 PageRank 分数,该知识库中匹配文本块的混合相似度得分会自动叠加 PageRank 分数,从而提升排序权重。详见 https://ragflow.io/docs/dev/set_page_rank。`,
+ tagName: '标签',
+ frequency: '频次',
+ searchTags: '搜索标签',
+ tagCloud: '云',
+ tagTable: '表',
+ tagSet: '标签集',
+ topnTags: 'Top-N 标签',
+ tagSetTip: `
+ 请选择一个或多个标签集或标签知识库,用于对知识库中的每个文本块进行标记。
+ 对这些文本块的查询也将自动关联相应标签。
+ 此功能基于文本相似度,能够为数据集的文本块批量添加更多领域知识,从而显著提高检索准确性。该功能还能提升大量文本块的操作效率。
+ 为了更好地理解标签集的作用,以下是标签集和关键词之间的主要区别:
+
+ - 标签集是一个由用户定义和管理的封闭集,而自动生成的关键词属于开放集合。
+ - 在给你的知识库文本块批量打标签之前,你需要先生成标签集作为样本。
+ - 自动关键词提取功能中的关键词由 LLM 生成,此过程相对耗时,并且会产生一定的 Token 消耗。
+
+ 详见:https://ragflow.io/docs/dev/use_tag_sets
+ `,
+ tags: '标签',
+ addTag: '增加标签',
+ useGraphRag: '提取知识图谱',
+ useGraphRagTip:
+ '基于知识库内所有切好的文本块构建知识图谱,用以提升多跳和复杂问题回答的正确率。请注意:构建知识图谱将消耗大量 token 和时间。详见 https://ragflow.io/docs/dev/construct_knowledge_graph。',
+ graphRagMethod: '方法',
+ graphRagMethodTip: `Light:实体和关系提取提示来自 GitHub - HKUDS/LightRAG:“LightRAG:简单快速的检索增强生成”
+General:实体和关系提取提示来自 GitHub - microsoft/graphrag:基于图的模块化检索增强生成 (RAG) 系统`,
+ resolution: '实体归一化',
+ resolutionTip: `解析过程会将具有相同含义的实体合并在一起,从而使知识图谱更简洁、更准确。应合并以下实体:特朗普总统、唐纳德·特朗普、唐纳德·J·特朗普、唐纳德·约翰·特朗普`,
+ community: '社区报告生成',
+ communityTip:
+ '区块被聚集成层次化的社区,实体和关系通过更高抽象层次将每个部分连接起来。然后,我们使用 LLM 生成每个社区的摘要,称为社区报告。更多信息:https://www.microsoft.com/en-us/research/blog/graphrag-improving-global-search-via-dynamic-community-selection/',
},
- dashboard: {
- title: '仪表板',
- knowledgeBaseStatus: '知识库状态',
- documents: '文档',
- sources: '来源',
- vectors: '向量',
- recentActivity: '最近活动',
- noActivity: '暂无活动',
- systemHealth: '系统健康',
- healthy: '健康',
- warning: '警告',
- error: '错误',
+ chunk: {
+ chunk: '解析块',
+ bulk: '批量',
+ selectAll: '选择所有',
+ enabledSelected: '启用选定的',
+ disabledSelected: '禁用选定的',
+ deleteSelected: '删除选定的',
+ search: '搜索',
+ all: '所有',
+ enabled: '启用',
+ disabled: '禁用',
+ keyword: '关键词',
+ function: '函数',
+ chunkMessage: '请输入值!',
+ full: '全文',
+ ellipse: '省略',
+ graph: '知识图谱',
+ mind: '思维导图',
+ question: '问题',
+ questionTip: `如果有给定的问题,则块的嵌入将基于它们。`,
+ chunkResult: '切片结果',
+ chunkResultTip: `查看用于嵌入和召回的切片段落。`,
+ enable: '启用',
+ disable: '禁用',
+ delete: '删除',
},
- time: {
- justNow: '刚刚',
- minutesAgo: '{{count}} 分钟前',
- hoursAgo: '{{count}} 小时前',
- daysAgo: '{{count}} 天前',
- weeksAgo: '{{count}} 周前',
- monthsAgo: '{{count}} 个月前',
- yearsAgo: '{{count}} 年前',
+ chat: {
+ messagePlaceholder: '请输入消息...',
+ exit: '退出',
+ multipleModels: '多模型',
+ applyModelConfigs: '应用模型配置',
+ conversations: '会话',
+ chatApps: '聊天',
+ createChat: '创建聊天',
+ newConversation: '新会话',
+ createAssistant: '新建助理',
+ assistantSetting: '助理设置',
+ promptEngine: '提示引擎',
+ modelSetting: '模型设置',
+ chat: '聊天',
+ newChat: '新建聊天',
+ send: '发送',
+ sendPlaceholder: '给助理发送消息...',
+ chatConfiguration: '聊天配置',
+ chatConfigurationDescription: '为你的知识库配置专属聊天助手! 💕',
+ assistantName: '助理姓名',
+ assistantNameMessage: '助理姓名是必填项',
+ namePlaceholder: '例如 贾维斯简历',
+ assistantAvatar: '助理头像',
+ language: '语言',
+ emptyResponse: '空回复',
+ emptyResponseTip: `如果在知识库中没有检索到用户的问题,它将使用它作为答案。 如果您希望 LLM 在未检索到任何内容时提出自己的意见,请将此留空。`,
+ emptyResponseMessage: `当知识库中未检索到任何相关信息时,将触发空响应。由于未选择任何知识库,因此请清除“空响应”。`,
+ setAnOpener: '设置开场白',
+ setAnOpenerInitial: `你好! 我是你的助理,有什么可以帮到你的吗?`,
+ setAnOpenerTip: '您想如何欢迎您的客户?',
+ knowledgeBases: '知识库',
+ knowledgeBasesMessage: '请选择',
+ knowledgeBasesTip:
+ '选择关联的知识库。新建或空知识库不会在下拉菜单中显示。',
+ system: '系统提示词',
+ systemInitialValue: `你是一个智能助手,请总结知识库的内容来回答问题,请列举知识库中的数据详细回答。当所有知识库内容都与问题无关时,你的回答必须包括“知识库中未找到您要的答案!”这句话。回答需要考虑聊天历史。
+ 以下是知识库:
+ {knowledge}
+ 以上是知识库。`,
+ systemMessage: '请输入',
+ systemTip:
+ '当LLM回答问题时,你需要LLM遵循的说明,比如角色设计、答案长度和答案语言等。如果您的模型原生支持在问答中推理,可以通过 //no_thinking 关闭自动推理。',
+ topN: 'Top N',
+ topNTip: `并非所有相似度得分高于“相似度阈值”的块都会被提供给大语言模型。 LLM 只能看到这些“Top N”块。`,
+ variable: '变量',
+ variableTip: `你可以通过对话 API,并配合变量设置来动态调整大模型的系统提示词。
+ {knowledge}为系统预留变量,代表从指定知识库召回的文本块。
+ “系统提示词”中的所有变量都必须用大括号{}括起来。详见 https://ragflow.io/docs/dev/set_chat_variables。`,
+ add: '新增',
+ key: '关键字',
+ optional: '可选的',
+ operation: '操作',
+ model: '模型',
+ modelTip: '大语言聊天模型',
+ modelMessage: '请选择',
+ modelEnabledTools: '可用的工具',
+ modelEnabledToolsTip:
+ '请选择一个或多个可供该模型所使用的工具。仅对支持工具调用的模型生效。',
+ freedom: '自由度',
+ improvise: '即兴创作',
+ precise: '精确',
+ balance: '平衡',
+ custom: '自定义',
+ freedomTip: `“精确”意味着大语言模型会保守并谨慎地回答你的问题。 “即兴发挥”意味着你希望大语言模型能够自由地畅所欲言。 “平衡”是谨慎与自由之间的平衡。`,
+ temperature: '温度',
+ temperatureMessage: '温度是必填项',
+ temperatureTip:
+ '该参数控制模型预测的随机性。 较低的温度使模型对其响应更有信心,而较高的温度则使其更具创造性和多样性。',
+ topP: 'Top P',
+ topPMessage: 'Top P 是必填项',
+ topPTip:
+ '该参数也称为“核心采样”,它设置一个阈值来选择较小的单词集进行采样。 它专注于最可能的单词,剔除不太可能的单词。',
+ presencePenalty: '存在处罚',
+ presencePenaltyMessage: '存在处罚是必填项',
+ presencePenaltyTip:
+ '这会通过惩罚对话中已经出现的单词来阻止模型重复相同的信息。',
+ frequencyPenalty: '频率惩罚',
+ frequencyPenaltyMessage: '频率惩罚是必填项',
+ frequencyPenaltyTip:
+ '与存在惩罚类似,这减少了模型频繁重复相同单词的倾向。',
+ maxTokens: '最大token数',
+ maxTokensMessage: '最大token数是必填项',
+ maxTokensTip:
+ '这设置了模型输出的最大长度,以标记(单词或单词片段)的数量来衡量。',
+ maxTokensInvalidMessage: '请输入有效的最大令牌数。',
+ maxTokensMinMessage: '最大令牌数不能小于 0。',
+ quote: '显示引文',
+ quoteTip: '是否应该显示原文出处?',
+ selfRag: 'Self-RAG',
+ selfRagTip: '请参考: https://huggingface.co/papers/2310.11511',
+ overview: '聊天 ID',
+ pv: '消息数',
+ uv: '活跃用户数',
+ speed: 'Token 输出速度',
+ tokens: '消耗Token数',
+ round: '会话互动数',
+ thumbUp: '用户满意度',
+ preview: '预览',
+ embedded: '嵌入',
+ serviceApiEndpoint: '服务API端点',
+ apiKey: 'API KEY',
+ apiReference: 'API 文档',
+ dateRange: '日期范围:',
+ backendServiceApi: 'API 服务器',
+ createNewKey: '创建新密钥',
+ created: '创建于',
+ action: '操作',
+ embedModalTitle: '嵌入网站',
+ comingSoon: '即将推出',
+ fullScreenTitle: '全屏嵌入',
+ fullScreenDescription: '将以下iframe嵌入您的网站处于所需位置',
+ partialTitle: '部分嵌入',
+ extensionTitle: 'Chrome 插件',
+ tokenError: '请先创建 API Token!',
+ betaError: '请先在系统设置中申请API密钥。',
+ searching: '搜索中',
+ parsing: '解析中',
+ uploading: '上传中',
+ uploadFailed: '上传失败',
+ regenerate: '重新生成',
+ read: '朗读内容',
+ tts: '文本转语音',
+ ttsTip: '是否用语音转换播放语音,请先在设置里面选择TTS(语音转换模型)。',
+ relatedQuestion: '相关问题',
+ answerTitle: '智能回答',
+ multiTurn: '多轮对话优化',
+ multiTurnTip:
+ '在多轮对话时,对查询问题根据上下文进行优化。会调用大模型额外消耗 token。',
+ howUseId: '如何使用聊天ID?',
+ description: '助理描述',
+ descriptionPlaceholder:
+ '例如 你是一个专业的简历助手,只能回答简历的问题。',
+ useKnowledgeGraph: '使用知识图谱',
+ useKnowledgeGraphTip:
+ '是否检索与所选知识库对应的知识图谱相关文本块,以处理复杂的多跳问题?这一过程将涉及对实体、关系和社区报告文本块的多次检索,会显著延长检索时间。',
+ keyword: '关键词分析',
+ keywordTip: `应用 LLM 分析用户的问题,提取在相关性计算中要强调的关键词。对长查询效果较好,但会延长响应时间。`,
+ reasoning: '推理',
+ reasoningTip:
+ '在问答过程中是否启用推理工作流,例如Deepseek-R1或OpenAI o1等模型所采用的方式。启用后,该功能允许模型访问外部知识,并借助思维链推理等技术逐步解决复杂问题。通过将问题分解为可处理的步骤,这种方法增强了模型提供准确回答的能力,从而在需要逻辑推理和多步思考的任务上表现更优。',
+ tavilyApiKeyTip:
+ '如果 API 密钥设置正确,它将利用 Tavily 进行网络搜索作为知识库的补充。',
+ tavilyApiKeyMessage: '请输入你的 Tavily API Key',
+ tavilyApiKeyHelp: '如何获取?',
+ crossLanguage: '跨语言搜索',
+ crossLanguageTip: `选择一种或多种语言进行跨语言搜索。如果未选择任何语言,系统将使用原始查询进行搜索。`,
+ metadata: '元数据',
+ metadataTip:
+ '元数据过滤是使用元数据属性(例如标签、类别或访问权限)来优化和控制系统内相关信息检索的过程。',
+ conditions: '条件',
+ addCondition: '增加条件',
+ meta: {
+ disabled: '禁用',
+ automatic: '自动',
+ manual: '手动',
+ },
+ cancel: '取消',
+ chatSetting: '聊天设置',
+ avatarHidden: '隐藏头像',
+ locale: '地区',
+ },
+ setting: {
+ profile: '概要',
+ avatar: '头像',
+ avatarTip: '这会在你的个人主页展示',
+ profileDescription: '在此更新您的照片和个人详细信息。',
+ maxTokens: '最大token数',
+ maxTokensMessage: '最大token数是必填项',
+ maxTokensTip:
+ '这设置了模型输出的最大长度,以标记(单词或单词片段)的数量来衡量。',
+ maxTokensInvalidMessage: '请输入有效的最大令牌数。',
+ maxTokensMinMessage: '最大令牌数不能小于 0。',
+ password: '密码',
+ passwordDescription: '请输入您当前的密码以更改您的密码。',
+ model: '模型提供商',
+ modelDescription: '在此设置模型参数和 API KEY。',
+ team: '团队',
+ system: '系统',
+ logout: '登出',
+ api: 'API',
+ username: '用户名',
+ usernameMessage: '请输入用户名',
+ photo: '头像',
+ photoDescription: '这将显示在您的个人资料上。',
+ colorSchema: '主题',
+ colorSchemaMessage: '请选择您的主题!',
+ colorSchemaPlaceholder: '请选择您的主题!',
+ bright: '明亮',
+ dark: '暗色',
+ timezone: '时区',
+ timezoneMessage: '请选择时区',
+ timezonePlaceholder: '请选择时区',
+ email: '邮箱地址',
+ emailDescription: '一旦注册,电子邮件将无法更改。',
+ currentPassword: '当前密码',
+ currentPasswordMessage: '请输入当前密码',
+ newPassword: '新密码',
+ changePassword: '修改密码',
+ newPasswordMessage: '请输入新密码',
+ newPasswordDescription: '您的新密码必须超过 8 个字符。',
+ confirmPassword: '确认新密码',
+ confirmPasswordMessage: '请确认新密码',
+ confirmPasswordNonMatchMessage: '您输入的新密码不匹配!',
+ cancel: '取消',
+ addedModels: '添加了的模型',
+ modelsToBeAdded: '待添加的模型',
+ addTheModel: '添加模型',
+ apiKey: 'API-Key',
+ apiKeyMessage: '请输入api key(如果是本地部署的模型,请忽略它)',
+ apiKeyTip: 'API key可以通过注册相应的LLM供应商来获取。',
+ showMoreModels: '展示更多模型',
+ hideModels: '隐藏模型',
+ baseUrl: 'Base-Url',
+ baseUrlTip:
+ '如果您的 API 密钥来自 OpenAI,请忽略它。 任何其他中间提供商都会提供带有 API 密钥的基本 URL。',
+ tongyiBaseUrlTip:
+ '对于中国用户,不需要填写或使用 https://dashscope.aliyuncs.com/compatible-mode/v1。对于国际用户,使用 https://dashscope-intl.aliyuncs.com/compatible-mode/v1。',
+ tongyiBaseUrlPlaceholder: '(仅国际用户需要)',
+ modify: '修改',
+ systemModelSettings: '设置默认模型',
+ chatModel: '聊天模型',
+ chatModelTip: '所有新创建的知识库都会使用默认的聊天模型。',
+ ttsModel: 'TTS模型',
+ ttsModelTip:
+ '默认的tts模型会被用于在对话过程中请求语音生成时使用。如未显示可选模型,请根据 https://ragflow.io/docs/dev/supported_models 确认你的模型供应商是否提供该模型。',
+ embeddingModel: '嵌入模型',
+ embeddingModelTip:
+ '所有新创建的知识库使用的默认嵌入模型。如未显示可选模型,请检查你是否在使用 RAGFlow slim 版(不含嵌入模型);或根据 https://ragflow.io/docs/dev/supported_models 确认你的模型供应商是否提供该模型。',
+ img2txtModel: 'Img2txt模型',
+ img2txtModelTip:
+ '所有新创建的知识库都将使用默认的 img2txt 模型。 它可以描述图片或视频。如未显示可选模型,请根据 https://ragflow.io/docs/dev/supported_models 确认你的模型供应商是否提供该模型。',
+ sequence2txtModel: 'Speech2txt模型',
+ sequence2txtModelTip:
+ '所有新创建的知识库都将使用默认的 ASR 模型。 使用此模型将语音翻译为相应的文本。如未显示可选模型,请根据 https://ragflow.io/docs/dev/supported_models 确认你的模型供应商是否提供该模型。',
+ rerankModel: 'Rerank模型',
+ rerankModelTip: `默认的 reranking 模型。如未显示可选模型,请根据 https://ragflow.io/docs/dev/supported_models 确认你的模型供应商是否提供该模型。`,
+ workspace: '工作空间',
+ upgrade: '升级',
+ addLlmTitle: '添加 LLM',
+ editLlmTitle: '编辑 {{name}} 模型',
+ editModel: '编辑模型',
+ modelName: '模型名称',
+ modelID: '模型ID',
+ modelUid: '模型UID',
+ modelType: '模型类型',
+ addLlmBaseUrl: '基础 Url',
+ vision: '是否支持 Vision',
+ modelNameMessage: '请输入模型名称!',
+ modelTypeMessage: '请输入模型类型!',
+ baseUrlNameMessage: '请输入基础 Url!',
+ ollamaLink: '如何集成 {{name}}',
+ FishAudioLink: '如何使用Fish Audio',
+ TencentCloudLink: '如何使用腾讯云语音识别',
+ volcModelNameMessage: '请输入模型名称!',
+ addEndpointID: '模型 EndpointID',
+ endpointIDMessage: '请输入模型对应的EndpointID',
+ addArkApiKey: '火山 ARK_API_KEY',
+ ArkApiKeyMessage: '请输入火山创建的ARK_API_KEY',
+ bedrockModelNameMessage: '请输入名称!',
+ addBedrockEngineAK: 'ACCESS KEY',
+ bedrockAKMessage: '请输入 ACCESS KEY',
+ addBedrockSK: 'SECRET KEY',
+ bedrockSKMessage: '请输入 SECRET KEY',
+ bedrockRegion: 'AWS Region',
+ bedrockRegionMessage: '请选择!',
+ 'us-east-1': '美国东部 (弗吉尼亚北部)',
+ 'us-west-2': '美国西部 (俄勒冈州)',
+ 'ap-southeast-1': '亚太地区 (新加坡)',
+ 'ap-northeast-1': '亚太地区 (东京)',
+ 'eu-central-1': '欧洲 (法兰克福)',
+ 'us-gov-west-1': 'AWS GovCloud (US-West)',
+ 'ap-southeast-2': '亚太地区 (悉尼)',
+ addHunyuanSID: '混元 Secret ID',
+ HunyuanSIDMessage: '请输入 Secret ID',
+ addHunyuanSK: '混元 Secret Key',
+ HunyuanSKMessage: '请输入 Secret Key',
+ addTencentCloudSID: '腾讯云 Secret ID',
+ TencentCloudSIDMessage: '请输入 Secret ID',
+ addTencentCloudSK: '腾讯云 Secret Key',
+ TencentCloudSKMessage: '请输入 Secret Key',
+ SparkModelNameMessage: '请选择星火模型!',
+ addSparkAPIPassword: '星火 APIPassword',
+ SparkAPIPasswordMessage: '请输入 APIPassword',
+ addSparkAPPID: '星火 APPID',
+ SparkAPPIDMessage: '请输入 APPID',
+ addSparkAPISecret: '星火 APISecret',
+ SparkAPISecretMessage: '请输入 APISecret',
+ addSparkAPIKey: '星火 APIKey',
+ SparkAPIKeyMessage: '请输入 APIKey',
+ yiyanModelNameMessage: '请输入模型名称',
+ addyiyanAK: '一言 API KEY',
+ yiyanAKMessage: '请输入 API KEY',
+ addyiyanSK: '一言 Secret KEY',
+ yiyanSKMessage: '请输入 Secret KEY',
+ FishAudioModelNameMessage: '请为你的TTS模型起名',
+ addFishAudioAK: 'Fish Audio API KEY',
+ FishAudioAKMessage: '请输入 API KEY',
+ addFishAudioRefID: 'FishAudio Reference ID',
+ FishAudioRefIDMessage: '请输入引用模型的ID(留空表示使用默认模型)',
+ GoogleModelIDMessage: '请输入 model ID!',
+ addGoogleProjectID: 'Project ID',
+ GoogleProjectIDMessage: '请输入 Project ID',
+ addGoogleServiceAccountKey:
+ 'Service Account Key(Leave blank if you use Application Default Credentials)',
+ GoogleServiceAccountKeyMessage:
+ '请输入 Google Cloud Service Account Key in base64 format',
+ addGoogleRegion: 'Google Cloud 区域',
+ GoogleRegionMessage: '请输入 Google Cloud 区域',
+ modelProvidersWarn: `请先在模型提供商中添加嵌入模型和LLM,然后在“设置默认模型”中设置它们。`,
+ apiVersion: 'API版本',
+ apiVersionMessage: '请输入API版本!',
+ add: '添加',
+ updateDate: '更新日期',
+ role: '角色',
+ invite: '邀请',
+ agree: '同意',
+ refuse: '拒绝',
+ teamMembers: '团队成员',
+ joinedTeams: '加入的团队',
+ sureDelete: '您确定要删除该成员吗?',
+ quit: '退出',
+ sureQuit: '确定退出加入的团队吗?',
+ secretKey: '密钥',
+ publicKey: '公钥',
+ secretKeyMessage: '请输入私钥',
+ publicKeyMessage: '请输入公钥',
+ hostMessage: '请输入 host',
+ configuration: '配置',
+ langfuseDescription:
+ '跟踪、评估、提示管理和指标,以调试和改进您的 LLM 应用程序。',
+ viewLangfuseSDocumentation: '查看 Langfuse 的文档',
+ view: '查看',
+ modelsToBeAddedTooltip:
+ '如果你的模型供应商在这里没有列出,但是宣称 OpenAI-compatible,可以通过选择卡片 OpenAI-API-compatible 设置相关模型。',
+ mcp: 'MCP',
+ },
+ message: {
+ registered: '注册成功',
+ logout: '登出成功',
+ logged: '登录成功',
+ pleaseSelectChunk: '请选择解析块',
+ modified: '更新成功',
+ created: '创建成功',
+ deleted: '删除成功',
+ renamed: '重命名成功',
+ operated: '操作成功',
+ updated: '更新成功',
+ uploaded: '上传成功',
+ 200: '服务器成功返回请求的数据。',
+ 201: '新建或修改数据成功。',
+ 202: '一个请求已经进入后台排队(异步任务)。',
+ 204: '删除数据成功。',
+ 400: '发出的请求有错误,服务器没有进行新建或修改数据的操作。',
+ 401: '请重新登录。',
+ 403: '用户得到授权,但是访问是被禁止的。',
+ 404: '发出的请求针对的是不存在的记录,服务器没有进行操作。',
+ 406: '请求的格式不可得。',
+ 410: '请求的资源被永久删除,且不会再得到的。',
+ 413: '上传的文件总大小过大。',
+ 422: '当创建一个对象时,发生一个验证错误。',
+ 500: '服务器发生错误,请检查服务器。',
+ 502: '网关错误。',
+ 503: '服务不可用,服务器暂时过载或维护。',
+ 504: '网关超时。',
+ requestError: '请求错误',
+ networkAnomalyDescription: '您的网络发生异常,无法连接服务器',
+ networkAnomaly: '网络异常',
+ hint: '提示',
+ },
+ fileManager: {
+ files: '文件',
+ name: '名称',
+ uploadDate: '上传日期',
+ knowledgeBase: '知识库',
+ size: '大小',
+ action: '操作',
+ addToKnowledge: '链接知识库',
+ pleaseSelect: '请选择',
+ newFolder: '新建文件夹',
+ uploadFile: '上传文件',
+ parseOnCreation: '创建时解析',
+ uploadTitle: '点击或拖拽文件至此区域即可上传',
+ uploadDescription:
+ '支持单次或批量上传。 本地部署的单次上传文件总大小上限为 1GB,单次批量上传文件数不超过 32,单个账户不限文件数量。对于 demo.ragflow.io:每次上传的总文件大小限制为 10MB,每个文件不得超过 10MB,每个账户最多可上传 128 个文件。严禁上传违禁文件。',
+ file: '文件',
+ directory: '文件夹',
+ local: '本地上传',
+ s3: 'S3 上传',
+ preview: '预览',
+ fileError: '文件错误',
+ uploadLimit: '文件大小不能超过10M,文件总数不超过128个',
+ destinationFolder: '目标文件夹',
+ pleaseUploadAtLeastOneFile: '请上传至少一个文件',
+ },
+ flow: {
+ recommended: '推荐',
+ customerSupport: '客户支持',
+ marketing: '营销',
+ consumerApp: '消费者应用',
+ other: '其他',
+ agents: '智能体',
+ beginInput: '开始输入',
+ seconds: '秒',
+ ref: '引用变量',
+ stockCode: '股票代码',
+ apiKeyPlaceholder: '您的API密钥(从https://serpapi.com获取)',
+ flowStart: '开始',
+ flowNum: '编号',
+ test: '测试',
+ extractDepth: '深度提取',
+ format: '格式',
+ basic: '基本',
+ advanced: '高级',
+ general: '通用',
+ searchDepth: '深度搜索',
+ tavilyTopic: 'Tavily话题',
+ maxResults: '最大结果数',
+ includeAnswer: '包含答案',
+ includeRawContent: '包含原始内容',
+ includeImages: '包含图片',
+ includeImageDescriptions: '包含图片描述',
+ includeDomains: '包含域名',
+ ExcludeDomains: '排除域名',
+ days: '天数',
+ comma: '逗号',
+ semicolon: '分号',
+ period: '句点',
+ linebreak: '换行符',
+ tab: '制表符',
+ space: '空格',
+ delimiters: '分隔符',
+ merge: '合并',
+ split: '拆分',
+ script: '脚本',
+ iterationItemDescription:
+ '它是迭代过程中的当前元素,可以被后续流程引用和操作。',
+ guidingQuestion: '引导问题',
+ onFailure: '异常时',
+ userPromptDefaultValue:
+ 'This is the order you need to send to the agent.',
+ descriptionMessage: '这是一个用于特定任务的代理。',
+ search: '搜索',
+ communication: '通信',
+ developer: '开发者',
+ typeCommandOrsearch: '输入命令或搜索...',
+ builtIn: '内置',
+ goto: '异常分支',
+ comment: '默认值',
+ ExceptionDefaultValue: '异常处理默认值',
+ exceptionMethod: '异常处理方法',
+ maxRounds: '最大反思轮数',
+ delayEfterError: '错误后延迟',
+ maxRetries: '最大重试次数',
+ advancedSettings: '高级设置',
+ addTools: '添加工具',
+ sysPromptDefultValue: `
+
+ 你是一名乐于助人的助手,一名专注于为用户解决问题的 AI 助手。
+ 如果用户指定了特定领域,你需要在该领域展现专业性;如果没有,则以通用助手的方式工作。
+
+
+ 1. 理解用户请求。
+ 2. 将其分解为逻辑子任务。
+ 3. 逐步执行每个子任务,并清晰地进行推理。
+ 4. 验证准确性和一致性。
+ 5. 清晰地总结最终结果。
+`,
+ line: '单行文本',
+ paragraph: '段落文字',
+ options: '选项',
+ file: '文件',
+ integer: '数字',
+ boolean: '布尔值',
+ name: '名称',
+ singleLineText: '单行文本',
+ variableSettings: '变量设置',
+ multimodalModels: '多模态模型',
+ textOnlyModels: '仅文本模型',
+ allModels: '所有模型',
+ codeExecDescription: '用 Python 或者 Javascript 编写自定义逻辑',
+ stringTransformDescription:
+ '修改文本内容,目前支持文本分割、文本拼接操作',
+ foundation: '基础',
+ tools: '工具',
+ dataManipulation: '数据操控',
+ dialog: '对话',
+ flow: '工作流',
+ noMoreData: '没有更多数据了',
+ historyversion: '历史版本',
+ version: {
+ details: '版本详情',
+ download: '下载',
+ },
+ cite: '引用',
+ citeTip: '引用',
+ nameMessage: '请输入名称',
+ description: '描述',
+ examples: '示例',
+ to: '下一步',
+ msg: '消息',
+ msgTip: '输出上游组件的变量内容或者自己输入的文本。',
+ messagePlaceholder: '请输入您的消息内容,使用‘/’快速插入变量。',
+ messageMsg: '请输入消息或删除此字段。',
+ addField: '新增字段',
+ addMessage: '新增消息',
+ loop: '循环上限',
+ loopTip:
+ 'loop为当前组件循环次数上限,当循环次数超过loop的值时,说明组件不能完成当前任务,请重新优化agent',
+ yes: '是',
+ no: '否',
+ key: '键',
+ componentId: '组件ID',
+ add: '新增',
+ operation: '操作',
+ run: '运行',
+ save: '保存',
+ title: 'ID:',
+ beginDescription: '这是流程开始的地方',
+ answerDescription: `该组件用作机器人与人类之间的接口。它接收用户的输入并显示机器人的计算结果。`,
+ retrievalDescription: `此组件用于从知识库中检索相关信息。选择知识库。如果没有检索到任何内容,将返回“空响应”。`,
+ generateDescription: `此组件用于调用LLM生成文本,请注意提示的设置。`,
+ categorizeDescription: `此组件用于对文本进行分类。请指定类别的名称、描述和示例。每个类别都指向不同的下游组件。`,
+ relevantDescription: `该组件用来判断upstream的输出是否与用户最新的问题相关,‘是’代表相关,‘否’代表不相关。`,
+ rewriteQuestionDescription: `此组件用于细化用户的提问。通常,当用户的原始提问无法从知识库中检索到相关信息时,此组件可帮助您将问题更改为更符合知识库表达方式的适当问题。`,
+ messageDescription:
+ '该组件用来返回工作流最后产生的数据内容和原先设置的文本内容。',
+ keywordDescription: `该组件用于从用户的问题中提取关键词。Top N指定需要提取的关键词数量。`,
+ switchDescription: `该组件用于根据前面组件的输出评估条件,并相应地引导执行流程。通过定义各种情况并指定操作,或在不满足条件时采取默认操作,实现复杂的分支逻辑。`,
+ wikipediaDescription: `此组件用于从 https://www.wikipedia.org/ 获取搜索结果。通常,它作为知识库的补充。Top N 指定您需要调整的搜索结果数量。`,
+ promptText: `请总结以下段落。注意数字,不要胡编乱造。段落如下:
+{input}
+以上就是你需要总结的内容。`,
+ createGraph: '创建智能体',
+ createFromTemplates: '从模板创建',
+ retrieval: '知识检索',
+ generate: '生成回答',
+ answer: '对话',
+ categorize: '问题分类',
+ relevant: '是否相关',
+ rewriteQuestion: '问题优化',
+ begin: '开始',
+ message: '回复消息',
+ blank: '空',
+ createFromNothing: '从无到有',
+ addItem: '新增',
+ addSubItem: '新增子项',
+ nameRequiredMsg: '名称不能为空',
+ nameRepeatedMsg: '名称不能重复',
+ keywordExtract: '关键词',
+ keywordExtractDescription: `该组件用于从用户的问题中提取关键词。Top N指定需要提取的关键词数量。`,
+ baidu: '百度',
+ baiduDescription: `此组件用于从 www.baidu.com 获取搜索结果。通常,它作为知识库的补充。Top N 指定您需要调整的搜索结果数量。`,
+ duckDuckGo: 'DuckDuckGo',
+ duckDuckGoDescription:
+ '此组件用于从 www.duckduckgo.com 获取搜索结果。通常,它作为知识库的补充。Top N 指定您需要调整的搜索结果数量。',
+ searXNG: 'SearXNG',
+ searXNGDescription:
+ '该组件通过您提供的 SearXNG 实例地址进行搜索。请设置 Top N 和实例 URL。',
+ channel: '频道',
+ channelTip: '针对该组件的输入进行文本搜索或新闻搜索',
+ text: '文本',
+ news: '新闻',
+ messageHistoryWindowSize: '历史消息窗口大小',
+ messageHistoryWindowSizeTip:
+ 'LLM 需要查看的对话历史窗口大小。越大越好。但要注意 LLM 的最大 Token 数。',
+ wikipedia: '维基百科',
+ emailTip:
+ '此组件用于从 https://pubmed.ncbi.nlm.nih.gov/ 获取搜索结果。通常,它作为知识库的补充。Top N 指定您需要调整的搜索结果数。电子邮件是必填字段。',
+ email: '邮箱',
+ pubMed: 'PubMed',
+ pubMedDescription:
+ '此组件用于从 https://pubmed.ncbi.nlm.nih.gov/ 获取搜索结果。通常,它作为知识库的补充。Top N 指定您需要调整的搜索结果数。电子邮件是必填字段。',
+ arXiv: 'ArXiv',
+ arXivDescription:
+ '此组件用于从 https://arxiv.org/ 获取搜索结果。通常,它作为知识库的补充。Top N 指定您需要调整的搜索结果数量。',
+ sortBy: '排序方式',
+ submittedDate: '提交日期',
+ lastUpdatedDate: '最后更新日期',
+ relevance: '关联',
+ google: 'Google',
+ googleDescription:
+ '此组件用于从https://www.google.com/获取搜索结果。通常,它作为知识库的补充。Top N 和 SerpApi API 密钥指定您需要调整的搜索结果数量。',
+ bing: 'Bing',
+ bingDescription:
+ '此组件用于从 https://www.bing.com/ 获取搜索结果。通常,它作为知识库的补充。Top N 和 Bing Subscription-Key 指定您需要调整的搜索结果数量。',
+ apiKey: 'API密钥',
+ country: '国家和地区',
+ language: '语言',
+ googleScholar: '谷歌学术',
+ googleScholarDescription: `此组件用于从 https://scholar.google.com/ 获取搜索结果。通常,它作为知识库的补充。Top N 指定您需要调整的搜索结果数量。`,
+ yearLow: '开始年份',
+ yearHigh: '结束年份',
+ patents: '专利',
+ data: '数据',
+ deepL: 'DeepL',
+ deepLDescription:
+ '该组件用于从 https://www.deepl.com/ 获取翻译。通常,它提供更专业的翻译结果。',
+ authKey: '授权键',
+ sourceLang: '源语言',
+ targetLang: '目标语言',
+ gitHub: 'GitHub',
+ gitHubDescription:
+ '该组件用于从 https://github.com/ 搜索仓库。Top N 指定需要调整的搜索结果数量。',
+ baiduFanyi: '百度翻译',
+ baiduFanyiDescription:
+ '该组件用于从 https://fanyi.baidu.com/ 获取翻译。通常,它提供更专业的翻译结果',
+ appid: 'App id',
+ secretKey: '秘钥',
+ domain: '领域',
+ transType: '翻译类型',
+ baiduSecretKeyOptions: {
+ translate: '通用翻译',
+ fieldtranslate: '领域翻译',
+ },
+ baiduDomainOptions: {
+ it: '信息技术领域',
+ finance: '金融财经领域',
+ machinery: '机械制造领域',
+ senimed: '生物医药领域',
+ novel: '网络文学领域',
+ academic: '学术论文领域',
+ aerospace: '航空航天领域',
+ wiki: '人文社科领域',
+ news: '新闻资讯领域',
+ law: '法律法规领域',
+ contract: '合同领域',
+ },
+ baiduSourceLangOptions: {
+ auto: '自动检测',
+ zh: '中文',
+ en: '英语',
+ yue: '粤语',
+ wyw: '文言文',
+ jp: '日语',
+ kor: '韩语',
+ fra: '法语',
+ spa: '西班牙语',
+ th: '泰语',
+ ara: '阿拉伯语',
+ ru: '俄语',
+ pt: '葡萄牙语',
+ de: '德语',
+ it: '意大利语',
+ el: '希腊语',
+ nl: '荷兰语',
+ pl: '波兰语',
+ bul: '保加利亚语',
+ est: '爱沙尼亚语',
+ dan: '丹麦语',
+ fin: '芬兰语',
+ cs: '捷克语',
+ rom: '罗马尼亚语',
+ slo: '斯洛文尼亚语',
+ swe: '瑞典语',
+ hu: '匈牙利语',
+ cht: '繁体中文',
+ vie: '越南语',
+ },
+ qWeather: '和风天气',
+ qWeatherDescription:
+ '该组件用于从 https://www.qweather.com/ 获取天气相关信息。您可以获取天气、指数、空气质量。',
+ lang: '语言',
+ type: '类型',
+ webApiKey: 'Web API 密钥',
+ userType: '用户类型',
+ timePeriod: '时间段',
+ qWeatherLangOptions: {
+ zh: '简体中文',
+ 'zh-hant': '繁体中文',
+ en: '英文',
+ de: '德语',
+ es: '西班牙语',
+ fr: '法语',
+ it: '意大利语',
+ ja: '日语',
+ ko: '韩语',
+ ru: '俄语',
+ hi: '印地语',
+ th: '泰语',
+ ar: '阿拉伯语',
+ pt: '葡萄牙语',
+ bn: '孟加拉语',
+ ms: '马来语',
+ nl: '荷兰语',
+ el: '希腊语',
+ la: '拉丁语',
+ sv: '瑞典语',
+ id: '印尼语',
+ pl: '波兰语',
+ tr: '土耳其语',
+ cs: '捷克语',
+ et: '爱沙尼亚语',
+ vi: '越南语',
+ fil: '菲律宾语',
+ fi: '芬兰语',
+ he: '希伯来语',
+ is: '冰岛语',
+ nb: '挪威语',
+ },
+ qWeatherTypeOptions: {
+ weather: '天气预报',
+ indices: '天气生活指数',
+ airquality: '空气质量',
+ },
+ qWeatherUserTypeOptions: {
+ free: '免费订阅用户',
+ paid: '付费订阅用户',
+ },
+ qWeatherTimePeriodOptions: {
+ now: '现在',
+ '3d': '3天',
+ '7d': '7天',
+ '10d': '10天',
+ '15d': '15天',
+ '30d': '30天',
+ },
+ publish: 'API',
+ exeSQL: '执行 SQL',
+ exeSQLDescription:
+ '该组件通过SQL语句从相应的关系数据库中查询结果。支持MySQL,PostgreSQL,MariaDB。',
+ dbType: '数据库类型',
+ database: '数据库',
+ username: '用户名',
+ host: '主机',
+ port: '端口',
+ password: '密码',
+ switch: '条件',
+ logicalOperator: '操作符',
+ switchOperatorOptions: {
+ equal: '等于',
+ notEqual: '不等于',
+ gt: '大于',
+ ge: '大于等于',
+ lt: '小于',
+ le: '小于等于',
+ contains: '包含',
+ notContains: '不包含',
+ startWith: '开始是',
+ endWith: '结束是',
+ empty: '为空',
+ notEmpty: '不为空',
+ },
+ switchLogicOperatorOptions: {
+ and: '与',
+ or: '或',
+ },
+ operator: '操作符',
+ value: '值',
+ useTemplate: '使用',
+ wenCai: '问财',
+ queryType: '查询类型',
+ wenCaiDescription:
+ '该组件可用于获取广泛金融领域的信息,包括但不限于股票、基金等...',
+ wenCaiQueryTypeOptions: {
+ stock: '股票',
+ zhishu: '指数',
+ fund: '基金',
+ hkstock: '港股',
+ usstock: '美股',
+ threeboard: '新三板',
+ conbond: '可转债',
+ insurance: '保险',
+ futures: '期货',
+ lccp: '理财',
+ foreign_exchange: '外汇',
+ },
+ akShare: 'AkShare',
+ akShareDescription: '该组件可用于从东方财富网站获取相应股票的新闻信息。',
+ yahooFinance: '雅虎财经',
+ yahooFinanceDescription: '该组件根据提供的股票代码查询有关公司的信息。',
+ crawler: '网页爬虫',
+ crawlerDescription: '该组件可用于从指定url爬取html源码。',
+ proxy: '代理',
+ crawlerResultOptions: {
+ html: 'Html',
+ markdown: 'Markdown',
+ content: '文本',
+ },
+ extractType: '提取类型',
+ info: '信息',
+ history: '历史',
+ financials: '财务',
+ balanceSheet: '资产负债表',
+ cashFlowStatement: '现金流量表',
+ jin10: '金十',
+ jin10Description:
+ '该组件可用于从金十开放平台获取金融领域的信息,包括快讯、日历、行情、参考。',
+ flashType: '快讯类型',
+ filter: '筛选',
+ contain: '包含',
+ calendarType: '日历类型',
+ calendarDatashape: '日历数据形状',
+ symbolsDatatype: '符号数据类型',
+ symbolsType: '符号类型',
+ jin10TypeOptions: {
+ flash: '快讯',
+ calendar: '日历',
+ symbols: '行情',
+ news: '参考',
+ },
+ jin10FlashTypeOptions: {
+ '1': '市场快讯',
+ '2': '期货快讯',
+ '3': '美港快讯',
+ '4': 'A股快讯',
+ '5': '商品外汇快讯',
+ },
+ jin10CalendarTypeOptions: {
+ cj: '宏观数据日历',
+ qh: '期货日历',
+ hk: '港股日历',
+ us: '美股日历',
+ },
+ jin10CalendarDatashapeOptions: {
+ data: '数据',
+ event: '事件',
+ holiday: '假期',
+ },
+ jin10SymbolsTypeOptions: {
+ GOODS: '商品行情',
+ FOREX: '外汇行情',
+ FUTURE: '国际行情',
+ CRYPTO: '加密货币行情',
+ },
+ jin10SymbolsDatatypeOptions: {
+ symbols: '品种列表',
+ quotes: '最新行情',
+ },
+ concentrator: '集线器',
+ concentratorDescription:
+ '该组件可用于连接多个下游组件。它接收来自上游组件的输入并将其传递给每个下游组件。',
+ tuShare: 'TuShare',
+ tuShareDescription:
+ '该组件可用于从主流金融网站获取金融新闻简报,辅助行业和量化研究。',
+ tuShareSrcOptions: {
+ sina: '新浪财经',
+ wallstreetcn: '华尔街见闻',
+ '10jqka': '同花顺',
+ eastmoney: '东方财富',
+ yuncaijing: '云财经',
+ fenghuang: '凤凰新闻',
+ jinrongjie: '金融界',
+ },
+ token: 'Token',
+ src: '源',
+ startDate: '开始日期',
+ endDate: '结束日期',
+ keyword: '关键字',
+ note: '注释',
+ noteDescription: '注释',
+ notePlaceholder: '请输入注释',
+ invoke: 'HTTP 请求',
+ invokeDescription:
+ '该组件可以调用远程端点调用。将其他组件的输出作为参数或设置常量参数来调用远程函数。',
+ url: 'Url',
+ method: '方法',
+ timeout: '超时',
+ headers: '请求头',
+ cleanHtml: '清除 HTML',
+ cleanHtmlTip: '如果响应是 HTML 格式且只需要主要内容,请将其打开。',
+ reference: '引用',
+ input: '输入',
+ output: '输出',
+ parameter: '参数',
+ howUseId: '如何使用Agent ID?',
+ content: '内容',
+ operationResults: '运行结果',
+ autosaved: '已自动保存',
+ optional: '可选项',
+ pasteFileLink: '粘贴文件链接',
+ testRun: '试运行',
+ template: '模板转换',
+ templateDescription:
+ '该组件用于排版各种组件的输出。1、支持Jinja2模板,会先将输入转为对象后进行模版渲染2、同时保留原使用{参数}字符串替换的方式',
+ emailComponent: '邮件',
+ emailDescription: '发送邮件到指定邮箱',
+ smtpServer: 'SMTP服务器',
+ smtpPort: 'SMTP端口',
+ senderEmail: '发件人邮箱',
+ authCode: '授权码',
+ senderName: '发件人名称',
+ toEmail: '收件人邮箱',
+ ccEmail: '抄送邮箱',
+ emailSubject: '邮件主题',
+ emailContent: '邮件内容',
+ smtpServerRequired: '请输入SMTP服务器地址',
+ senderEmailRequired: '请输入发件人邮箱',
+ authCodeRequired: '请输入授权码',
+ toEmailRequired: '请输入收件人邮箱',
+ emailContentRequired: '请输入邮件内容',
+ emailSentSuccess: '邮件发送成功',
+ emailSentFailed: '邮件发送失败',
+ dynamicParameters: '动态参数说明',
+ jsonFormatTip: '上游组件需要传入以下格式的JSON字符串:',
+ toEmailTip: 'to_email: 收件人邮箱(必填)',
+ ccEmailTip: 'cc_email: 抄送邮箱(可选)',
+ subjectTip: 'subject: 邮件主题(可选)',
+ contentTip: 'content: 邮件内容(可选)',
+ jsonUploadTypeErrorMessage: '请上传json文件',
+ jsonUploadContentErrorMessage: 'json 文件错误',
+ iteration: '循环',
+ iterationDescription: `该组件负责迭代生成新的内容,对列表对象执行多次步骤直至输出所有结果。`,
+ delimiterTip: `该分隔符用于将输入文本分割成几个文本片段,每个文本片段的回显将作为每次迭代的输入项。`,
+ delimiterOptions: {
+ comma: '逗号',
+ lineBreak: '换行',
+ tab: '制表符',
+ underline: '下划线',
+ diagonal: '斜线',
+ minus: '连字符',
+ semicolon: '分号',
+ },
+ addCategory: '新增分类',
+ categoryName: '分类名称',
+ nextStep: '下一步',
+ insertVariableTip: `输入 / 插入变量`,
+ setting: '设置',
+ settings: {
+ agentSetting: 'Agent设置',
+ title: '标题',
+ description: '描述',
+ upload: '上传',
+ photo: '照片',
+ permissions: '权限',
+ permissionsTip: '你可以在这里设置团队访问权限。',
+ me: '仅限自己',
+ team: '团队',
+ },
+ systemPrompt: '系统提示词',
+ userPrompt: '用户提示词',
+ prompt: '提示词',
+ promptMessage: '提示词是必填项',
+ promptTip:
+ '系统提示为大模型提供任务描述、规定回复方式,以及设置其他各种要求。系统提示通常与 key (变量)合用,通过变量设置大模型的输入数据。你可以通过斜杠或者 (x) 按钮显示可用的 key。',
+ knowledgeBasesTip: '选择关联的知识库,或者在下方选择包含知识库ID的变量。',
+ knowledgeBaseVars: '知识库变量',
+ code: '代码',
+ codeDescription: '它允许开发人员编写自定义 Python 逻辑。',
+ inputVariables: '输入变量',
+ addVariable: '新增变量',
+ runningHintText: '正在运行中...🕞',
+ openingSwitch: '开场白开关',
+ openingCopy: '开场白文案',
+ openingSwitchTip: '您的用户将在开始时看到此欢迎消息。',
+ modeTip: '模式定义了工作流的启动方式。',
+ mode: '模式',
+ conversational: '对话式',
+ task: '任务',
+ beginInputTip: '通过定义输入参数,此内容可以被后续流程中的其他组件访问。',
+ query: '查询变量',
+ queryTip: '选择您想要使用的变量',
+ agent: '智能体',
+ addAgent: '添加智能体',
+ agentDescription: '构建具备推理、工具调用和多智能体协同的智能体组件。',
+ maxRecords: '最大记录数',
+ createAgent: '智能体流程',
+ stringTransform: '文本处理',
+ userFillUp: '等待输入',
+ userFillUpDescription: `此组件会暂停当前的流程并等待用户发送消息,接收到消息之后再进行之后的流程。`,
+
+ codeExec: '代码',
+ tavilySearch: 'Tavily 搜索',
+ tavilySearchDescription: '通过 Tavily 服务搜索结果',
+ tavilyExtract: 'Tavily 提取',
+ tavilyExtractDescription: 'Tavily 提取',
+ log: '日志',
+ management: '管理',
+ import: '导入',
+ export: '导出',
+ subject: '主题',
+ logTimeline: {
+ begin: '准备开始',
+ userFillUp: '等你输入',
+ agent: '智能体正在思考',
+ retrieval: '查找知识',
+ message: '回复',
+ awaitResponse: '等你输入',
+ switch: '选择最佳路线',
+ iteration: '批量处理',
+ categorize: '信息归类',
+ code: '运行小段代码',
+ textProcessing: '整理文字',
+ tavilySearch: '正在网上搜索',
+ tavilyExtract: '读取网页内容',
+ exeSQL: '查询数据库',
+ google: '正在网上搜索',
+ wikipedia: '搜索维基百科',
+ googleScholar: '学术检索',
+ gitHub: '搜索',
+ email: '发送邮件',
+ httpRequest: '请求接口',
+ wenCai: '查询财务数据',
+ },
+ sqlStatement: 'SQL 语句',
+ sqlStatementTip:
+ '在此处编写您的 SQL 查询。您可以使用变量、原始 SQL,或使用变量语法混合使用两者。',
+ frameworkPrompts: '框架',
+ release: '发布',
+ createFromBlank: '从空白创建',
+ createFromTemplate: '从模板创建',
+ importJsonFile: '导入 JSON 文件',
+ chooseAgentType: '选择智能体类型',
+ },
+ footer: {
+ profile: 'All rights reserved @ React',
+ },
+ layout: {
+ file: 'file',
+ knowledge: 'knowledge',
+ chat: 'chat',
+ },
+ llmTools: {
+ bad_calculator: {
+ name: '计算器',
+ description: '用于计算两个数的和的工具(会给出错误答案)',
+ params: {
+ a: '第一个数',
+ b: '第二个数',
+ },
+ },
+ },
+ modal: {
+ okText: '确认',
+ cancelText: '取消',
+ },
+ mcp: {
+ export: '导出',
+ import: '导入',
+ url: 'URL',
+ serverType: '服务器类型',
+ addMCP: '添加 MCP',
+ editMCP: '编辑 MCP',
+ toolsAvailable: '可用的工具',
+ mcpServers: 'MCP 服务器',
+ customizeTheListOfMcpServers: '自定义 MCP 服务器列表',
+ },
+ search: {
+ searchApps: '搜索',
+ createSearch: '创建查询',
+ searchGreeting: '今天我能为你做些什么?',
+ profile: '隐藏个人资料',
+ locale: '语言',
+ embedCode: '嵌入代码',
+ id: 'ID',
+ copySuccess: '复制成功',
+ welcomeBack: '欢迎回来',
+ searchSettings: '搜索设置',
+ name: '姓名',
+ avatar: '头像',
+ description: '描述',
+ datasets: '知识库',
+ rerankModel: 'rerank 模型',
+ AISummary: 'AI 总结',
+ enableWebSearch: '启用网页搜索',
+ enableRelatedSearch: '启用相关搜索',
+ showQueryMindmap: '显示查询思维导图',
+ embedApp: '嵌入网站',
+ relatedSearch: '相关搜索',
+ descriptionValue: '你是一位智能助手。',
+ okText: '保存',
+ cancelText: '返回',
+ chooseDataset: '请先选择知识库',
+ },
+ language: {
+ english: '英语',
+ chinese: '中文',
+ spanish: '西班牙语',
+ french: '法语',
+ german: '德语',
+ japanese: '日语',
+ korean: '韩语',
+ vietnamese: '越南语',
+ },
+ pagination: {
+ total: '总共 {{total}} 条',
+ page: '{{page}}条/页',
+ },
+ dataflowParser: {
+ parseSummary: '解析摘要',
+ parseSummaryTip: '解析器: deepdoc',
+ rerunFromCurrentStep: '从当前步骤重新运行',
+ rerunFromCurrentStepTip: '已修改,点击重新运行。',
+ confirmRerun: '确认重新运行流程',
+ confirmRerunModalContent: `
+
+ 您即将从 {{step}} 步骤开始重新运行该过程
+
+ 这将:
+
+ - 从当前步骤开始覆盖现有结果
+ - 创建新的日志条目进行跟踪
+ - 之前的步骤将保持不变
+
`,
+ changeStepModalTitle: '切换步骤警告',
+ changeStepModalContent: `
+ 您目前正在编辑此阶段的结果。
+ 如果您切换到后续阶段,您的更改将会丢失。
+ 要保留这些更改,请点击“重新运行”以重新运行当前阶段。
`,
+ changeStepModalConfirmText: '继续切换',
+ changeStepModalCancelText: '取消',
+ unlinkPipelineModalTitle: '解绑数据流',
+ unlinkPipelineModalContent: `
+ 一旦取消链接,该数据集将不再连接到当前数据管道。
+ 正在解析的文件将继续解析,直到完成。
+ 尚未解析的文件将不再被处理。
+ 你确定要继续吗?
`,
+ unlinkPipelineModalConfirmText: '解绑',
+ },
+ dataflow: {
+ parser: '解析器',
+ parserDescription: '从文件中提取原始文本和结构以供下游处理。',
+ tokenizer: '分词器',
+ tokenizerRequired: '请先添加Tokenizer节点',
+ tokenizerDescription:
+ '根据所选的搜索方法,将文本转换为所需的数据结构(例如,用于嵌入搜索的向量嵌入)。',
+ splitter: '分词器拆分器',
+ splitterDescription:
+ '根据分词器长度将文本拆分成块,并带有可选的分隔符和重叠。',
+ hierarchicalMergerDescription:
+ '使用正则表达式规则按标题层次结构将文档拆分成多个部分,以实现更精细的控制。',
+ hierarchicalMerger: '标题拆分器',
+ extractor: '提取器',
+ extractorDescription:
+ '使用 LLM 从文档块(例如摘要、分类等)中提取结构化见解。',
+ outputFormat: '输出格式',
+ lang: '语言',
+ fileFormats: '文件格式',
+ fields: '字段',
+ addParser: '增加解析器',
+ hierarchy: '层次结构',
+ regularExpressions: '正则表达式',
+ overlappedPercent: '重叠百分比',
+ searchMethod: '搜索方法',
+ filenameEmbdWeight: '文件名嵌入权重',
+ begin: '文件',
+ parserMethod: '解析方法',
+ systemPrompt: '系统提示词',
+ systemPromptPlaceholder:
+ '请输入用于图像分析的系统提示词,若为空则使用系统缺省值',
+ exportJson: '导出 JSON',
+ viewResult: '查看结果',
+ running: '运行中',
+ summary: '增强上下文',
+ keywords: '关键词',
+ questions: '问题',
+ metadata: '元数据',
+ fieldName: '结果目的地',
+ prompts: {
+ system: {
+ keywords: `角色
+你是一名文本分析员。
+
+任务
+从给定的文本内容中提取最重要的关键词/短语。
+
+要求
+- 总结文本内容,并给出最重要的5个关键词/短语。
+- 关键词必须与给定的文本内容使用相同的语言。
+- 关键词之间用英文逗号分隔。
+- 仅输出关键词。`,
+ questions: `角色
+你是一名文本分析员。
+
+任务
+针对给定的文本内容提出3个问题。
+
+要求
+- 理解并总结文本内容,并提出最重要的3个问题。
+- 问题的含义不应重叠。
+- 问题应尽可能涵盖文本的主要内容。
+- 问题必须与给定的文本内容使用相同的语言。
+- 每行一个问题。
+- 仅输出问题。`,
+ summary: `扮演一个精准的摘要者。你的任务是为提供的内容创建一个简洁且忠实于原文的摘要。
+
+关键说明:
+1. 准确性:摘要必须严格基于所提供的信息。请勿引入任何未明确说明的新事实、结论或解释。
+2. 语言:摘要必须使用与原文相同的语言。
+3. 客观性:不带偏见地呈现要点,保留内容的原始意图和语气。请勿进行编辑。
+4. 简洁性:专注于最重要的思想,省略细节和多余的内容。`,
+ metadata: `从给定内容中提取重要的结构化信息。仅输出有效的 JSON 字符串,不包含任何附加文本。如果未找到重要的结构化信息,则输出一个空的 JSON 对象:{}。
+
+重要的结构化信息可能包括:姓名、日期、地点、事件、关键事实、数字数据或其他可提取实体。`,
+ },
+ user: {
+ keywords: `文本内容
+[在此处插入文本]`,
+ questions: `文本内容
+[在此处插入文本]`,
+ summary: `要总结的文本:
+[在此处插入文本]`,
+ metadata: `内容:[在此处插入内容]`,
+ },
+ },
+ cancel: '取消',
+ filenameEmbeddingWeight: '文件名嵌入权重',
+ switchPromptMessage: '提示词将发生变化,请确认是否放弃已有提示词?',
+ },
+ datasetOverview: {
+ downloadTip: '正在从数据源下载文件。',
+ processingTip: '正在由数据流处理文件。',
+ totalFiles: '文件总数',
+ downloading: '正在下载',
+ processing: '正在处理',
},
},
-};
\ No newline at end of file
+};
diff --git a/src/pages/Login.tsx b/src/pages/Login.tsx
index fd78898..d8b6952 100644
--- a/src/pages/Login.tsx
+++ b/src/pages/Login.tsx
@@ -13,9 +13,11 @@ import {
AppBar,
Toolbar,
Card,
- CardContent
+ CardContent,
+ Alert
} from '@mui/material';
import LanguageSwitcher from '../components/LanguageSwitcher';
+import userService from '../services/user_service';
const Login = () => {
const { t } = useTranslation();
@@ -25,24 +27,40 @@ const Login = () => {
const [isSubmitting, setIsSubmitting] = useState(false);
const [emailError, setEmailError] = useState(false);
const [passwordError, setPasswordError] = useState(false);
+ const [loginError, setLoginError] = useState('');
const navigate = useNavigate();
console.log(t, t('en'), t('login'));
- const handleSubmit = (e: React.FormEvent) => {
+ const handleSubmit = async (e: React.FormEvent) => {
e.preventDefault();
const hasEmail = !!email.trim();
const hasPassword = !!password.trim();
setEmailError(!hasEmail);
setPasswordError(!hasPassword);
+ setLoginError('');
+
if (!hasEmail || !hasPassword) return;
setIsSubmitting(true);
- // 模拟登录过程
- setTimeout(() => {
- navigate('/');
- }, 800);
+
+ try {
+ const response = await userService.login({ email, password });
+
+ if (response.code === 0) {
+ // 登录成功,跳转到主页
+ navigate('/');
+ } else {
+ // 登录失败,显示错误信息
+ setLoginError(response.message || t('login.loginFailed'));
+ }
+ } catch (error: any) {
+ // 处理网络错误或其他异常
+ setLoginError(error.message || t('login.networkError'));
+ } finally {
+ setIsSubmitting(false);
+ }
};
return (
@@ -84,6 +102,12 @@ const Login = () => {
+ {loginError && (
+
+ {loginError}
+
+ )}
+
`${api_host}/user/login/${channel}`,
+
+ // team
+ addTenantUser: (tenantId: string) => `${api_host}/tenant/${tenantId}/user`,
+ listTenantUser: (tenantId: string) =>
+ `${api_host}/tenant/${tenantId}/user/list`,
+ deleteTenantUser: (tenantId: string, userId: string) =>
+ `${api_host}/tenant/${tenantId}/user/${userId}`,
+ listTenant: `${api_host}/tenant/list`,
+ agreeTenant: (tenantId: string) => `${api_host}/tenant/agree/${tenantId}`,
+
+ // llm model
+ factories_list: `${api_host}/llm/factories`,
+ llm_list: `${api_host}/llm/list`,
+ my_llm: `${api_host}/llm/my_llms`,
+ set_api_key: `${api_host}/llm/set_api_key`,
+ add_llm: `${api_host}/llm/add_llm`,
+ delete_llm: `${api_host}/llm/delete_llm`,
+ deleteFactory: `${api_host}/llm/delete_factory`,
+
+ // plugin
+ llm_tools: `${api_host}/plugin/llm_tools`,
+
+ // knowledge base
+ kb_list: `${api_host}/kb/list`,
+ create_kb: `${api_host}/kb/create`,
+ update_kb: `${api_host}/kb/update`,
+ rm_kb: `${api_host}/kb/rm`,
+ get_kb_detail: `${api_host}/kb/detail`,
+ getKnowledgeGraph: (knowledgeId: string) =>
+ `${api_host}/kb/${knowledgeId}/knowledge_graph`,
+ getMeta: `${api_host}/kb/get_meta`,
+ getKnowledgeBasicInfo: `${api_host}/kb/basic_info`,
+ // data pipeline log
+ fetchDataPipelineLog: `${api_host}/kb/list_pipeline_logs`,
+ get_pipeline_detail: `${api_host}/kb/pipeline_log_detail`,
+ fetchPipelineDatasetLogs: `${api_host}/kb/list_pipeline_dataset_logs`,
+ runGraphRag: `${api_host}/kb/run_graphrag`,
+ traceGraphRag: `${api_host}/kb/trace_graphrag`,
+ runRaptor: `${api_host}/kb/run_raptor`,
+ traceRaptor: `${api_host}/kb/trace_raptor`,
+ unbindPipelineTask: ({ kb_id, type }: { kb_id: string; type: string }) =>
+ `${api_host}/kb/unbind_task?kb_id=${kb_id}&pipeline_task_type=${type}`,
+ pipelineRerun: `${api_host}/canvas/rerun`,
+
+ // tags
+ listTag: (knowledgeId: string) => `${api_host}/kb/${knowledgeId}/tags`,
+ listTagByKnowledgeIds: `${api_host}/kb/tags`,
+ removeTag: (knowledgeId: string) => `${api_host}/kb/${knowledgeId}/rm_tags`,
+ renameTag: (knowledgeId: string) =>
+ `${api_host}/kb/${knowledgeId}/rename_tag`,
+
+ // chunk
+ chunk_list: `${api_host}/chunk/list`,
+ create_chunk: `${api_host}/chunk/create`,
+ set_chunk: `${api_host}/chunk/set`,
+ get_chunk: `${api_host}/chunk/get`,
+ switch_chunk: `${api_host}/chunk/switch`,
+ rm_chunk: `${api_host}/chunk/rm`,
+ retrieval_test: `${api_host}/chunk/retrieval_test`,
+ knowledge_graph: `${api_host}/chunk/knowledge_graph`,
+
+ // document
+ get_document_list: `${api_host}/document/list`,
+ document_change_status: `${api_host}/document/change_status`,
+ document_rm: `${api_host}/document/rm`,
+ document_delete: `${api_host}/api/document`,
+ document_rename: `${api_host}/document/rename`,
+ document_create: `${api_host}/document/create`,
+ document_run: `${api_host}/document/run`,
+ document_change_parser: `${api_host}/document/change_parser`,
+ document_thumbnails: `${api_host}/document/thumbnails`,
+ get_document_file: `${api_host}/document/get`,
+ document_upload: `${api_host}/document/upload`,
+ web_crawl: `${api_host}/document/web_crawl`,
+ document_infos: `${api_host}/document/infos`,
+ upload_and_parse: `${api_host}/document/upload_and_parse`,
+ parse: `${api_host}/document/parse`,
+ setMeta: `${api_host}/document/set_meta`,
+ get_dataset_filter: `${api_host}/document/filter`,
+
+ // chat
+ setDialog: `${api_host}/dialog/set`,
+ getDialog: `${api_host}/dialog/get`,
+ removeDialog: `${api_host}/dialog/rm`,
+ listDialog: `${api_host}/dialog/list`,
+ setConversation: `${api_host}/conversation/set`,
+ getConversation: `${api_host}/conversation/get`,
+ getConversationSSE: `${api_host}/conversation/getsse`,
+ listConversation: `${api_host}/conversation/list`,
+ removeConversation: `${api_host}/conversation/rm`,
+ completeConversation: `${api_host}/conversation/completion`,
+ deleteMessage: `${api_host}/conversation/delete_msg`,
+ thumbup: `${api_host}/conversation/thumbup`,
+ tts: `${api_host}/conversation/tts`,
+ ask: `${api_host}/conversation/ask`,
+ mindmap: `${api_host}/conversation/mindmap`,
+ getRelatedQuestions: `${api_host}/conversation/related_questions`,
+ // chat for external
+ createToken: `${api_host}/api/new_token`,
+ listToken: `${api_host}/api/token_list`,
+ removeToken: `${api_host}/api/rm`,
+ getStats: `${api_host}/api/stats`,
+ createExternalConversation: `${api_host}/api/new_conversation`,
+ getExternalConversation: `${api_host}/api/conversation`,
+ completeExternalConversation: `${api_host}/api/completion`,
+ uploadAndParseExternal: `${api_host}/api/document/upload_and_parse`,
+
+ // next chat
+ listNextDialog: `${api_host}/dialog/next`,
+ fetchExternalChatInfo: (id: string) =>
+ `${ExternalApi}${api_host}/chatbots/${id}/info`,
+
+ // file manager
+ listFile: `${api_host}/file/list`,
+ uploadFile: `${api_host}/file/upload`,
+ removeFile: `${api_host}/file/rm`,
+ renameFile: `${api_host}/file/rename`,
+ getAllParentFolder: `${api_host}/file/all_parent_folder`,
+ createFolder: `${api_host}/file/create`,
+ connectFileToKnowledge: `${api_host}/file2document/convert`,
+ getFile: `${api_host}/file/get`,
+ moveFile: `${api_host}/file/mv`,
+
+ // system
+ getSystemVersion: `${api_host}/system/version`,
+ getSystemStatus: `${api_host}/system/status`,
+ getSystemTokenList: `${api_host}/system/token_list`,
+ createSystemToken: `${api_host}/system/new_token`,
+ listSystemToken: `${api_host}/system/token_list`,
+ removeSystemToken: `${api_host}/system/token`,
+ getSystemConfig: `${api_host}/system/config`,
+ setLangfuseConfig: `${api_host}/langfuse/api_key`,
+
+ // flow
+ listTemplates: `${api_host}/canvas/templates`,
+ listCanvas: `${api_host}/canvas/list`,
+ getCanvas: `${api_host}/canvas/get`,
+ getCanvasSSE: `${api_host}/canvas/getsse`,
+ removeCanvas: `${api_host}/canvas/rm`,
+ setCanvas: `${api_host}/canvas/set`,
+ settingCanvas: `${api_host}/canvas/setting`,
+ getListVersion: `${api_host}/canvas/getlistversion`,
+ getVersion: `${api_host}/canvas/getversion`,
+ resetCanvas: `${api_host}/canvas/reset`,
+ runCanvas: `${api_host}/canvas/completion`,
+ testDbConnect: `${api_host}/canvas/test_db_connect`,
+ getInputElements: `${api_host}/canvas/input_elements`,
+ debug: `${api_host}/canvas/debug`,
+ uploadCanvasFile: `${api_host}/canvas/upload`,
+ trace: `${api_host}/canvas/trace`,
+ // agent
+ inputForm: `${api_host}/canvas/input_form`,
+ fetchVersionList: (id: string) => `${api_host}/canvas/getlistversion/${id}`,
+ fetchVersion: (id: string) => `${api_host}/canvas/getversion/${id}`,
+ fetchCanvas: (id: string) => `${api_host}/canvas/get/${id}`,
+ fetchAgentAvatar: (id: string) => `${api_host}/canvas/getsse/${id}`,
+ uploadAgentFile: (id?: string) => `${api_host}/canvas/upload/${id}`,
+ fetchAgentLogs: (canvasId: string) =>
+ `${api_host}/canvas/${canvasId}/sessions`,
+ fetchExternalAgentInputs: (canvasId: string) =>
+ `${ExternalApi}${api_host}/agentbots/${canvasId}/inputs`,
+ prompt: `${api_host}/canvas/prompts`,
+ cancelDataflow: (id: string) => `${api_host}/canvas/cancel/${id}`,
+ downloadFile: `${api_host}/canvas/download`,
+
+ // mcp server
+ listMcpServer: `${api_host}/mcp_server/list`,
+ getMcpServer: `${api_host}/mcp_server/detail`,
+ createMcpServer: `${api_host}/mcp_server/create`,
+ updateMcpServer: `${api_host}/mcp_server/update`,
+ deleteMcpServer: `${api_host}/mcp_server/rm`,
+ importMcpServer: `${api_host}/mcp_server/import`,
+ exportMcpServer: `${api_host}/mcp_server/export`,
+ listMcpServerTools: `${api_host}/mcp_server/list_tools`,
+ testMcpServerTool: `${api_host}/mcp_server/test_tool`,
+ cacheMcpServerTool: `${api_host}/mcp_server/cache_tools`,
+ testMcpServer: `${api_host}/mcp_server/test_mcp`,
+
+ // next-search
+ createSearch: `${api_host}/search/create`,
+ getSearchList: `${api_host}/search/list`,
+ deleteSearch: `${api_host}/search/rm`,
+ getSearchDetail: `${api_host}/search/detail`,
+ getSearchDetailShare: `${ExternalApi}${api_host}/searchbots/detail`,
+ updateSearchSetting: `${api_host}/search/update`,
+ askShare: `${ExternalApi}${api_host}/searchbots/ask`,
+ mindmapShare: `${ExternalApi}${api_host}/searchbots/mindmap`,
+ getRelatedQuestionsShare: `${ExternalApi}${api_host}/searchbots/related_questions`,
+ retrievalTestShare: `${ExternalApi}${api_host}/searchbots/retrieval_test`,
+
+ // data pipeline
+ fetchDataflow: (id: string) => `${api_host}/dataflow/get/${id}`,
+ setDataflow: `${api_host}/dataflow/set`,
+ removeDataflow: `${api_host}/dataflow/rm`,
+ listDataflow: `${api_host}/dataflow/list`,
+ runDataflow: `${api_host}/dataflow/run`,
+};
diff --git a/src/services/user_service.ts b/src/services/user_service.ts
new file mode 100644
index 0000000..f0f0aa5
--- /dev/null
+++ b/src/services/user_service.ts
@@ -0,0 +1,74 @@
+import api from './api';
+import request, { post } from '@/utils/request';
+
+// 用户相关API服务
+const userService = {
+ // 用户登录
+ login: (data: { email: string; password: string }) => {
+ return post(api.login, data);
+ },
+
+ // 用户登出
+ logout: () => {
+ return request.get(api.logout);
+ },
+
+ // 用户注册
+ register: (data: { email: string; password: string; username?: string }) => {
+ return post(api.register, data);
+ },
+
+ // 获取用户信息
+ getUserInfo: () => {
+ return request.get(api.user_info);
+ },
+
+ // 更新用户设置
+ updateSetting: (data: any) => {
+ return post(api.setting, data);
+ },
+
+ // 获取租户信息
+ getTenantInfo: () => {
+ return request.get(api.tenant_info);
+ },
+
+ // 设置租户信息
+ setTenantInfo: (data: any) => {
+ return post(api.set_tenant_info, data);
+ },
+
+ // 获取登录渠道
+ getLoginChannels: () => {
+ return request.get(api.login_channels);
+ },
+
+ // 通过渠道登录
+ loginWithChannel: (channel: string) => {
+ window.location.href = api.login_channel(channel);
+ },
+
+ // 租户用户管理
+ listTenantUser: (tenantId: string) => {
+ return request.get(api.listTenantUser(tenantId));
+ },
+
+ addTenantUser: (tenantId: string, email: string) => {
+ return post(api.addTenantUser(tenantId), { email });
+ },
+
+ deleteTenantUser: ({ tenantId, userId }: { tenantId: string; userId: string }) => {
+ return request.delete(api.deleteTenantUser(tenantId, userId));
+ },
+
+ // 租户管理
+ listTenant: () => {
+ return request.get(api.listTenant);
+ },
+
+ agreeTenant: (tenantId: string) => {
+ return request.put(api.agreeTenant(tenantId));
+ },
+};
+
+export default userService;
\ No newline at end of file
diff --git a/src/utils/common.ts b/src/utils/common.ts
new file mode 100644
index 0000000..a1da2a0
--- /dev/null
+++ b/src/utils/common.ts
@@ -0,0 +1,24 @@
+import isObject from 'lodash/isObject';
+import snakeCase from 'lodash/snakeCase';
+
+export const isFormData = (data: unknown): data is FormData => {
+ return data instanceof FormData;
+};
+
+const excludedFields = ['img2txt_id', 'mcpServers'];
+
+const isExcludedField = (key: string) => {
+ return excludedFields.includes(key);
+};
+
+export const convertTheKeysOfTheObjectToSnake = (data: unknown) => {
+ if (isObject(data) && !isFormData(data)) {
+ return Object.keys(data).reduce>((pre, cur) => {
+ const value = (data as Record)[cur];
+ pre[isFormData(value) || isExcludedField(cur) ? cur : snakeCase(cur)] =
+ value;
+ return pre;
+ }, {});
+ }
+ return data;
+};
diff --git a/src/utils/request.ts b/src/utils/request.ts
new file mode 100644
index 0000000..ff431c0
--- /dev/null
+++ b/src/utils/request.ts
@@ -0,0 +1,188 @@
+import { Authorization } from '@/constants/authorization';
+import type { ResponseType } from '@/interfaces/database/base';
+import i18n from '@/locales';
+import axios from 'axios';
+import type { AxiosRequestConfig, AxiosInstance, AxiosResponse, InternalAxiosRequestConfig } from 'axios';
+
+import { snackbar, notification } from '@/utils/snackbarInstance';
+
+const FAILED_TO_FETCH = 'Failed to fetch';
+
+export const RetcodeMessage = {
+ 200: i18n.t('message.200'),
+ 201: i18n.t('message.201'),
+ 202: i18n.t('message.202'),
+ 204: i18n.t('message.204'),
+ 400: i18n.t('message.400'),
+ 401: i18n.t('message.401'),
+ 403: i18n.t('message.403'),
+ 404: i18n.t('message.404'),
+ 406: i18n.t('message.406'),
+ 410: i18n.t('message.410'),
+ 413: i18n.t('message.413'),
+ 422: i18n.t('message.422'),
+ 500: i18n.t('message.500'),
+ 502: i18n.t('message.502'),
+ 503: i18n.t('message.503'),
+ 504: i18n.t('message.504'),
+};
+
+export type ResultCode =
+ | 200
+ | 201
+ | 202
+ | 204
+ | 400
+ | 401
+ | 403
+ | 404
+ | 406
+ | 410
+ | 413
+ | 422
+ | 500
+ | 502
+ | 503
+ | 504;
+
+// 获取授权token
+const getAuthorization = (): string => {
+ return localStorage.getItem('token') || '';
+};
+
// Clears the stored credential and hard-navigates to the login page.
// Uses a full page load rather than router navigation, which discards all
// in-memory app state — TODO confirm that reset is intended.
const redirectToLogin = (): void => {
  localStorage.removeItem('token');
  window.location.href = '/login';
};
+
+// 转换对象键为snake_case
+const convertTheKeysOfTheObjectToSnake = (obj: any): any => {
+ if (!obj || typeof obj !== 'object') return obj;
+
+ if (Array.isArray(obj)) {
+ return obj.map(convertTheKeysOfTheObjectToSnake);
+ }
+
+ const result: any = {};
+ Object.keys(obj).forEach(key => {
+ const snakeKey = key.replace(/[A-Z]/g, letter => `_${letter.toLowerCase()}`);
+ result[snakeKey] = convertTheKeysOfTheObjectToSnake(obj[key]);
+ });
+
+ return result;
+};
+
// Shared axios instance for the whole app.
// baseURL comes from the Vite env (VITE_API_BASE_URL); timeout is 300000 ms
// (5 minutes) — presumably to accommodate long uploads/exports, TODO confirm.
const request: AxiosInstance = axios.create({
  baseURL: import.meta.env.VITE_API_BASE_URL,
  timeout: 300000,
  headers: {
    'Content-Type': 'application/json',
  },
});
+
+// 请求拦截器
+request.interceptors.request.use(
+ (config: InternalAxiosRequestConfig) => {
+ // 转换数据格式
+ if (config.data) {
+ config.data = convertTheKeysOfTheObjectToSnake(config.data);
+ }
+ if (config.params) {
+ config.params = convertTheKeysOfTheObjectToSnake(config.params);
+ }
+
+ // 添加授权头
+ const token = getAuthorization();
+ if (token && !config.headers?.skipToken) {
+ config.headers[Authorization] = token;
+ }
+
+ return config;
+ },
+ (error) => {
+ return Promise.reject(error);
+ }
+);
+
// Response interceptor: surfaces transport and business errors through the
// global snackbar/notification bridge, and forces re-login on business
// code 401. Responses are always returned/rejected unchanged, so callers
// still see the raw AxiosResponse.
request.interceptors.response.use(
  (response: AxiosResponse) => {
    const { status } = response;

    // Payload-too-large / gateway-timeout get a lightweight snackbar even
    // though they arrive as successful HTTP-level responses here.
    if (status === 413 || status === 504) {
      snackbar.error(RetcodeMessage[status as ResultCode]);
    }

    // Blob downloads have no JSON envelope to inspect — return as-is.
    if (response.config.responseType === 'blob') {
      return response;
    }

    const data: ResponseType = response.data;

    // Business-error envelope: code 0 = success; 100 = plain user-facing
    // error; 401 = auth expired (notify, then hard-redirect to login);
    // anything else gets a generic "hint : <code>" notification.
    // NOTE(review): errors are reported but the promise still resolves —
    // callers must check `data.code` themselves; confirm this is intended.
    if (data?.code === 100) {
      snackbar.error(data?.message);
    } else if (data?.code === 401) {
      notification.error(data?.message);
      redirectToLogin();
    } else if (data?.code !== 0) {
      notification.error(`${i18n.t('message.hint')} : ${data?.code}`, data?.message);
    }

    return response;
  },
  (error) => {
    // Network-level failure (no response at all).
    // NOTE(review): axios sets error.message to 'Network Error', so the
    // FAILED_TO_FETCH comparison likely never matches — `!error.response`
    // is the branch that actually fires; confirm and simplify.
    if (error.message === FAILED_TO_FETCH || !error.response) {
      notification.error(i18n.t('message.networkAnomaly'), i18n.t('message.networkAnomalyDescription'));
    } else if (error.response) {
      // HTTP error status: prefer the localized message, fall back to the
      // server's statusText for codes we don't know.
      const { status, statusText } = error.response;
      const errorText = RetcodeMessage[status as ResultCode] || statusText;

      notification.error(`${i18n.t('message.requestError')} ${status}`, errorText);
    }

    return Promise.reject(error);
  }
);
+
+export default request;
+
+// 便捷方法
+export const get = (url: string, config?: AxiosRequestConfig) => {
+ return request.get(url, config);
+};
+
+export const post = (url: string, data?: any, config?: AxiosRequestConfig) => {
+ return request.post(url, data, config);
+};
+
+export const put = (url: string, data?: any, config?: AxiosRequestConfig) => {
+ return request.put(url, data, config);
+};
+
+export const del = (url: string, config?: AxiosRequestConfig) => {
+ return request.delete(url, config);
+};
\ No newline at end of file
diff --git a/src/utils/snackbarInstance.ts b/src/utils/snackbarInstance.ts
new file mode 100644
index 0000000..41d70ea
--- /dev/null
+++ b/src/utils/snackbarInstance.ts
@@ -0,0 +1,29 @@
+// 为非组件文件提供全局 snackbar 实例
+const getSnackbarInstance = () => {
+ if (typeof window !== 'undefined') {
+ return (window as any).__snackbarInstance;
+ }
+ return null;
+};
+
+export const snackbar = {
+ success: (msg: string, duration?: number) =>
+ getSnackbarInstance()?.showMessage.success(msg, duration),
+ error: (msg: string, duration?: number) =>
+ getSnackbarInstance()?.showMessage.error(msg, duration),
+ warning: (msg: string, duration?: number) =>
+ getSnackbarInstance()?.showMessage.warning(msg, duration),
+ info: (msg: string, duration?: number) =>
+ getSnackbarInstance()?.showMessage.info(msg, duration),
+};
+
+export const notification = {
+ success: (title: string, message?: string, duration?: number) =>
+ getSnackbarInstance()?.showNotification.success(title, message, duration),
+ error: (title: string, message?: string, duration?: number) =>
+ getSnackbarInstance()?.showNotification.error(title, message, duration),
+ warning: (title: string, message?: string, duration?: number) =>
+ getSnackbarInstance()?.showNotification.warning(title, message, duration),
+ info: (title: string, message?: string, duration?: number) =>
+ getSnackbarInstance()?.showNotification.info(title, message, duration),
+};
\ No newline at end of file
diff --git a/tsconfig.app.json b/tsconfig.app.json
index a9b5a59..3238272 100644
--- a/tsconfig.app.json
+++ b/tsconfig.app.json
@@ -22,7 +22,13 @@
"noUnusedParameters": true,
"erasableSyntaxOnly": true,
"noFallthroughCasesInSwitch": true,
- "noUncheckedSideEffectImports": true
+ "noUncheckedSideEffectImports": true,
+
+ /* Path Aliases */
+ "baseUrl": ".",
+ "paths": {
+ "@/*": ["src/*"]
+ }
},
"include": ["src"]
}
diff --git a/tsconfig.json b/tsconfig.json
index 5888b8c..8d1d2e2 100644
--- a/tsconfig.json
+++ b/tsconfig.json
@@ -4,5 +4,8 @@
{ "path": "./tsconfig.app.json" },
{ "path": "./tsconfig.node.json" }
],
- "exclude": ["rag_web_core"]
+ // keep the rag_web_core sources out of this project's type checking
+ "exclude": [
+ "rag_web_core/**"
+ ]
}
diff --git a/vite.config.ts b/vite.config.ts
index 8b0f57b..ace547d 100644
--- a/vite.config.ts
+++ b/vite.config.ts
@@ -1,7 +1,13 @@
import { defineConfig } from 'vite'
import react from '@vitejs/plugin-react'
+import path from 'path'
// https://vite.dev/config/
export default defineConfig({
plugins: [react()],
+ resolve: {
+ alias: {
+ '@': path.resolve(__dirname, './src'),
+ },
+ },
})