Update zh-CN localization #201

Open
wants to merge 5 commits into base: main
207 changes: 150 additions & 57 deletions zh-CN/chat.json

Large diffs are not rendered by default.

522 changes: 382 additions & 140 deletions zh-CN/config.json

Large diffs are not rendered by default.

179 changes: 140 additions & 39 deletions zh-CN/developer.json
@@ -1,68 +1,169 @@
{
"tabs/server": "本地服务器",
"tabs/extensions": "LM 运行环境",
"tabs/extensions": "LM运行环境",
"loadSettings/title": "加载设置",
"modelSettings/placeholder": "选择一个模型进行配置",
"loadedModels/noModels": "没有已加载的模型",
"modelSettings/placeholder": "选择一个模型以进行配置",

"loadedModels/noModels": "没有加载的模型",

"serverOptions/title": "服务器选项",
"serverOptions/configurableTitle": "可配置选项",
"serverOptions/port/hint": "设置本地服务器将使用的网络端口。默认情况下,LM Studio 使用端口 1234。如果该端口已被占用,您可能需要更改此设置。",
"serverOptions/port/hint": "设置本地服务器将使用的网络端口。默认情况下,LM Studio使用1234端口。如果该端口已被占用,则可能需要更改此设置。",
"serverOptions/port/subtitle": "监听的端口",
"serverOptions/autostart/title": "自动启动服务器",
"serverOptions/autostart/hint": "当加载模型时自动启动本地服务器",
"serverOptions/autostart/hint": "在应用程序或服务启动时自动开启LM Studio的本地LLM服务器。",
"serverOptions/port/integerWarning": "端口号必须是整数",
"serverOptions/port/invalidPortWarning": "端口号必须介于 1 到 65535 之间",
"serverOptions/cors/title": "启用 CORS",
"serverOptions/cors/hint1": "启用 CORS (跨源资源共享) 允许您访问的网站向 LM Studio 服务器发起请求。",
"serverOptions/cors/hint2": "当从网页或 VS Code / 其他扩展发起请求时,可能需要启用 CORS。",
"serverOptions/cors/subtitle": "允许跨源请求",
"serverOptions/network/title": "在网络中提供服务",
"serverOptions/network/subtitle": "向网络中的设备开放服务器",
"serverOptions/port/invalidPortWarning": "端口必须在1到65535之间",
"serverOptions/cors/title": "启用CORS",
"serverOptions/cors/hint1": "启用CORS(跨域资源共享)将允许您访问的网站向LM Studio服务器发送请求。",
"serverOptions/cors/hint2": "当从网页或VS Code/其他扩展发出请求时,可能需要启用CORS。",
"serverOptions/cors/subtitle": "允许跨域请求",
"serverOptions/network/title": "在局域网中提供服务",
"serverOptions/network/subtitle": "将服务器暴露给网络中的其他设备",
"serverOptions/network/hint1": "是否允许来自网络中其他设备的连接。",
"serverOptions/network/hint2": "如果未选中,服务器将仅监听本地主机。",
"serverOptions/network/hint2": "如果不勾选,服务器将仅监听localhost。",
"serverOptions/verboseLogging/title": "详细日志记录",
"serverOptions/verboseLogging/subtitle": "为本地服务器启用详细日志记录",
"serverOptions/contentLogging/title": "记录提示和响应",
"serverOptions/contentLogging/subtitle": "本地请求/响应日志记录设置",
"serverOptions/contentLogging/subtitle": "本地请求/响应日志设置",
"serverOptions/contentLogging/hint": "是否在本地服务器日志文件中记录提示和/或响应。",
"serverOptions/fileLoggingMode/title": "文件日志模式",
"serverOptions/fileLoggingMode/off/title": "关闭",
"serverOptions/fileLoggingMode/off/hint": "不创建日志文件",
"serverOptions/fileLoggingMode/succinct/title": "简洁",
"serverOptions/fileLoggingMode/succinct/hint": "记录与控制台相同的内容。长请求将被截断。",
"serverOptions/fileLoggingMode/full/title": "完整",
"serverOptions/fileLoggingMode/full/hint": "不截断长请求。",
"serverOptions/jitModelLoading/title": "即时模型加载",
"serverOptions/jitModelLoading/hint": "启用后,如果请求指定了一个未加载的模型,该模型将自动加载并使用。此外,\"/v1/models\" 端点还将包含尚未加载的模型。",
"serverOptions/loadModel/error": "加载模型失败",

"serverOptions/jitModelLoading/hint": "启用后,如果请求指定了未加载的模型,它将自动加载并使用。此外,“/v1/models”端点还将包含尚未加载的模型。",
"serverOptions/loadModel/error": "无法加载模型",
"serverOptions/jitModelLoadingTTL/title": "自动卸载未使用的JIT加载模型",
"serverOptions/jitModelLoadingTTL/hint": "通过即时加载(JIT)加载的模型,在一段时间(TTL)内未被使用后将被自动卸载。",
"serverOptions/jitModelLoadingTTL/ttl/label": "最大空闲TTL",
"serverOptions/jitModelLoadingTTL/ttl/unit": "分钟",
"serverOptions/unloadPreviousJITModelOnLoad/title": "仅保留最后的JIT加载模型",
"serverOptions/unloadPreviousJITModelOnLoad/hint": "确保在任何给定时间最多只有一个通过JIT加载的模型(卸载之前的模型)。",

"serverLogs/scrollToBottom": "跳转到底部",
"serverLogs/clearLogs": "清除日志 ({{shortcut}})",
"serverLogs/openLogsFolder": "打开服务器日志文件夹",

"runtimeSettings/title": "运行环境设置",
"runtimeSettings/chooseRuntime/title": "配置运行环境",
"runtimeSettings/chooseRuntime/description": "为每个模型格式选择一个运行环境",
"runtimeSettings/chooseRuntime/title": "默认选择",
"runtimeSettings/chooseRuntime/description": "为每种模型格式选择默认的运行环境",
"runtimeSettings/chooseRuntime/showAllVersions/label": "显示所有运行环境",
"runtimeSettings/chooseRuntime/showAllVersions/hint": "默认情况下,LM Studio 只显示每个兼容运行环境的最新版本。启用此选项可以查看所有可用的运行环境。",
"runtimeSettings/chooseRuntime/showAllVersions/hint": "默认情况下,LM Studio仅显示每个兼容运行环境的最新版本。启用此选项可以查看所有可用的运行环境。",
"runtimeSettings/chooseRuntime/select/placeholder": "选择一个运行环境",

"runtimeOptions/uninstall": "卸载",
"runtimeOptions/uninstallDialog/title": "卸载 {{runtimeName}}?",
"runtimeOptions/uninstallDialog/body": "卸载此运行环境将从系统中移除它。此操作不可逆。",
"runtimeOptions/uninstallDialog/body/caveats": "某些文件可能需要在重启 LM Studio 后才能被移除。",
"runtimeOptions/uninstallDialog/error": "卸载运行环境失败",
"runtimeOptions/uninstallDialog/body": "卸载此运行环境将将其从系统中移除。此操作不可逆。",
"runtimeOptions/uninstallDialog/body/caveats": "某些文件可能只有在LM Studio重新启动后才会被删除。",
"runtimeOptions/uninstallDialog/error": "无法卸载运行环境",
"runtimeOptions/uninstallDialog/confirm": "继续并卸载",
"runtimeOptions/uninstallDialog/cancel": "取消",
"runtimeOptions/noCompatibleRuntimes": "未找到兼容的运行环境",
"runtimeOptions/downloadIncompatibleRuntime": "此运行环境被认为与您的机器不兼容。它很可能无法正常工作。",
"runtimeOptions/downloadIncompatibleRuntime": "此运行环境被确定为与您的机器不兼容。它很可能无法正常工作。",
"runtimeOptions/noRuntimes": "未找到运行环境",

"inferenceParams/noParams": "此模型类型没有可配置的推理参数",

"endpoints/openaiCompatRest/title": "支持的端点 (类似 OpenAI 的)",
"endpoints/openaiCompatRest/getModels": "列出当前已加载的模型",
"endpoints/openaiCompatRest/postCompletions": "文本补全模式。给定一个提示,预测下一个词元(token)。注意:OpenAI 认为此端点已'弃用'。",
"endpoints/openaiCompatRest/postChatCompletions": "聊天补全。向模型发送聊天历史以预测下一个助手响应",

"runtimes": {
"manageLMRuntimes": "管理LM运行环境",
"includeOlderRuntimeVersions": "包括旧版运行环境",
"dismiss": "关闭",
"updateAvailableToast": {
"title": "LM运行环境更新可用!"
},
"updatedToast": {
"title": " ✅ LM运行环境已更新:{{runtime}} → v{{version}}",
"preferencesUpdated": "新加载的{{compatibilityTypes}}模型将使用更新后的运行环境。"
},
"noAvx2ErrorMessage": "所有LM运行环境当前都需要支持AVX2的CPU",
"downloadableRuntimes": {
"runtimeExtensionPacks": "运行环境扩展包",
"refresh": "刷新",
"refreshing": "正在刷新...",
"filterSegment": {
"compatibleOnly": "仅兼容",
"all": "全部"
},
"card": {
"releaseNotes": "发行说明",
"latestVersionInstalled": "已安装最新版本",
"updateAvailable": "有可用更新"
}
},
"installedRuntimes": {
"manage": {
"title": "管理活动运行环境"
},
"dropdownOptions": {
"installedVersions": "管理版本",
"close": "关闭"
},
"tabs": {
"all": "全部",
"frameworks": "我的框架",
"engines": "我的引擎"
},
"detailsModal": {
"installedVersions": "{{runtimeName}} 的已安装版本",
"manifestJsonTitle": "清单JSON(高级)",
"releaseNotesTitle": "发行说明",
"noReleaseNotes": "此版本没有可用的发行说明",
"back": "返回",
"close": "关闭"
},
"noEngines": "未安装引擎",
"noFrameworks": "未安装框架"
}
},

"inferenceParams/noParams": "此模型类型没有可用的可配置推理参数",

"quickDocs": {
"tabChipTitle": "快速文档",
"newToolUsePopover": "代码片段现在可以在“快速文档”中使用。点击这里开始使用工具!",
"newToolUsePopoverTitle": "📚 快速文档",
"learnMore": "ℹ️ 👾 要了解更多关于LM Studio本地服务器端点的信息,请访问[文档](https://lmstudio.ai/docs)。",
"helloWorld": {
"title": "你好,世界!"
},
"chat": {
"title": "聊天"
},
"structuredOutput": {
"title": "结构化输出"
},
"imageInput": {
"title": "图像输入"
},
"embeddings": {
"title": "嵌入"
},
"toolUse": {
"title": "工具使用",
"tab": {
"saveAsPythonFile": "保存为Python文件",
"runTheScript": "运行脚本:",
"savePythonFileCopyPaste": "保存为Python文件以便复制粘贴命令"
}
},
"newBadge": "新"
},

"endpoints/openaiCompatRest/title": "支持的端点(类似OpenAI)",
"endpoints/openaiCompatRest/getModels": "列出当前加载的模型",
"endpoints/openaiCompatRest/postCompletions": "文本补全模式。根据提示预测下一个令牌。注意:OpenAI认为此端点已‘弃用’。",
"endpoints/openaiCompatRest/postChatCompletions": "聊天补全。向模型发送聊天历史以预测下一个助手回复",
"endpoints/openaiCompatRest/postEmbeddings": "文本嵌入。为给定的文本输入生成文本嵌入。接受字符串或字符串数组。",

"model.createVirtualModelFromInstance": "将设置保存为新的虚拟模型",
"model.createVirtualModelFromInstance/error": "无法将设置保存为新的虚拟模型",

"model": {
"toolUseSectionTitle": "工具使用",
"toolUseDescription": "检测到此模型已经过工具使用的训练\n\n打开<custom-link>快速文档</custom-link>获取更多信息"
},

"model.createVirtualModelFromInstance": "另存为新的虚拟模型",
"model.createVirtualModelFromInstance/error": "另存为新的虚拟模型失败",

"apiConfigOptions/title": "API 配置"
"apiConfigOptions/title": "API配置"
}
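Note on the endpoint strings in this file: they describe LM Studio's OpenAI-compatible local server (default port 1234 per "serverOptions/port/hint", a "/v1/models" listing, chat completions, and embeddings). The sketch below is illustrative only and is not part of this PR; it assumes the server is running on the default port, that the OpenAI-style /v1/chat/completions path applies, and the model identifier is a placeholder.

```python
# Minimal sketch (not part of this PR): calling the endpoints the strings above describe.
# Assumes an LM Studio local server on the default port 1234; the model name is a placeholder.
import json
import urllib.request

BASE_URL = "http://localhost:1234/v1"  # default port mentioned in serverOptions/port/hint

# List currently loaded models (endpoints/openaiCompatRest/getModels).
with urllib.request.urlopen(f"{BASE_URL}/models") as resp:
    print(json.load(resp))

# Send a chat history to predict the next assistant reply
# (endpoints/openaiCompatRest/postChatCompletions).
payload = json.dumps({
    "model": "your-model-identifier",  # placeholder; any loaded (or JIT-loadable) model
    "messages": [{"role": "user", "content": "你好,世界!"}],
}).encode("utf-8")
req = urllib.request.Request(
    f"{BASE_URL}/chat/completions",
    data=payload,
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(req) as resp:
    print(json.load(resp))
```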
26 changes: 13 additions & 13 deletions zh-CN/discover.json
@@ -1,26 +1,26 @@
{
"collectionsColumn": "集合",
"collectionsColumn/collectionError": "加载集合详情时出错,请尝试上方的刷新按钮",
"collectionsColumn/collectionError": "加载集合详情时出错,请尝试刷新",
"bookmarksColumn": "书签",
"searchBar/placeholder": "在 Hugging Face 上搜索模型...",
"searchBar/huggingFaceError": "从 Hugging Face 获取结果时出现错误,请稍后再试",
"sortBy": "排序依据",
"searchBar/placeholder": "在Hugging Face上搜索模型...",
"searchBar/huggingFaceError": "从Hugging Face获取结果时出错,请稍后再试",
"sortBy": "排序方式",
"searchSortKey.default/title": "最佳匹配",
"searchSortKey.likes/title": "最多点赞",
"searchSortKey.downloads/title": "最多下载",
"searchSortKey.lastModified/title": "最近更新",
"searchSortKey.createdAt/title": "最近创建",
"download.option.willFitEstimation.caveat": "可能存在其他因素阻止其加载,例如模型架构、模型文件完整性或计算机上可用的内存量。",
"download.option.willFitEstimation.fullGPUOffload/title": "完全 GPU 加载可能",
"download.option.willFitEstimation.fullGPUOffload/description": "此模型可能完全适合您的 GPU 内存。这可能会显著加快推理速度。",
"download.option.willFitEstimation.partialGPUOffload/title": "部分 GPU 加载可能",
"download.option.willFitEstimation.partialGPUOffload/description": "此模型可能部分适合您的 GPU 内存。这通常会显著加快推理速度。",
"download.option.willFitEstimation.caveat": "可能还有其他因素会阻止其加载,例如模型的架构、模型文件完整性或计算机上可用的内存数量。",
"download.option.willFitEstimation.fullGPUOffload/title": "完全GPU卸载可能",
"download.option.willFitEstimation.fullGPUOffload/description": "该模型可能完全适合您的GPU内存。这可以显著加快推理速度。",
"download.option.willFitEstimation.partialGPUOffload/title": "部分GPU卸载可能",
"download.option.willFitEstimation.partialGPUOffload/description": "该模型可能部分适合您的GPU内存。这通常可以显著加快推理速度。",
"download.option.willFitEstimation.fitWithoutGPU/title": "可能适合",
"download.option.willFitEstimation.fitWithoutGPU/description": "此模型可能适合您的机器内存。",
"download.option.willFitEstimation.willNotFit/title": "对于此机器可能过大",
"download.option.willFitEstimation.willNotFit/description": "成功使用此模型文件所需的内存可能超过您机器上的可用资源。下载此文件不推荐。",
"download.option.willFitEstimation.fitWithoutGPU/description": "该模型很可能适合您机器的内存。",
"download.option.willFitEstimation.willNotFit/title": "对本机来说可能过大",
"download.option.willFitEstimation.willNotFit/description": "成功使用此模型文件所需的内存可能超出您机器上的可用资源。不建议下载此文件。",
"download.option.recommended/title": "推荐",
"download.option.recommended/description": "基于您的硬件,此选项被推荐。",
"download.option.recommended/description": "基于您的硬件,此选项是推荐的。",
"download.option.downloaded/title": "已下载",
"download.option.downloading/title": "正在下载 ({{progressPercentile}}%)",

20 changes: 10 additions & 10 deletions zh-CN/download.json
@@ -1,23 +1,23 @@
{
"postDownloadActionExecutor.zipExtraction/status": "解压中...",
"finalizing": "完成下载...(这可能需要几分钟)",
"postDownloadActionExecutor.zipExtraction/status": "正在解压...",
"finalizing": "正在完成下载...(这可能需要一些时间)",
"noOptions": "没有可用的兼容下载选项",
"deeplink/confirmation/title": "从 Hugging Face 下载模型 🤗",

"deeplink/confirmation/title": "从 Hugging Face 🤗 下载模型",
"deeplink/confirmation/subtitle": "{{modelName}}",
"deeplink/confirmation/selectRecommended": "选择推荐项",
"deeplink/confirmation/selectOption": "选择下载选项",
"deeplink/confirmation/recommendedOption": "对大多数用户来说可能是最佳选项",
"deeplink/confirmation/recommendedOption": "可能是大多数用户的最佳选择",
"deeplink/confirmation/downloadButton": "下载",
"deeplink/confirmation/nevermindButton": "算了",
"deeplink/confirmation/modelPresent/title": "找到 Hugging Face 模型 ✅",
"deeplink/confirmation/modelPresent/body": "好消息!此模型文件已经在您的本地机器上可用。",
"deeplink/confirmation/modelPresent/title": "已找到 Hugging Face 模型 ✅",
"deeplink/confirmation/modelPresent/body": "好消息!该模型文件已经在您的本地机器上可用。",
"deeplink/confirmation/loadInChat": "在新聊天中加载 {{ modelName }}",
"deeplink/error/modelNotFound/title": "哎呀,我们未能找到此模型",
"deeplink/error/modelNotFound/body": "请再次检查模型名称,并考虑尝试不同的下载选项。",
"deeplink/error/modelNotFound/title": "哎呀,我们无法找到该模型",
"deeplink/error/modelNotFound/body": "请仔细检查模型名称,并考虑尝试其他下载选项。",
"deeplink/actions/trySearching": "尝试在 Hugging Face 上搜索 {{modelName}}",

"downloadsPanel/title": "下载",
"downloadsPanel/sectionTitle/ongoing": "正在进行",
"downloadsPanel/sectionTitle/ongoing": "进行中",
"downloadsPanel/sectionTitle/completed": "已完成"
}