[{"data":1,"prerenderedAt":3244},["ShallowReactive",2],{"search-docs":3,"doc-\u002Fai\u002Fllm\u002Ffine-tuning":886},[4,8,12,16,20,24,28,32,36,40,44,48,52,56,60,64,68,72,76,80,84,88,92,96,100,104,108,112,116,120,124,128,132,136,140,144,148,152,156,159,162,165,169,172,175,178,182,186,190,194,198,202,206,210,214,218,222,226,230,234,238,242,246,250,254,258,262,266,269,273,277,281,285,288,291,294,298,301,304,307,310,313,316,319,322,325,329,332,336,340,344,348,352,356,359,362,365,368,371,374,377,380,383,386,389,393,396,399,402,405,408,411,414,417,420,424,428,432,435,438,442,446,450,454,458,462,466,470,474,477,480,483,487,491,494,497,500,504,507,511,515,518,521,524,527,530,533,536,539,542,545,548,551,554,557,560,563,566,569,572,575,579,583,587,591,595,599,603,606,610,614,617,620,623,626,629,633,637,640,643,646,649,652,655,658,661,664,667,670,673,676,679,682,685,688,691,694,697,700,703,706,709,712,716,720,724,728,732,736,740,744,748,752,756,760,764,768,772,775,779,783,787,790,793,796,799,802,805,808,811,814,818,822,825,829,832,835,838,841,844,848,851,854,858,862,865,869,873,876,879,882],{"path":5,"title":6,"description":7},"\u002Fabout\u002Fauthor","作者相关","只想纯粹的做一个程序员...",{"path":9,"title":10,"description":11},"\u002Fabout\u002Fjourney","心路历程","",{"path":13,"title":14,"description":15},"\u002Fai\u002Fagent\u002Fframeworks","Agent 框架","主流 Agent 框架：LangChain、LlamaIndex、AutoGen、CrewAI",{"path":17,"title":18,"description":19},"\u002Fai\u002Fagent\u002Fhooks","Agent Hooks 与自动化","Claude Agent 的 Hooks 生命周期、事件类型、典型自动化场景",{"path":21,"title":22,"description":23},"\u002Fai\u002Fagent\u002Fintroduction","AI Agent 概述","AI Agent 核心概念：感知、规划、执行、记忆",{"path":25,"title":26,"description":27},"\u002Fai\u002Fagent\u002Fpractice","Agent 实战","AI Agent 实战：构建自主任务执行系统",{"path":29,"title":30,"description":31},"\u002Fai\u002Fagent\u002Fsdk","Claude Agent SDK 开发","使用 Claude Agent SDK 构建自定义 AI Agent：架构、API、生命周期",{"path":33,"title":34,"description":35},"\u002Fai\u002Fagent\u002Fsubagents","Subagents 子代理","用 Subagents 分解复杂任务、并发执行、隔离上下文",{"path":37,"title":38,"description":39},"\u002Fai\u002Fagent\u002Ftool-use","工具调用","AI Agent 工具调用：Function Calling、Tool Use 原理与实践",{"path":41,"title":42,"description":43},"\u002Fai\u002Ffundamentals\u002Fdeep-learning","深度学习入门","深度学习基础知识：前向传播、反向传播、损失函数、优化器",{"path":45,"title":46,"description":47},"\u002Fai\u002Ffundamentals\u002Fml-basics","机器学习基础","机器学习核心概念：监督学习、无监督学习、强化学习",{"path":49,"title":50,"description":51},"\u002Fai\u002Ffundamentals\u002Fneural-networks","神经网络原理","神经网络架构：CNN、RNN、注意力机制",{"path":53,"title":54,"description":55},"\u002Fai\u002Fgetting-started","AI 学习路线","AI 技术学习路线图，从基础到实战的完整指南",{"path":57,"title":58,"description":59},"\u002Fai\u002Fllm\u002Ffine-tuning","模型微调","大模型微调技术：LoRA、QLoRA、全量微调、RLHF",{"path":61,"title":62,"description":63},"\u002Fai\u002Fllm\u002Fintroduction","大模型概述","大语言模型发展历程、核心能力与主流模型对比",{"path":65,"title":66,"description":67},"\u002Fai\u002Fllm\u002Flocal-deploy","本地部署","大模型本地部署：Ollama、vLLM、llama.cpp",{"path":69,"title":70,"description":71},"\u002Fai\u002Fllm\u002Ftransformer","Transformer 架构","Transformer 架构详解：自注意力机制、位置编码、多头注意力",{"path":73,"title":74,"description":75},"\u002Fai\u002Fmcp\u002Fclient","MCP Client 开发","MCP Client 开发指南：连接、调用、集成",{"path":77,"title":78,"description":79},"\u002Fai\u002Fmcp\u002Fdebugging","MCP 调试与排错","MCP Server 开发与集成过程中的常见问题、日志分析、诊断工具",{"path":81,"title":82,"description":83},"\u002Fai\u002Fmcp\u002Fintroduction","MCP 概述","Model Context Protocol 
协议概述：架构、核心概念、应用场景",{"path":85,"title":86,"description":87},"\u002Fai\u002Fmcp\u002Fserver","MCP Server 开发","MCP Server 开发指南：资源、工具、提示词的实现",{"path":89,"title":90,"description":91},"\u002Fai\u002Fmcp\u002Ftools","MCP Tools 深入","深入理解 MCP Tools：与 Resources\u002FPrompts 的差异、Schema 设计、Annotations 与权限控制",{"path":93,"title":94,"description":95},"\u002Fai\u002Fprompt\u002Fadvanced","高级 Prompt 模式","高级 Prompt 设计模式：Tree-of-Thought、自我反思、多轮对话策略",{"path":97,"title":98,"description":99},"\u002Fai\u002Fprompt\u002Fbasics","Prompt 基础","Prompt Engineering 入门：基本概念、角色设定、输出格式控制",{"path":101,"title":102,"description":103},"\u002Fai\u002Fprompt\u002Ftechniques","提示词技巧","常用提示词技巧：Few-shot、Chain-of-Thought、ReAct",{"path":105,"title":106,"description":107},"\u002Fai\u002Frag\u002Fembedding","文本嵌入","文本嵌入模型：Embedding 原理、模型选择、相似度计算",{"path":109,"title":110,"description":111},"\u002Fai\u002Frag\u002Fintroduction","RAG 概述","检索增强生成（RAG）架构原理、优势与应用场景",{"path":113,"title":114,"description":115},"\u002Fai\u002Frag\u002Fpractice","RAG 实战","RAG 应用实战：文档问答系统、知识库搭建",{"path":117,"title":118,"description":119},"\u002Fai\u002Frag\u002Fvector-database","向量数据库","主流向量数据库对比：Milvus、Pinecone、Chroma、Weaviate",{"path":121,"title":122,"description":123},"\u002Fai\u002Fskills\u002Fbest-practices","Skill 最佳实践","编写高质量 Skill 的设计原则、常见陷阱与优化技巧",{"path":125,"title":126,"description":127},"\u002Fai\u002Fskills\u002Fcreating","创建自定义 Skill","从零编写一个可被 Agent 自动发现和调用的 Skill",{"path":129,"title":130,"description":131},"\u002Fai\u002Fskills\u002Fintroduction","Agent Skills 概述","Claude Agent Skills 概念、工作原理、与 Tools\u002FMCP 的区别",{"path":133,"title":134,"description":135},"\u002Fgolang\u002Fadvanced\u002Fconcurrency","Go - 并发深入","深入理解 Go 并发编程的核心机制。",{"path":137,"title":138,"description":139},"\u002Fgolang\u002Fadvanced\u002Fgc","Go - 垃圾回收","理解 Go 的垃圾回收机制，掌握 GC 调优方法。",{"path":141,"title":142,"description":143},"\u002Fgolang\u002Fadvanced\u002Fgmp","Go - GMP 调度模型","GMP 是 Go 运行时调度器的核心模型，理解它对于编写高性能 Go 程序至关重要。",{"path":145,"title":146,"description":147},"\u002Fgolang\u002Fadvanced\u002Fgo-concurrency","Go - 并发编程","Go 的并发是其核心特性之一，通过 Goroutine 和 Channel 实现。",{"path":149,"title":150,"description":151},"\u002Fgolang\u002Fadvanced\u002Fmemory","Go - 内存模型","理解 Go 的内存分配机制和内存模型。",{"path":153,"title":154,"description":155},"\u002Fgolang\u002Fadvanced\u002Fprofiling","Go - 性能分析","掌握 Go 的性能分析工具：pprof、trace、benchmark。",{"path":157,"title":158,"description":11},"\u002Fgolang\u002Fcore\u002Fgo-basic","Go - 基础语法",{"path":160,"title":161,"description":11},"\u002Fgolang\u002Fcore\u002Fgo-composite","Go - 复合类型",{"path":163,"title":164,"description":11},"\u002Fgolang\u002Fcore\u002Fgo-control","Go - 流程控制",{"path":166,"title":167,"description":168},"\u002Fgolang\u002Fcore\u002Fgo-error","Go - 错误处理","Go 使用显式的错误返回值来处理错误，而不是异常机制。",{"path":170,"title":171,"description":11},"\u002Fgolang\u002Fcore\u002Fgo-function","Go - 函数",{"path":173,"title":174,"description":11},"\u002Fgolang\u002Fcore\u002Fgo-install","Go - 环境搭建",{"path":176,"title":177,"description":11},"\u002Fgolang\u002Fcore\u002Fgo-interface","Go - 接口",{"path":179,"title":180,"description":181},"\u002Fgolang\u002Fcore\u002Fgo-module","Go - 包管理","Go Modules 是 Go 1.11 引入的官方依赖管理方案，Go 1.16 后成为默认模式。",{"path":183,"title":184,"description":185},"\u002Fgolang\u002Fdistributed\u002Fgrpc","Go - gRPC","gRPC 是 Google 开发的高性能 RPC 框架，使用 Protocol Buffers 作为序列化协议。",{"path":187,"title":188,"description":189},"\u002Fgolang\u002Fdistributed\u002Fmicroservice","Go - 
微服务","微服务架构的核心组件：服务发现、负载均衡、熔断降级。",{"path":191,"title":192,"description":193},"\u002Fgolang\u002Fdistributed\u002Fmq","Go - 消息队列","使用 Go 操作 Kafka 和 RabbitMQ。",{"path":195,"title":196,"description":197},"\u002Fgolang\u002Fdistributed\u002Fredis","Go - Redis","使用 go-redis 操作 Redis，实现缓存、分布式锁等功能。",{"path":199,"title":200,"description":201},"\u002Fgolang\u002Fengineering\u002Fconfig","Go - 配置管理","使用 viper 进行配置管理，支持多种配置格式和配置中心。",{"path":203,"title":204,"description":205},"\u002Fgolang\u002Fengineering\u002Fdocker","Go - Docker 部署","使用 Docker 容器化部署 Go 应用。",{"path":207,"title":208,"description":209},"\u002Fgolang\u002Fengineering\u002Fkubernetes","Go - Kubernetes 部署","在 Kubernetes 上部署和管理 Go 应用。",{"path":211,"title":212,"description":213},"\u002Fgolang\u002Fengineering\u002Flogging","Go - 日志系统","使用 zap 和 logrus 构建高性能结构化日志系统。",{"path":215,"title":216,"description":217},"\u002Fgolang\u002Fengineering\u002Ftesting","Go - 单元测试","Go 内置了强大的测试框架，掌握测试是编写高质量代码的基础。",{"path":219,"title":220,"description":221},"\u002Fgolang\u002Fstdlib\u002Fbufio","bufio","在 Go 语言中，bufio 包提供了带缓冲的 I\u002FO 操作，能够提高读写性能。以下是一些常用的 bufio 包 API 及其详细说明：",{"path":223,"title":224,"description":225},"\u002Fgolang\u002Fstdlib\u002Fcontainer","container","在Go语言标准库中，container 包提供了几种常用的数据结构实现，这些数据结构对于高效地管理和操作数据非常有用。以下是 container 包中主要的数据结构：",{"path":227,"title":228,"description":229},"\u002Fgolang\u002Fstdlib\u002Fcrypto","crypto","在 Go 语言中，crypto 包提供了一组用于加密和解密的功能。以下是一些常用的 crypto 包及其子包的 API 及其详细说明：",{"path":231,"title":232,"description":233},"\u002Fgolang\u002Fstdlib\u002Fencoding-csv","encoding\u002Fcsv","在 Go 语言中，encoding\u002Fcsv 包提供了对 CSV（逗号分隔值）文件进行读写的功能。以下是一些常用的 encoding\u002Fcsv 包的 API 及其详细说明：",{"path":235,"title":236,"description":237},"\u002Fgolang\u002Fstdlib\u002Fencoding-json","encoding\u002Fjson","在 Go 语言中，encoding\u002Fjson 包提供了对 JSON 数据进行编码和解码的功能。以下是一些常用的 encoding\u002Fjson 包的 API 及其详细说明：",{"path":239,"title":240,"description":241},"\u002Fgolang\u002Fstdlib\u002Fencoding-xml","encoding\u002Fxml","在 Go 语言中，encoding\u002Fxml 包提供了对 XML 数据进行编码和解码的功能。以下是一些常用的 encoding\u002Fxml 包的 API 及其详细说明：",{"path":243,"title":244,"description":245},"\u002Fgolang\u002Fstdlib\u002Fflag","flag","在Go语言中，flag 包是用于处理命令行参数的标准库，它提供了一种简单而直接的方式来解析和使用命令行参数。下面是关于 flag 包的一些基本介绍和常用功能：",{"path":247,"title":248,"description":249},"\u002Fgolang\u002Fstdlib\u002Ffmt","fmt","在 Go 语言的标准库中，fmt 包是非常重要的，它提供了处理格式化输入和输出的基本工具。以下是一些 fmt 包内常用的API：",{"path":251,"title":252,"description":253},"\u002Fgolang\u002Fstdlib\u002Fhttp","net\u002Fhttp","在 Go 语言中，net\u002Fhttp 包提供了用于构建 HTTP 客户端和服务器的强大工具。以下是一些常用的 net\u002Fhttp 包的 API 及其详细说明：",{"path":255,"title":256,"description":257},"\u002Fgolang\u002Fstdlib\u002Fio","io","在 Go 语言中，io 包提供了基本的输入输出功能。以下是一些常用的 io 包的 API 及其详细说明：",{"path":259,"title":260,"description":261},"\u002Fgolang\u002Fstdlib\u002Flog","log","在 Go 语言中，log 包提供了简单的日志记录功能。以下是一些常用的 log 包的 API 及其详细说明：",{"path":263,"title":264,"description":265},"\u002Fgolang\u002Fstdlib\u002Fmath","math","在 Go 语言中，math 包提供了基本的数学函数和常量。以下是一些常用的 math 包的 API 及其详细说明：",{"path":267,"title":268,"description":11},"\u002Fgolang\u002Fstdlib\u002Fnet","net",{"path":270,"title":271,"description":272},"\u002Fgolang\u002Fstdlib\u002Fos","os","在Go语言中，os 包是一个非常重要且常用的标准库，它提供了与操作系统交互的功能，包括文件操作、环境变量管理、进程管理等。下面是一些 os 包中常用的功能和API：",{"path":274,"title":275,"description":276},"\u002Fgolang\u002Fstdlib\u002Fsort","order","在 Go 语言中，sort 
包提供了对切片和用户定义的集合进行排序的函数。它实现了常见的排序算法，如快速排序（Quicksort）和堆排序（Heapsort），并且为自定义集合提供了接口，使得用户可以根据特定的需求进行排序。",{"path":278,"title":279,"description":280},"\u002Fgolang\u002Fstdlib\u002Fstrconv","strconv","在 Go 语言中，strconv 包提供了字符串和基本数据类型之间的转换函数，例如将整数转换为字符串、字符串转换为整数，以及其他类型之间的转换。这些功能非常有用，特别是在处理用户输入或从外部数据源读取数据时。",{"path":282,"title":283,"description":284},"\u002Fgolang\u002Fstdlib\u002Ftime","time","在 Go 语言中，time 包提供了处理时间和日期的功能。以下是一些常用的 time 包的 API 及其详细说明：",{"path":286,"title":287,"description":11},"\u002Fgolang\u002Fweb\u002Fgin\u002Ferror","Gin - 错误处理",{"path":289,"title":290,"description":11},"\u002Fgolang\u002Fweb\u002Fgin\u002Ffile","Gin - 文件处理",{"path":292,"title":293,"description":11},"\u002Fgolang\u002Fweb\u002Fgin\u002Fmiddleware","Gin - 中间件",{"path":295,"title":296,"description":297},"\u002Fgolang\u002Fweb\u002Fgin\u002Fquickstart","Gin - 快速开始","Gin 是目前最流行的 Go Web 框架，以高性能和简洁 API 著称。",{"path":299,"title":300,"description":11},"\u002Fgolang\u002Fweb\u002Fgin\u002Frequest","Gin - 请求处理",{"path":302,"title":303,"description":11},"\u002Fgolang\u002Fweb\u002Fgin\u002Fresponse","Gin - 响应处理",{"path":305,"title":306,"description":11},"\u002Fgolang\u002Fweb\u002Fgin\u002Frouter","Gin - 路由",{"path":308,"title":309,"description":11},"\u002Fgolang\u002Fweb\u002Fgin\u002Fvalidation","Gin - 参数校验",{"path":311,"title":312,"description":11},"\u002Fgolang\u002Fweb\u002Fgorm\u002Fassociation","GORM - 关联关系",{"path":314,"title":315,"description":11},"\u002Fgolang\u002Fweb\u002Fgorm\u002Fcrud","GORM - CRUD 操作",{"path":317,"title":318,"description":11},"\u002Fgolang\u002Fweb\u002Fgorm\u002Fmodel","GORM - 模型定义",{"path":320,"title":321,"description":11},"\u002Fgolang\u002Fweb\u002Fgorm\u002Fperformance","GORM - 日志与性能",{"path":323,"title":324,"description":11},"\u002Fgolang\u002Fweb\u002Fgorm\u002Fquery","GORM - 高级查询",{"path":326,"title":327,"description":328},"\u002Fgolang\u002Fweb\u002Fgorm\u002Fquickstart","GORM - 快速开始","GORM 是 Go 语言最流行的 ORM 库，功能强大，使用简单。",{"path":330,"title":331,"description":11},"\u002Fgolang\u002Fweb\u002Fgorm\u002Ftransaction","GORM - 事务与 Hook",{"path":333,"title":334,"description":335},"\u002Finterview\u002Fbasic","计算机基础面经","本章节汇总了面试中常见的通用技术概念，不局限于特定语言或数据库，是考察技术内功的关键考点。",{"path":337,"title":338,"description":339},"\u002Finterview\u002Fgolang","Golang 面试题","Go 语言面试高频考点，覆盖基础语法、数据结构、并发编程、内存管理、GC、调度器等核心知识。",{"path":341,"title":342,"description":343},"\u002Finterview\u002Fk8s","Kubernetes 面试题","Kubernetes（K8s）面试高频考点，覆盖架构原理、核心资源、网络存储、调度策略、运维监控等核心知识。",{"path":345,"title":346,"description":347},"\u002Finterview\u002Fmysql","MySQL 面试题","MySQL 数据库面试高频考点，覆盖索引、事务、锁、优化、主从复制等核心知识。",{"path":349,"title":350,"description":351},"\u002Finterview\u002Fredis","Redis 面试题","Redis 面试高频考点，覆盖数据结构、持久化、集群、缓存一致性、性能优化等核心知识。",{"path":353,"title":354,"description":355},"\u002Finterview\u002Frocketmq","RocketMQ 面试题","RocketMQ 面试高频考点，覆盖消息模型、可靠性、顺序消息、事务消息、存储与高可用等核心知识。",{"path":357,"title":358,"description":11},"\u002Fother\u002Fjava\u002Fcollection\u002Flist-arraylist","List - ArrayList 源码解析",{"path":360,"title":361,"description":11},"\u002Fother\u002Fjava\u002Fcollection\u002Flist-linkedlist","List - LinkedList 源码解析",{"path":363,"title":364,"description":11},"\u002Fother\u002Fjava\u002Fcollection\u002Flist-stack","List - Satck源码解析",{"path":366,"title":367,"description":11},"\u002Fother\u002Fjava\u002Fcollection\u002Flist-vectore","List - Vector 源码解析",{"path":369,"title":370,"description":11},"\u002Fother\u002Fjava\u002Fcollection\u002Fmap-hashmap","Map - HashMap 
源码解析",{"path":372,"title":373,"description":11},"\u002Fother\u002Fjava\u002Fcollection\u002Fmap-linkedhashmap","Map - LinkedHashMap 源码解析",{"path":375,"title":376,"description":11},"\u002Fother\u002Fjava\u002Fcollection\u002Fmap-treemap","Map - TreeMap 源码解析",{"path":378,"title":379,"description":11},"\u002Fother\u002Fjava\u002Fcollection\u002Fqueue-deque","Queue - Deque 接口解析",{"path":381,"title":382,"description":11},"\u002Fother\u002Fjava\u002Fcollection\u002Fqueue-queue","Queue - Queue 接口解析",{"path":384,"title":385,"description":11},"\u002Fother\u002Fjava\u002Fcollection\u002Fset-hashset","Set - HashSet源码解析",{"path":387,"title":388,"description":11},"\u002Fother\u002Fjava\u002Fcollection\u002Fset-linkedhashset","Set - LinkedHashSet 源码解析",{"path":390,"title":391,"description":392},"\u002Fother\u002Fjava\u002Fcollection\u002Fset-treeset","Set - TreeSet源码解析","TreeSet 是一个 Set 集合接口的实现类，与 HashSet 类似，其底层也是通过维护了一个 TreeMap 对象来封装了一些实现方法，故本篇不再对 TreeSet 的底层原理进行详细说明，仅对常用 API 做简单介绍，如需了解 TreeMap 的底层实现原理，请移步 Map - HashMap 源码解析",{"path":394,"title":395,"description":11},"\u002Fother\u002Fjava\u002Fcore\u002Fannotation","Java核心 - 注解",{"path":397,"title":398,"description":11},"\u002Fother\u002Fjava\u002Fcore\u002Fbasic-grammar","Java核心 - 基础语法",{"path":400,"title":401,"description":11},"\u002Fother\u002Fjava\u002Fcore\u002Fclass-and-object","Java核心 - 面向对象",{"path":403,"title":404,"description":11},"\u002Fother\u002Fjava\u002Fcore\u002Fcommon-classes","Java核心 - 常用类",{"path":406,"title":407,"description":11},"\u002Fother\u002Fjava\u002Fcore\u002Fexception","Java核心 - 异常处理",{"path":409,"title":410,"description":11},"\u002Fother\u002Fjava\u002Fcore\u002Fgenerics","Java核心 - 泛型",{"path":412,"title":413,"description":11},"\u002Fother\u002Fjava\u002Fcore\u002Fjdk-env-path","Java核心 - 环境搭建",{"path":415,"title":416,"description":11},"\u002Fother\u002Fjava\u002Fcore\u002Freflection","Java核心 - 反射",{"path":418,"title":419,"description":11},"\u002Fother\u002Fjava\u002Fcore\u002Fstring","Java核心 - String 字符串",{"path":421,"title":422,"description":423},"\u002Fother\u002Fjava\u002Fio\u002Fbuffer-stream","Java IO - 缓冲流","缓冲流是对基本流的包装，通过内置缓冲区减少系统调用次数，大幅提升读写效率。",{"path":425,"title":426,"description":427},"\u002Fother\u002Fjava\u002Fio\u002Fbyte-stream","Java IO - 字节流","字节流是 Java IO 中最基本的流类型，以字节（byte）为单位进行数据读写，可以处理任意类型的文件。",{"path":429,"title":430,"description":431},"\u002Fother\u002Fjava\u002Fio\u002Fchar-stream","Java IO - 字符流","字符流以字符为单位进行读写，专门用于处理文本文件。相比字节流，字符流能够正确处理字符编码，避免中文乱码问题。",{"path":433,"title":434,"description":11},"\u002Fother\u002Fjava\u002Fio\u002Ffile","Java IO - File 类",{"path":436,"title":437,"description":11},"\u002Fother\u002Fjava\u002Fio\u002Fio-stream-system","Java IO - IO流概述",{"path":439,"title":440,"description":441},"\u002Fother\u002Fjava\u002Fio\u002Fnio","Java IO - NIO","NIO（New IO）是 JDK 1.4 引入的新 IO 模型，提供了更高效的 IO 操作方式，支持非阻塞 IO 和多路复用。",{"path":443,"title":444,"description":445},"\u002Fother\u002Fjava\u002Fjvm\u002Fclass-loading","类加载机制","类加载机制是 JVM 将 .class 文件加载到内存，并对数据进行校验、转换解析和初始化，最终形成可被 JVM 直接使用的 Java 类型的过程。",{"path":447,"title":448,"description":449},"\u002Fother\u002Fjava\u002Fjvm\u002Fgarbage-collection","垃圾回收","垃圾回收（Garbage Collection，GC）是 JVM 自动管理内存的机制，负责回收不再使用的对象所占用的内存。",{"path":451,"title":452,"description":453},"\u002Fother\u002Fjava\u002Fjvm\u002Fjvm-memory","JVM 内存结构","JVM 在执行 Java 程序时，会把它管理的内存划分为若干个不同的数据区域。这些区域有各自的用途、创建和销毁时间。",{"path":455,"title":456,"description":457},"\u002Fother\u002Fjava\u002Fjvm\u002Fjvm-tuning","JVM 调优","JVM 调优是优化 Java 
应用性能的重要手段，主要包括参数配置、性能监控和问题排查。",{"path":459,"title":460,"description":461},"\u002Fother\u002Fjava\u002Fthread\u002Fatomic","原子类","Java 原子类（Atomic Classes）提供了一种无锁的线程安全方式，基于 CAS（Compare-And-Swap）操作实现。",{"path":463,"title":464,"description":465},"\u002Fother\u002Fjava\u002Fthread\u002Fcompletable-future","CompletableFuture","CompletableFuture 是 JDK 8 引入的异步编程工具，实现了 Future 和 CompletionStage 接口，支持函数式编程和链式调用。",{"path":467,"title":468,"description":469},"\u002Fother\u002Fjava\u002Fthread\u002Fconcurrent-collections","并发集合","Java 并发包提供了多种线程安全的集合类，用于替代传统的同步集合（如 Collections.synchronizedList）。",{"path":471,"title":472,"description":473},"\u002Fother\u002Fjava\u002Fthread\u002Fconcurrent-utils","并发工具类","Java 并发包提供了多种实用的并发工具类，用于控制线程之间的协调与同步。",{"path":475,"title":476,"description":11},"\u002Fother\u002Fjava\u002Fthread\u002Fsynchronized-lock","同步机制",{"path":478,"title":479,"description":11},"\u002Fother\u002Fjava\u002Fthread\u002Fthread-basic","线程基础",{"path":481,"title":482,"description":11},"\u002Fother\u002Fjava\u002Fthread\u002Fthread-pool","线程池",{"path":484,"title":485,"description":486},"\u002Fother\u002Fspring-series\u002Fspring\u002Fannotations-beans","Spring - 基于注解管理Bean","从 Java 5 开始，Java 增加了对注解（Annotation）的支持，它是代码中的一种特殊标记，可以在编译、类加载和运行时被读取，执行相应的处理。开发人员可以通过注解在不改变原有代码和逻辑的情况下，在源代码中嵌入补充信息。",{"path":488,"title":489,"description":490},"\u002Fother\u002Fspring-series\u002Fspring\u002Fimplement-ioc","Spring - 原理手写IoC","Spring 框架的 IOC 是基于 Java 反射机制实现的，在学习手写 IoC 之前，你需要具备一定的 Java 反射相关的知识，参考本站内的 Java 教程。",{"path":492,"title":493,"description":11},"\u002Fother\u002Fspring-series\u002Fspring\u002Fintroduction-case","Spring - 入门案例",{"path":495,"title":496,"description":11},"\u002Fother\u002Fspring-series\u002Fspring\u002Fspring-aop","Spring - 面向切面AOP",{"path":498,"title":499,"description":11},"\u002Fother\u002Fspring-series\u002Fspring\u002Fspring-aot","Spring - AOT提前编译",{"path":501,"title":502,"description":503},"\u002Fother\u002Fspring-series\u002Fspring\u002Fspring-data-validation","Spring - 数据校验","在开发中，我们经常遇到参数校验的需求，比如用户注册的时候，要校验用户名不能为空、用户名长度不超过20个字符、手机号是合法的手机号格式等等。如果使用普通方式，我们会把校验的代码和真正的业务处理逻辑耦合在一起，而且如果未来要新增一种校验逻辑也需要在修改多个地方。而spring validation允许通过注解的方式来定义对象校验规则，把校验和业务逻辑分离开，让代码编写更加方便。Spring Validation其实就是对Hibernate Validator进一步的封装，方便在Spring中使用。",{"path":505,"title":506,"description":11},"\u002Fother\u002Fspring-series\u002Fspring\u002Fspring-i18n","Spring - 国际化i18n",{"path":508,"title":509,"description":510},"\u002Fother\u002Fspring-series\u002Fspring\u002Fspring-ioc","Spring - IOC容器","IoC 是 Inversion of Control 的简写，译为“控制反转”，它不是一门技术，而是一种设计思想，是一个重要的面向对象编程法则，能够指导我们如何设计出松耦合、更优良的程序。",{"path":512,"title":513,"description":514},"\u002Fother\u002Fspring-series\u002Fspring\u002Fspring-junit","Spring - 单元测试JUnit","在之前的测试方法中，几乎都能看到以下的两行代码：",{"path":516,"title":517,"description":11},"\u002Fother\u002Fspring-series\u002Fspring\u002Fspring-resources","Spring - 资源操作",{"path":519,"title":520,"description":11},"\u002Fother\u002Fspring-series\u002Fspring\u002Fspring-summarize","Spring - Spring概述",{"path":522,"title":523,"description":11},"\u002Fother\u002Fspring-series\u002Fspring\u002Fspring-transaction","Spring - 事务",{"path":525,"title":526,"description":11},"\u002Fother\u002Fspring-series\u002Fspring\u002Fxml-beans","Spring - 基于XML管理Bean",{"path":528,"title":529,"description":11},"\u002Fother\u002Fspring-series\u002Fspringboot\u002Fspringboot-config","SpringBoot - 配置详解",{"path":531,"title":532,"description":11},"\u002Fother\u002Fspring-series\u002Fspringboot\u002Fspringboot-data","SpringBoot - 
数据访问",{"path":534,"title":535,"description":11},"\u002Fother\u002Fspring-series\u002Fspringboot\u002Fspringboot-quickstart","SpringBoot - 快速入门",{"path":537,"title":538,"description":11},"\u002Fother\u002Fspring-series\u002Fspringboot\u002Fspringboot-web","SpringBoot - Web 开发",{"path":540,"title":541,"description":11},"\u002Fother\u002Fspring-series\u002Fspringcloud\u002Fspringcloud-config","SpringCloud - 配置中心",{"path":543,"title":544,"description":11},"\u002Fother\u002Fspring-series\u002Fspringcloud\u002Fspringcloud-discovery","SpringCloud - 服务注册与发现",{"path":546,"title":547,"description":11},"\u002Fother\u002Fspring-series\u002Fspringcloud\u002Fspringcloud-feign","SpringCloud - 服务调用",{"path":549,"title":550,"description":11},"\u002Fother\u002Fspring-series\u002Fspringcloud\u002Fspringcloud-gateway","SpringCloud - 服务网关",{"path":552,"title":553,"description":11},"\u002Fother\u002Fspring-series\u002Fspringcloud\u002Fspringcloud-introduction","SpringCloud - 微服务概述",{"path":555,"title":556,"description":11},"\u002Fother\u002Fspring-series\u002Fspringcloud\u002Fspringcloud-sentinel","SpringCloud - 服务保护",{"path":558,"title":559,"description":11},"\u002Fother\u002Fspring-series\u002Fspringmvc\u002Fspringmvc-databind","SpringMVC - 数据绑定与转换",{"path":561,"title":562,"description":11},"\u002Fother\u002Fspring-series\u002Fspringmvc\u002Fspringmvc-exception","SpringMVC - 异常处理",{"path":564,"title":565,"description":11},"\u002Fother\u002Fspring-series\u002Fspringmvc\u002Fspringmvc-interceptor","SpringMVC - 拦截器",{"path":567,"title":568,"description":11},"\u002Fother\u002Fspring-series\u002Fspringmvc\u002Fspringmvc-introduction","SpringMVC - 简介与环境搭建",{"path":570,"title":571,"description":11},"\u002Fother\u002Fspring-series\u002Fspringmvc\u002Fspringmvc-request","SpringMVC - 请求处理",{"path":573,"title":574,"description":11},"\u002Fother\u002Fspring-series\u002Fspringmvc\u002Fspringmvc-response","SpringMVC - 响应处理",{"path":576,"title":577,"description":578},"\u002Fproject\u002Frocket-leaf\u002Farchitecture","项目架构","Rocket-Leaf 的目录结构、模块划分、数据流向，以及各层之间的依赖关系。",{"path":580,"title":581,"description":582},"\u002Fproject\u002Frocket-leaf\u002Fbackend-layers","后端分层设计","Rocket-Leaf 的 model \u002F rocketmq \u002F service 三层结构，以及服务之间的依赖关系与设计取舍。",{"path":584,"title":585,"description":586},"\u002Fproject\u002Frocket-leaf\u002Fclient-manager","RocketMQ 客户端管理器","AdminClientManager 的多客户端池、默认连接懒加载、自动重连重试的设计与实现。",{"path":588,"title":589,"description":590},"\u002Fproject\u002Frocket-leaf\u002Fencryption","连接信息加密存储","AES-256-GCM + SHA-256 字段级派生密钥的实现，以及如何在不破坏兼容性的前提下为历史明文数据做透明迁移。",{"path":592,"title":593,"description":594},"\u002Fproject\u002Frocket-leaf\u002Ffrontend","前端结构与类型绑定","React + Vite 目录组织、自动生成的 Wails 绑定、api 薄封装与自定义 hooks 的职责划分。",{"path":596,"title":597,"description":598},"\u002Fproject\u002Frocket-leaf","项目简介","Rocket-Leaf 是一款基于 Wails v3 构建的跨平台 RocketMQ 桌面管理客户端，Go 后端 + React 前端。本文档系列拆解它的架构与关键实现。",{"path":600,"title":601,"description":602},"\u002Fproject\u002Frocket-leaf\u002Fwails-v3","Wails v3 入门","Wails v3 的核心概念、Service 绑定机制，以及 Rocket-Leaf 是如何用它把 Go 后端和 React 前端打通的。",{"path":604,"title":605,"description":11},"\u002Ftutorials\u002Fcloud\u002Fdocker\u002Fdocker-basic","Docker - 入门基础",{"path":607,"title":608,"description":609},"\u002Ftutorials\u002Fcloud\u002Fdocker\u002Fdocker-compose","Docker - Compose","在部署应用时，常常使用到不止一个容器，那么在部署容器的时候就需要一个一个进行部署，这样的部署过程也相对来说比较繁琐复杂，也容易出问题，那么有没有一种更为简单的方法呢？",{"path":611,"title":612,"description":613},"\u002Ftutorials\u002Fcloud\u002Fdocker\u002Fdocker-container-connection","Docker - 
容器互联","在上一个章节中我们学习了 Docker 容器的端口映射，可以将 Docker 容器和本地以及网络中的端口进行连接起来。",{"path":615,"title":616,"description":11},"\u002Ftutorials\u002Fcloud\u002Fdocker\u002Fdocker-dockerfile","Docker - Dockerfile",{"path":618,"title":619,"description":11},"\u002Ftutorials\u002Fcloud\u002Fdocker\u002Fdocker-helloworld","Docker - HelloWorld",{"path":621,"title":622,"description":11},"\u002Ftutorials\u002Fcloud\u002Fdocker\u002Fdocker-install","Docker - 安装",{"path":624,"title":625,"description":11},"\u002Ftutorials\u002Fcloud\u002Fdocker\u002Fdocker-introduce","Docker - 简介",{"path":627,"title":628,"description":11},"\u002Ftutorials\u002Fcloud\u002Fdocker\u002Fdocker-object","Docker - 镜像、容器、仓库",{"path":630,"title":631,"description":632},"\u002Ftutorials\u002Fcloud\u002Fdocker\u002Fdocker-warehouse","Docker - 仓库管理","仓库是集中存放资源的地方，代码仓库是存放代码的，那么Docker 中的仓库就是存放 Docker 镜像的。",{"path":634,"title":635,"description":636},"\u002Ftutorials\u002Fcloud\u002Fdocker\u002Fdocker-web-containers","Docker - WEB应用实例","在之前的章节中，仅对普通容器进行了演示，但在实际中常常使用到 Docker 容器中的 WEB 应用程序。",{"path":638,"title":639,"description":11},"\u002Ftutorials\u002Fcloud\u002Fkubernetes\u002Fk8s-config","Kubernetes - ConfigMap 与 Secret",{"path":641,"title":642,"description":11},"\u002Ftutorials\u002Fcloud\u002Fkubernetes\u002Fk8s-helm","Kubernetes - Helm 包管理",{"path":644,"title":645,"description":11},"\u002Ftutorials\u002Fcloud\u002Fkubernetes\u002Fk8s-install","Kubernetes - 集群安装",{"path":647,"title":648,"description":11},"\u002Ftutorials\u002Fcloud\u002Fkubernetes\u002Fk8s-introduction","Kubernetes - 简介与架构",{"path":650,"title":651,"description":11},"\u002Ftutorials\u002Fcloud\u002Fkubernetes\u002Fk8s-kubectl","Kubernetes - kubectl 命令行工具",{"path":653,"title":654,"description":11},"\u002Ftutorials\u002Fcloud\u002Fkubernetes\u002Fk8s-monitoring","Kubernetes - 监控与日志",{"path":656,"title":657,"description":11},"\u002Ftutorials\u002Fcloud\u002Fkubernetes\u002Fk8s-network-security","Kubernetes - 网络与安全",{"path":659,"title":660,"description":11},"\u002Ftutorials\u002Fcloud\u002Fkubernetes\u002Fk8s-service","Kubernetes - Service 与 Ingress",{"path":662,"title":663,"description":11},"\u002Ftutorials\u002Fcloud\u002Fkubernetes\u002Fk8s-storage","Kubernetes - 持久化存储",{"path":665,"title":666,"description":11},"\u002Ftutorials\u002Fcloud\u002Fkubernetes\u002Fk8s-workload","Kubernetes - 工作负载资源",{"path":668,"title":669,"description":11},"\u002Ftutorials\u002Fcloud\u002Flinux\u002Flinux-bash","Linux - Bash 基础语法",{"path":671,"title":672,"description":11},"\u002Ftutorials\u002Fcloud\u002Flinux\u002Flinux-file-directory","Linux - 文件与目录操作",{"path":674,"title":675,"description":11},"\u002Ftutorials\u002Fcloud\u002Flinux\u002Flinux-network","Linux - 网络配置",{"path":677,"title":678,"description":11},"\u002Ftutorials\u002Fcloud\u002Flinux\u002Flinux-package","Linux - 软件包管理",{"path":680,"title":681,"description":11},"\u002Ftutorials\u002Fcloud\u002Flinux\u002Flinux-process","Linux - 进程管理",{"path":683,"title":684,"description":11},"\u002Ftutorials\u002Fcloud\u002Flinux\u002Flinux-scripts","Linux - 常用脚本示例",{"path":686,"title":687,"description":11},"\u002Ftutorials\u002Fcloud\u002Flinux\u002Flinux-service","Linux - 服务管理",{"path":689,"title":690,"description":11},"\u002Ftutorials\u002Fcloud\u002Flinux\u002Flinux-user-permission","Linux - 用户与权限管理",{"path":692,"title":693,"description":11},"\u002Ftutorials\u002Fcloud\u002Fnginx\u002Fnginx-https","Nginx - HTTPS 配置",{"path":695,"title":696,"description":11},"\u002Ftutorials\u002Fcloud\u002Fnginx\u002Fnginx-install","Nginx - 
安装与配置",{"path":698,"title":699,"description":11},"\u002Ftutorials\u002Fcloud\u002Fnginx\u002Fnginx-loadbalance","Nginx - 负载均衡",{"path":701,"title":702,"description":11},"\u002Ftutorials\u002Fcloud\u002Fnginx\u002Fnginx-optimization","Nginx - 性能优化",{"path":704,"title":705,"description":11},"\u002Ftutorials\u002Fcloud\u002Fnginx\u002Fnginx-proxy","Nginx - 反向代理",{"path":707,"title":708,"description":11},"\u002Ftutorials\u002Fcloud\u002Fnginx\u002Fnginx-static","Nginx - 静态资源服务",{"path":710,"title":711,"description":11},"\u002Ftutorials\u002Fcloud\u002Fnginx\u002Fnginx-vhost","Nginx - 虚拟主机配置",{"path":713,"title":714,"description":715},"\u002Ftutorials\u002Fdatabase\u002Fmysql\u002Fmysql-architecture","MySQL 高可用架构","主从复制、读写分离、分库分表。",{"path":717,"title":718,"description":719},"\u002Ftutorials\u002Fdatabase\u002Fmysql\u002Fmysql-index","MySQL 索引","索引是帮助 MySQL 高效获取数据的有序数据结构。",{"path":721,"title":722,"description":723},"\u002Ftutorials\u002Fdatabase\u002Fmysql\u002Fmysql-lock","MySQL 锁","锁用于解决并发访问时的数据一致性问题。",{"path":725,"title":726,"description":727},"\u002Ftutorials\u002Fdatabase\u002Fmysql\u002Fmysql-optimize","MySQL 性能优化","SQL 优化是后端开发必备技能。",{"path":729,"title":730,"description":731},"\u002Ftutorials\u002Fdatabase\u002Fmysql\u002Fmysql-transaction","MySQL 事务","事务是一组不可分割的操作，要么全部成功，要么全部失败。",{"path":733,"title":734,"description":735},"\u002Ftutorials\u002Fdatabase\u002Fmysql\u002Fsql-advanced","SQL 进阶","多表查询、子查询、函数、视图、存储过程。",{"path":737,"title":738,"description":739},"\u002Ftutorials\u002Fdatabase\u002Fmysql\u002Fsql-basic","SQL 基础","SQL（Structured Query Language）是操作关系型数据库的标准语言。",{"path":741,"title":742,"description":743},"\u002Ftutorials\u002Fdatabase\u002Fredis\u002Fredis-advanced","Redis 进阶功能","事务、发布订阅、Lua 脚本、Pipeline。",{"path":745,"title":746,"description":747},"\u002Ftutorials\u002Fdatabase\u002Fredis\u002Fredis-basic","Redis 基础","Redis 安装配置与基本命令。",{"path":749,"title":750,"description":751},"\u002Ftutorials\u002Fdatabase\u002Fredis\u002Fredis-cluster","Redis 高可用","主从复制、哨兵、Cluster 集群。",{"path":753,"title":754,"description":755},"\u002Ftutorials\u002Fdatabase\u002Fredis\u002Fredis-datatype","Redis 数据类型","Redis 5 种基本数据类型 + 4 种特殊类型。",{"path":757,"title":758,"description":759},"\u002Ftutorials\u002Fdatabase\u002Fredis\u002Fredis-optimize","Redis 性能优化","内存优化、缓存问题、最佳实践。",{"path":761,"title":762,"description":763},"\u002Ftutorials\u002Fdatabase\u002Fredis\u002Fredis-persistence","Redis 持久化","Redis 提供 RDB 和 AOF 两种持久化方式。",{"path":765,"title":766,"description":767},"\u002Ftutorials\u002Fdatabase\u002Fredis\u002Fredis-principle","Redis 
底层原理","数据结构、线程模型、网络模型。",{"path":769,"title":770,"description":771},"\u002Ftutorials\u002Fdev-idea\u002Fdesign-patterns\u002Fbehaiver-patterns\u002Fobserver-pattern","观察者模式","观察者模式属于行为型模式，定义了对象之间的一对多的依赖关系，在这种模式中，当一个对象的状态发生变化时，所有依赖于它的对象都会得到通知，并且执行相关操作。观察者模式又被成为“发布—订阅模式”，即发布者发生改变后，会通知所有订阅者。",{"path":773,"title":774,"description":11},"\u002Ftutorials\u002Fdev-idea\u002Fdesign-patterns\u002Fcreate-patterns\u002Ffactory-pattern","工厂模式",{"path":776,"title":777,"description":778},"\u002Ftutorials\u002Fdev-idea\u002Fdesign-patterns\u002Fcreate-patterns\u002Fsingleton-pattern","单例模式","单例模式是最常用的设计模式之一，他可以保证在整个应用中，某个类只存在一个实例化对象，即全局使用到该类的只有一个对象，这种模式在需要限制某些类的实例数量时非常有用，通常全局只需要一个该对象即可，如一些配置文件映射对象、数据库连接对象等。",{"path":780,"title":781,"description":782},"\u002Ftutorials\u002Fdev-idea\u002Fdesign-patterns\u002Fstructural-patterns\u002Fadapter-pattern","适配器模式","适配器模式是一种结构型模式，可以将一个类的接口转换成客户端所期望的另一种接口，适配器模式可以帮助开发人员在不修改现有代码的情况下，将不兼容的类组合在一起。",{"path":784,"title":785,"description":786},"\u002Ftutorials\u002Fdev-tools\u002Fgit\u002Fgit-basic-operations","Git 创建版本库","在 Git 上创建版本库有两种方式，一种是直接拷贝远程 Git 仓库到本地，另外一种是我们自己创建本地的版本库。",{"path":788,"title":789,"description":11},"\u002Ftutorials\u002Fdev-tools\u002Fgit\u002Fgit-branch-manage","Git 分支管理",{"path":791,"title":792,"description":11},"\u002Ftutorials\u002Fdev-tools\u002Fgit\u002Fgit-content-operations","Git 仓库内容操作",{"path":794,"title":795,"description":11},"\u002Ftutorials\u002Fdev-tools\u002Fgit\u002Fgit-introduce-install","Git 介绍和安装",{"path":797,"title":798,"description":11},"\u002Ftutorials\u002Fdev-tools\u002Fgit\u002Fgit-remote-manage","Git 远程管理",{"path":800,"title":801,"description":11},"\u002Ftutorials\u002Fdev-tools\u002Fgit\u002Fgit-workspace-index-repo","Git 工作原理",{"path":803,"title":804,"description":11},"\u002Ftutorials\u002Fdev-tools\u002Fhomebrew","HomeBrew 教程",{"path":806,"title":807,"description":11},"\u002Ftutorials\u002Fdev-tools\u002Fidea\u002Fshortcuts","快捷键",{"path":809,"title":810,"description":11},"\u002Ftutorials\u002Fdev-tools\u002Fmaven\u002Fintroduce-install-config","Maven - 介绍、安装、配置",{"path":812,"title":813,"description":11},"\u002Ftutorials\u002Ffront-end\u002Fvue3\u002Fbasic-knowledge","2. 基础知识",{"path":815,"title":816,"description":817},"\u002Ftutorials\u002Ffront-end\u002Fvue3\u002Fcomponent-communication","9. 组件通信","在前面的章节内，介绍了 Vue 中最核心的内容——组件的介绍和使用，和 Java 等编程语言相反，组件并不近似于这些变成语言中的类，类可以通过类或者其实例化的对象来相互交互，但 Vue 组件之间的作用域是相互独立的，这就意味着不同组件之间的数据无法相互引用。",{"path":819,"title":820,"description":821},"\u002Ftutorials\u002Ffront-end\u002Fvue3\u002Fcomputed","4. 计算属性","虽然直接在模板中使用表达式方便，但是如果在模板中添加很多逻辑，会让模板变的臃肿且难维护，耦合度较高。有没有一种简单的方式来实现呢？答案是有的。",{"path":823,"title":824,"description":11},"\u002Ftutorials\u002Ffront-end\u002Fvue3\u002Fcreate-vue-project","1. 环境搭建及安装",{"path":826,"title":827,"description":828},"\u002Ftutorials\u002Ffront-end\u002Fvue3\u002Flife-cycle","6. 生命周期","生命周期是指组件从创建、挂载、更新到销毁的整个过程中所经历的一系列阶段。在 Vue 中，每个组件都有自己的生命周期，可以通过生命周期钩子函数来监听和处理组件在不同阶段的行为和状态。",{"path":830,"title":831,"description":11},"\u002Ftutorials\u002Ffront-end\u002Fvue3\u002Fother-api","10. 其他 API",{"path":833,"title":834,"description":11},"\u002Ftutorials\u002Ffront-end\u002Fvue3\u002Fpinia","8. Pinia",{"path":836,"title":837,"description":11},"\u002Ftutorials\u002Ffront-end\u002Fvue3\u002Frouter","7. 路由",{"path":839,"title":840,"description":11},"\u002Ftutorials\u002Ffront-end\u002Fvue3\u002Ftemplate-grammar","3. 指令及模板语法",{"path":842,"title":843,"description":11},"\u002Ftutorials\u002Ffront-end\u002Fvue3\u002Fvue3-new-component","11. 
Vue3 新组件",{"path":845,"title":846,"description":847},"\u002Ftutorials\u002Ffront-end\u002Fvue3\u002Fwatch","5. 监视","Watch 是 Vue 提供的一个用于监视响应式数据变化并执行相应操作的 API，能够对响应式数据的变化做出一些操作的功能。Vue3 中的 Watch 支持多种用法，包括监视响应式对象、ref 对象、数组、函数等。",{"path":849,"title":850,"description":11},"\u002Ftutorials\u002Fmq\u002Fkafka\u002Fkafka-introduction","Kafka 简介与安装",{"path":852,"title":853,"description":11},"\u002Ftutorials\u002Fmq\u002Fkafka\u002Fkafka-producer-consumer","Kafka 生产者与消费者",{"path":855,"title":856,"description":857},"\u002Ftutorials\u002Fmq\u002Fkafka\u002Fkafka-springboot","Spring Boot 整合 Kafka","Spring Kafka 提供了对 Apache Kafka 的便捷集成。",{"path":859,"title":860,"description":861},"\u002Ftutorials\u002Fmq\u002Frabbitmq\u002Frabbitmq-exchange","RabbitMQ Exchange 详解","Exchange（交换机）是 RabbitMQ 的核心组件，负责接收生产者发送的消息，并根据规则将消息路由到一个或多个队列。",{"path":863,"title":864,"description":11},"\u002Ftutorials\u002Fmq\u002Frabbitmq\u002Frabbitmq-introduction","RabbitMQ 简介与安装",{"path":866,"title":867,"description":868},"\u002Ftutorials\u002Fmq\u002Frabbitmq\u002Frabbitmq-reliability","RabbitMQ 消息可靠性","消息可靠性是消息队列的核心要求，RabbitMQ 提供了多种机制来保证消息不丢失。",{"path":870,"title":871,"description":872},"\u002Ftutorials\u002Fmq\u002Frabbitmq\u002Frabbitmq-springboot","Spring Boot 整合 RabbitMQ","Spring AMQP 提供了对 RabbitMQ 的便捷集成，大大简化了开发工作。",{"path":874,"title":875,"description":11},"\u002Ftutorials\u002Fmq\u002Frocketmq\u002Frocketmq-client","RocketMQ 客户端使用",{"path":877,"title":878,"description":11},"\u002Ftutorials\u002Fmq\u002Frocketmq\u002Frocketmq-concepts","RocketMQ 核心概念",{"path":880,"title":881,"description":11},"\u002Ftutorials\u002Fmq\u002Frocketmq\u002Frocketmq-installation","RocketMQ 安装部署",{"path":883,"title":884,"description":885},"\u002Ftutorials\u002Fmq\u002Frocketmq\u002Frocketmq-message-type","RocketMQ 消息类型","RocketMQ 支持多种消息类型，满足不同业务场景需求。",{"id":887,"title":58,"body":888,"description":59,"extension":3239,"meta":3240,"navigation":1038,"path":57,"seo":3241,"stem":3242,"__hash__":3243},"docs\u002Fai\u002Fllm\u002Ffine-tuning.md",{"type":889,"value":890,"toc":3196},"minimark",[891,895,899,902,937,941,944,954,958,961,965,973,976,1000,1240,1244,1248,1251,1254,1257,1260,1266,1269,1276,1343,1346,1374,1377,1573,1577,1581,1584,1587,1607,1610,1613,1651,1810,1813,1817,1820,1823,1826,1927,1930,1977,1980,2034,2037,2041,2044,2047,2052,2055,2060,2063,2069,2074,2077,2083,2086,2089,2093,2096,2099,2102,2149,2153,2225,2403,2406,2410,2413,2487,2491,2494,2576,2579,2596,2599,2619,2622,2625,2629,2640,2644,2816,2820,3009,3013,3024,3028,3039,3112,3115,3186,3189,3192],[892,893,894],"h2",{"id":894},"为什么要微调",[896,897,898],"p",{},"预训练大模型虽然已经具备了广泛的语言能力，但它们是在通用数据上训练的\"通才\"。在实际应用中，我们往往需要模型在特定领域或任务上表现得更好。微调（Fine-Tuning）就是在预训练模型的基础上，使用特定领域的数据继续训练，让模型变成某个领域的\"专家\"。",[896,900,901],{},"微调的常见动机：",[903,904,905,913,919,925,931],"ul",{},[906,907,908,912],"li",{},[909,910,911],"strong",{},"领域适配","：让模型掌握医疗、法律、金融等专业领域的知识和术语",[906,914,915,918],{},[909,916,917],{},"风格控制","：统一模型输出的语气、格式和风格",[906,920,921,924],{},[909,922,923],{},"任务优化","：针对特定任务（如信息抽取、分类、代码生成）优化效果",[906,926,927,930],{},[909,928,929],{},"降低成本","：微调后的小模型可能在特定任务上达到大模型的效果，从而降低推理成本",[906,932,933,936],{},[909,934,935],{},"数据隐私","：在私有数据上训练，确保敏感信息不通过 API 传输",[938,939,940],"note",{},"\n在决定微调之前，建议先尝试提示词工程（Prompt Engineering）和 RAG（检索增强生成）。如果这些方法无法满足需求，再考虑微调。微调需要较多的数据、算力和调参经验。\n",[892,942,943],{"id":943},"微调方法全景",[945,946,951],"pre",{"className":947,"code":949,"language":950},[948],"language-text","微调方法\n├── 全量微调（Full Fine-Tuning）\n│   └── 更新所有参数\n├── 参数高效微调（PEFT）\n│   ├── LoRA \u002F QLoRA\n│   ├── Adapter\n│   ├── Prefix 
Tuning\n│   └── Prompt Tuning\n└── 对齐训练\n    ├── SFT（监督微调）\n    ├── RLHF（人类反馈强化学习）\n    └── DPO（直接偏好优化）\n","text",[952,953,949],"code",{"__ignoreMap":11},[892,955,957],{"id":956},"全量微调full-fine-tuning","全量微调（Full Fine-Tuning）",[896,959,960],{},"全量微调是最直接的方式——更新模型的所有参数。",[962,963,964],"h3",{"id":964},"优点",[903,966,967,970],{},[906,968,969],{},"理论上能达到最好的效果",[906,971,972],{},"实现简单",[962,974,975],{"id":975},"缺点",[903,977,978,984,989,995],{},[906,979,980,983],{},[909,981,982],{},"显存需求巨大","：以 7B 模型为例，仅参数就需要约 14GB（FP16），加上优化器状态和梯度，总显存需求约 60-80GB",[906,985,986],{},[909,987,988],{},"训练时间长",[906,990,991,994],{},[909,992,993],{},"灾难性遗忘","：可能丢失预训练阶段学到的通用能力",[906,996,997],{},[909,998,999],{},"需要存储完整的模型副本",[945,1001,1005],{"className":1002,"code":1003,"language":1004,"meta":11,"style":11},"language-python shiki shiki-themes github-light github-light github-dark","# 全量微调的基本流程（以 Hugging Face Transformers 为例）\nfrom transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments, Trainer\n\nmodel = AutoModelForCausalLM.from_pretrained(\"meta-llama\u002FLlama-3-8B\")\ntokenizer = AutoTokenizer.from_pretrained(\"meta-llama\u002FLlama-3-8B\")\n\ntraining_args = TrainingArguments(\n    output_dir=\".\u002Foutput\",\n    num_train_epochs=3,\n    per_device_train_batch_size=4,\n    learning_rate=2e-5,\n    bf16=True,\n    gradient_accumulation_steps=4,\n)\n\ntrainer = Trainer(\n    model=model,\n    args=training_args,\n    train_dataset=train_dataset,\n)\n\ntrainer.train()\n","python",[952,1006,1007,1016,1033,1040,1059,1074,1079,1090,1105,1119,1132,1145,1158,1170,1175,1180,1191,1202,1213,1224,1229,1234],{"__ignoreMap":11},[1008,1009,1012],"span",{"class":1010,"line":1011},"line",1,[1008,1013,1015],{"class":1014},"sCsY4","# 全量微调的基本流程（以 Hugging Face Transformers 为例）\n",[1008,1017,1019,1023,1027,1030],{"class":1010,"line":1018},2,[1008,1020,1022],{"class":1021},"s8jYJ","from",[1008,1024,1026],{"class":1025},"sxrX7"," transformers ",[1008,1028,1029],{"class":1021},"import",[1008,1031,1032],{"class":1025}," AutoModelForCausalLM, AutoTokenizer, TrainingArguments, Trainer\n",[1008,1034,1036],{"class":1010,"line":1035},3,[1008,1037,1039],{"emptyLinePlaceholder":1038},true,"\n",[1008,1041,1043,1046,1049,1052,1056],{"class":1010,"line":1042},4,[1008,1044,1045],{"class":1025},"model ",[1008,1047,1048],{"class":1021},"=",[1008,1050,1051],{"class":1025}," AutoModelForCausalLM.from_pretrained(",[1008,1053,1055],{"class":1054},"sIIMD","\"meta-llama\u002FLlama-3-8B\"",[1008,1057,1058],{"class":1025},")\n",[1008,1060,1062,1065,1067,1070,1072],{"class":1010,"line":1061},5,[1008,1063,1064],{"class":1025},"tokenizer ",[1008,1066,1048],{"class":1021},[1008,1068,1069],{"class":1025}," AutoTokenizer.from_pretrained(",[1008,1071,1055],{"class":1054},[1008,1073,1058],{"class":1025},[1008,1075,1077],{"class":1010,"line":1076},6,[1008,1078,1039],{"emptyLinePlaceholder":1038},[1008,1080,1082,1085,1087],{"class":1010,"line":1081},7,[1008,1083,1084],{"class":1025},"training_args ",[1008,1086,1048],{"class":1021},[1008,1088,1089],{"class":1025}," TrainingArguments(\n",[1008,1091,1093,1097,1099,1102],{"class":1010,"line":1092},8,[1008,1094,1096],{"class":1095},"sP4rz","    output_dir",[1008,1098,1048],{"class":1021},[1008,1100,1101],{"class":1054},"\".\u002Foutput\"",[1008,1103,1104],{"class":1025},",\n",[1008,1106,1108,1111,1113,1117],{"class":1010,"line":1107},9,[1008,1109,1110],{"class":1095},"    
num_train_epochs",[1008,1112,1048],{"class":1021},[1008,1114,1116],{"class":1115},"sBjJW","3",[1008,1118,1104],{"class":1025},[1008,1120,1122,1125,1127,1130],{"class":1010,"line":1121},10,[1008,1123,1124],{"class":1095},"    per_device_train_batch_size",[1008,1126,1048],{"class":1021},[1008,1128,1129],{"class":1115},"4",[1008,1131,1104],{"class":1025},[1008,1133,1135,1138,1140,1143],{"class":1010,"line":1134},11,[1008,1136,1137],{"class":1095},"    learning_rate",[1008,1139,1048],{"class":1021},[1008,1141,1142],{"class":1115},"2e-5",[1008,1144,1104],{"class":1025},[1008,1146,1148,1151,1153,1156],{"class":1010,"line":1147},12,[1008,1149,1150],{"class":1095},"    bf16",[1008,1152,1048],{"class":1021},[1008,1154,1155],{"class":1115},"True",[1008,1157,1104],{"class":1025},[1008,1159,1161,1164,1166,1168],{"class":1010,"line":1160},13,[1008,1162,1163],{"class":1095},"    gradient_accumulation_steps",[1008,1165,1048],{"class":1021},[1008,1167,1129],{"class":1115},[1008,1169,1104],{"class":1025},[1008,1171,1173],{"class":1010,"line":1172},14,[1008,1174,1058],{"class":1025},[1008,1176,1178],{"class":1010,"line":1177},15,[1008,1179,1039],{"emptyLinePlaceholder":1038},[1008,1181,1183,1186,1188],{"class":1010,"line":1182},16,[1008,1184,1185],{"class":1025},"trainer ",[1008,1187,1048],{"class":1021},[1008,1189,1190],{"class":1025}," Trainer(\n",[1008,1192,1194,1197,1199],{"class":1010,"line":1193},17,[1008,1195,1196],{"class":1095},"    model",[1008,1198,1048],{"class":1021},[1008,1200,1201],{"class":1025},"model,\n",[1008,1203,1205,1208,1210],{"class":1010,"line":1204},18,[1008,1206,1207],{"class":1095},"    args",[1008,1209,1048],{"class":1021},[1008,1211,1212],{"class":1025},"training_args,\n",[1008,1214,1216,1219,1221],{"class":1010,"line":1215},19,[1008,1217,1218],{"class":1095},"    train_dataset",[1008,1220,1048],{"class":1021},[1008,1222,1223],{"class":1025},"train_dataset,\n",[1008,1225,1227],{"class":1010,"line":1226},20,[1008,1228,1058],{"class":1025},[1008,1230,1232],{"class":1010,"line":1231},21,[1008,1233,1039],{"emptyLinePlaceholder":1038},[1008,1235,1237],{"class":1010,"line":1236},22,[1008,1238,1239],{"class":1025},"trainer.train()\n",[1241,1242,1243],"warning",{},"\n全量微调 7B 模型至少需要 1 张 80GB 显存的 A100 GPU。对于大多数开发者和中小企业来说，参数高效微调（PEFT）是更务实的选择。\n",[892,1245,1247],{"id":1246},"loralow-rank-adaptation","LoRA（Low-Rank Adaptation）",[896,1249,1250],{},"LoRA 是目前最流行的参数高效微调方法，由微软在 2021 年提出。",[962,1252,1253],{"id":1253},"核心原理",[896,1255,1256],{},"LoRA 的核心思想是：微调时模型权重的变化量 ΔW 是低秩的，不需要更新完整的权重矩阵。",[896,1258,1259],{},"具体做法是将权重变化分解为两个低秩矩阵的乘积：",[945,1261,1264],{"className":1262,"code":1263,"language":950},[948],"W' = W + ΔW = W + A × B\n\n其中：\n- W: 原始预训练权重（冻结不更新），形状 [d, k]\n- A: 低秩矩阵，形状 [d, r]，随机初始化\n- B: 低秩矩阵，形状 [r, k]，初始化为 0\n- r: 秩（rank），远小于 d 和 k，通常取 8-64\n",[952,1265,1263],{"__ignoreMap":11},[962,1267,1268],{"id":1268},"参数量对比",[896,1270,1271,1272,1275],{},"以一个 ",[952,1273,1274],{},"[4096, 4096]"," 的权重矩阵为例：",[1277,1278,1279,1295],"table",{},[1280,1281,1282],"thead",{},[1283,1284,1285,1289,1292],"tr",{},[1286,1287,1288],"th",{},"方式",[1286,1290,1291],{},"可训练参数量",[1286,1293,1294],{},"比例",[1296,1297,1298,1310,1321,1332],"tbody",{},[1283,1299,1300,1304,1307],{},[1301,1302,1303],"td",{},"全量微调",[1301,1305,1306],{},"16,777,216",[1301,1308,1309],{},"100%",[1283,1311,1312,1315,1318],{},[1301,1313,1314],{},"LoRA (r=8)",[1301,1316,1317],{},"65,536",[1301,1319,1320],{},"0.39%",[1283,1322,1323,1326,1329],{},[1301,1324,1325],{},"LoRA 
(r=16)",[1301,1327,1328],{},"131,072",[1301,1330,1331],{},"0.78%",[1283,1333,1334,1337,1340],{},[1301,1335,1336],{},"LoRA (r=64)",[1301,1338,1339],{},"524,288",[1301,1341,1342],{},"3.12%",[962,1344,1345],{"id":1345},"关键超参数",[903,1347,1348,1354,1368],{},[906,1349,1350,1353],{},[909,1351,1352],{},"rank (r)","：秩的大小，决定了 LoRA 的表达能力。通常取 8-64，任务越复杂可以适当增大",[906,1355,1356,1359,1360,1363,1364,1367],{},[909,1357,1358],{},"alpha","：缩放因子，最终 ΔW 会乘以 ",[952,1361,1362],{},"alpha\u002Fr","。通常设为 ",[952,1365,1366],{},"r"," 的 1-2 倍",[906,1369,1370,1373],{},[909,1371,1372],{},"target_modules","：要应用 LoRA 的模块，通常选择注意力层的 Q、K、V、O 投影矩阵",[962,1375,1376],{"id":1376},"代码示例",[945,1378,1380],{"className":1002,"code":1379,"language":1004,"meta":11,"style":11},"from peft import LoraConfig, get_peft_model, TaskType\n\n# 配置 LoRA\nlora_config = LoraConfig(\n    task_type=TaskType.CAUSAL_LM,\n    r=16,                              # 秩\n    lora_alpha=32,                     # 缩放因子\n    lora_dropout=0.05,                 # Dropout\n    target_modules=[                   # 目标模块\n        \"q_proj\", \"k_proj\", \"v_proj\", \"o_proj\",\n        \"gate_proj\", \"up_proj\", \"down_proj\"\n    ],\n)\n\n# 应用 LoRA\nmodel = get_peft_model(model, lora_config)\n\n# 查看可训练参数\nmodel.print_trainable_parameters()\n# 输出示例：trainable params: 41,943,040 || all params: 8,072,204,288 || trainable%: 0.52%\n",[952,1381,1382,1394,1398,1403,1413,1428,1444,1460,1476,1489,1512,1527,1532,1536,1540,1545,1554,1558,1563,1568],{"__ignoreMap":11},[1008,1383,1384,1386,1389,1391],{"class":1010,"line":1011},[1008,1385,1022],{"class":1021},[1008,1387,1388],{"class":1025}," peft ",[1008,1390,1029],{"class":1021},[1008,1392,1393],{"class":1025}," LoraConfig, get_peft_model, TaskType\n",[1008,1395,1396],{"class":1010,"line":1018},[1008,1397,1039],{"emptyLinePlaceholder":1038},[1008,1399,1400],{"class":1010,"line":1035},[1008,1401,1402],{"class":1014},"# 配置 LoRA\n",[1008,1404,1405,1408,1410],{"class":1010,"line":1042},[1008,1406,1407],{"class":1025},"lora_config ",[1008,1409,1048],{"class":1021},[1008,1411,1412],{"class":1025}," LoraConfig(\n",[1008,1414,1415,1418,1420,1423,1426],{"class":1010,"line":1061},[1008,1416,1417],{"class":1095},"    task_type",[1008,1419,1048],{"class":1021},[1008,1421,1422],{"class":1025},"TaskType.",[1008,1424,1425],{"class":1115},"CAUSAL_LM",[1008,1427,1104],{"class":1025},[1008,1429,1430,1433,1435,1438,1441],{"class":1010,"line":1076},[1008,1431,1432],{"class":1095},"    r",[1008,1434,1048],{"class":1021},[1008,1436,1437],{"class":1115},"16",[1008,1439,1440],{"class":1025},",                              ",[1008,1442,1443],{"class":1014},"# 秩\n",[1008,1445,1446,1449,1451,1454,1457],{"class":1010,"line":1081},[1008,1447,1448],{"class":1095},"    lora_alpha",[1008,1450,1048],{"class":1021},[1008,1452,1453],{"class":1115},"32",[1008,1455,1456],{"class":1025},",                     ",[1008,1458,1459],{"class":1014},"# 缩放因子\n",[1008,1461,1462,1465,1467,1470,1473],{"class":1010,"line":1092},[1008,1463,1464],{"class":1095},"    lora_dropout",[1008,1466,1048],{"class":1021},[1008,1468,1469],{"class":1115},"0.05",[1008,1471,1472],{"class":1025},",                 ",[1008,1474,1475],{"class":1014},"# Dropout\n",[1008,1477,1478,1481,1483,1486],{"class":1010,"line":1107},[1008,1479,1480],{"class":1095},"    target_modules",[1008,1482,1048],{"class":1021},[1008,1484,1485],{"class":1025},"[                   ",[1008,1487,1488],{"class":1014},"# 
目标模块\n",[1008,1490,1491,1494,1497,1500,1502,1505,1507,1510],{"class":1010,"line":1121},[1008,1492,1493],{"class":1054},"        \"q_proj\"",[1008,1495,1496],{"class":1025},", ",[1008,1498,1499],{"class":1054},"\"k_proj\"",[1008,1501,1496],{"class":1025},[1008,1503,1504],{"class":1054},"\"v_proj\"",[1008,1506,1496],{"class":1025},[1008,1508,1509],{"class":1054},"\"o_proj\"",[1008,1511,1104],{"class":1025},[1008,1513,1514,1517,1519,1522,1524],{"class":1010,"line":1134},[1008,1515,1516],{"class":1054},"        \"gate_proj\"",[1008,1518,1496],{"class":1025},[1008,1520,1521],{"class":1054},"\"up_proj\"",[1008,1523,1496],{"class":1025},[1008,1525,1526],{"class":1054},"\"down_proj\"\n",[1008,1528,1529],{"class":1010,"line":1147},[1008,1530,1531],{"class":1025},"    ],\n",[1008,1533,1534],{"class":1010,"line":1160},[1008,1535,1058],{"class":1025},[1008,1537,1538],{"class":1010,"line":1172},[1008,1539,1039],{"emptyLinePlaceholder":1038},[1008,1541,1542],{"class":1010,"line":1177},[1008,1543,1544],{"class":1014},"# 应用 LoRA\n",[1008,1546,1547,1549,1551],{"class":1010,"line":1182},[1008,1548,1045],{"class":1025},[1008,1550,1048],{"class":1021},[1008,1552,1553],{"class":1025}," get_peft_model(model, lora_config)\n",[1008,1555,1556],{"class":1010,"line":1193},[1008,1557,1039],{"emptyLinePlaceholder":1038},[1008,1559,1560],{"class":1010,"line":1204},[1008,1561,1562],{"class":1014},"# 查看可训练参数\n",[1008,1564,1565],{"class":1010,"line":1215},[1008,1566,1567],{"class":1025},"model.print_trainable_parameters()\n",[1008,1569,1570],{"class":1010,"line":1226},[1008,1571,1572],{"class":1014},"# 输出示例：trainable params: 41,943,040 || all params: 8,072,204,288 || trainable%: 0.52%\n",[1574,1575,1576],"tip",{},"\nLoRA 训练完成后，产生的适配器文件通常只有几十 MB，非常方便保存和分发。你可以为同一个基础模型训练多个 LoRA 适配器，在推理时按需加载。\n",[892,1578,1580],{"id":1579},"qloraquantized-lora","QLoRA（Quantized LoRA）",[896,1582,1583],{},"QLoRA 在 LoRA 的基础上引入了量化技术，进一步降低了显存需求。",[962,1585,1586],{"id":1586},"核心创新",[903,1588,1589,1595,1601],{},[906,1590,1591,1594],{},[909,1592,1593],{},"4-bit NormalFloat 量化","：将基础模型以 4-bit 精度加载，显存占用降为 FP16 的约 1\u002F4",[906,1596,1597,1600],{},[909,1598,1599],{},"双重量化（Double Quantization）","：对量化常数本身也进行量化，进一步节省显存",[906,1602,1603,1606],{},[909,1604,1605],{},"分页优化器（Paged Optimizers）","：利用 CPU 内存处理显存溢出",[962,1608,1609],{"id":1609},"显存对比",[896,1611,1612],{},"以 Llama 3 8B 模型为例：",[1277,1614,1615,1625],{},[1280,1616,1617],{},[1283,1618,1619,1622],{},[1286,1620,1621],{},"微调方式",[1286,1623,1624],{},"显存需求",[1296,1626,1627,1635,1643],{},[1283,1628,1629,1632],{},[1301,1630,1631],{},"全量微调 (FP16)",[1301,1633,1634],{},"~60 GB",[1283,1636,1637,1640],{},[1301,1638,1639],{},"LoRA (FP16)",[1301,1641,1642],{},"~18 GB",[1283,1644,1645,1648],{},[1301,1646,1647],{},"QLoRA (4-bit)",[1301,1649,1650],{},"~6 GB",[945,1652,1654],{"className":1002,"code":1653,"language":1004,"meta":11,"style":11},"from transformers import BitsAndBytesConfig\n\n# 配置 4-bit 量化\nbnb_config = BitsAndBytesConfig(\n    load_in_4bit=True,\n    bnb_4bit_quant_type=\"nf4\",          # NormalFloat4 量化\n    bnb_4bit_compute_dtype=torch.bfloat16,\n    bnb_4bit_use_double_quant=True,      # 双重量化\n)\n\n# 加载量化模型\nmodel = AutoModelForCausalLM.from_pretrained(\n    \"meta-llama\u002FLlama-3-8B\",\n    quantization_config=bnb_config,\n    device_map=\"auto\",\n)\n\n# 然后像普通 LoRA 一样应用 PEFT\nmodel = get_peft_model(model, 
lora_config)\n",[952,1655,1656,1667,1671,1676,1686,1697,1713,1723,1738,1742,1746,1751,1760,1767,1777,1789,1793,1797,1802],{"__ignoreMap":11},[1008,1657,1658,1660,1662,1664],{"class":1010,"line":1011},[1008,1659,1022],{"class":1021},[1008,1661,1026],{"class":1025},[1008,1663,1029],{"class":1021},[1008,1665,1666],{"class":1025}," BitsAndBytesConfig\n",[1008,1668,1669],{"class":1010,"line":1018},[1008,1670,1039],{"emptyLinePlaceholder":1038},[1008,1672,1673],{"class":1010,"line":1035},[1008,1674,1675],{"class":1014},"# 配置 4-bit 量化\n",[1008,1677,1678,1681,1683],{"class":1010,"line":1042},[1008,1679,1680],{"class":1025},"bnb_config ",[1008,1682,1048],{"class":1021},[1008,1684,1685],{"class":1025}," BitsAndBytesConfig(\n",[1008,1687,1688,1691,1693,1695],{"class":1010,"line":1061},[1008,1689,1690],{"class":1095},"    load_in_4bit",[1008,1692,1048],{"class":1021},[1008,1694,1155],{"class":1115},[1008,1696,1104],{"class":1025},[1008,1698,1699,1702,1704,1707,1710],{"class":1010,"line":1076},[1008,1700,1701],{"class":1095},"    bnb_4bit_quant_type",[1008,1703,1048],{"class":1021},[1008,1705,1706],{"class":1054},"\"nf4\"",[1008,1708,1709],{"class":1025},",          ",[1008,1711,1712],{"class":1014},"# NormalFloat4 量化\n",[1008,1714,1715,1718,1720],{"class":1010,"line":1081},[1008,1716,1717],{"class":1095},"    bnb_4bit_compute_dtype",[1008,1719,1048],{"class":1021},[1008,1721,1722],{"class":1025},"torch.bfloat16,\n",[1008,1724,1725,1728,1730,1732,1735],{"class":1010,"line":1092},[1008,1726,1727],{"class":1095},"    bnb_4bit_use_double_quant",[1008,1729,1048],{"class":1021},[1008,1731,1155],{"class":1115},[1008,1733,1734],{"class":1025},",      ",[1008,1736,1737],{"class":1014},"# 双重量化\n",[1008,1739,1740],{"class":1010,"line":1107},[1008,1741,1058],{"class":1025},[1008,1743,1744],{"class":1010,"line":1121},[1008,1745,1039],{"emptyLinePlaceholder":1038},[1008,1747,1748],{"class":1010,"line":1134},[1008,1749,1750],{"class":1014},"# 加载量化模型\n",[1008,1752,1753,1755,1757],{"class":1010,"line":1147},[1008,1754,1045],{"class":1025},[1008,1756,1048],{"class":1021},[1008,1758,1759],{"class":1025}," AutoModelForCausalLM.from_pretrained(\n",[1008,1761,1762,1765],{"class":1010,"line":1160},[1008,1763,1764],{"class":1054},"    \"meta-llama\u002FLlama-3-8B\"",[1008,1766,1104],{"class":1025},[1008,1768,1769,1772,1774],{"class":1010,"line":1172},[1008,1770,1771],{"class":1095},"    quantization_config",[1008,1773,1048],{"class":1021},[1008,1775,1776],{"class":1025},"bnb_config,\n",[1008,1778,1779,1782,1784,1787],{"class":1010,"line":1177},[1008,1780,1781],{"class":1095},"    device_map",[1008,1783,1048],{"class":1021},[1008,1785,1786],{"class":1054},"\"auto\"",[1008,1788,1104],{"class":1025},[1008,1790,1791],{"class":1010,"line":1182},[1008,1792,1058],{"class":1025},[1008,1794,1795],{"class":1010,"line":1193},[1008,1796,1039],{"emptyLinePlaceholder":1038},[1008,1798,1799],{"class":1010,"line":1204},[1008,1800,1801],{"class":1014},"# 然后像普通 LoRA 一样应用 PEFT\n",[1008,1803,1804,1806,1808],{"class":1010,"line":1215},[1008,1805,1045],{"class":1025},[1008,1807,1048],{"class":1021},[1008,1809,1553],{"class":1025},[938,1811,1812],{},"\nQLoRA 让在消费级显卡（如 RTX 4090 24GB）上微调 7-8B 模型成为现实。对于 70B 级别的模型，QLoRA 也只需要约 40GB 显存。\n",[892,1814,1816],{"id":1815},"sft监督微调","SFT（监督微调）",[896,1818,1819],{},"SFT（Supervised Fine-Tuning）是使模型学会遵循指令的关键步骤。它使用\"指令-回答\"对作为训练数据，教会模型按用户的要求回答问题。",[962,1821,1822],{"id":1822},"数据格式",[896,1824,1825],{},"SFT 
数据通常采用对话格式：",[945,1827,1831],{"className":1828,"code":1829,"language":1830,"meta":11,"style":11},"language-json shiki shiki-themes github-light github-light github-dark","{\n  \"messages\": [\n    {\"role\": \"system\", \"content\": \"你是一个专业的医疗助手。\"},\n    {\"role\": \"user\", \"content\": \"什么是高血压？\"},\n    {\"role\": \"assistant\", \"content\": \"高血压是指动脉血压持续升高的慢性疾病...\"}\n  ]\n}\n","json",[952,1832,1833,1838,1846,1873,1895,1918,1923],{"__ignoreMap":11},[1008,1834,1835],{"class":1010,"line":1011},[1008,1836,1837],{"class":1025},"{\n",[1008,1839,1840,1843],{"class":1010,"line":1018},[1008,1841,1842],{"class":1115},"  \"messages\"",[1008,1844,1845],{"class":1025},": [\n",[1008,1847,1848,1851,1854,1857,1860,1862,1865,1867,1870],{"class":1010,"line":1035},[1008,1849,1850],{"class":1025},"    {",[1008,1852,1853],{"class":1115},"\"role\"",[1008,1855,1856],{"class":1025},": ",[1008,1858,1859],{"class":1054},"\"system\"",[1008,1861,1496],{"class":1025},[1008,1863,1864],{"class":1115},"\"content\"",[1008,1866,1856],{"class":1025},[1008,1868,1869],{"class":1054},"\"你是一个专业的医疗助手。\"",[1008,1871,1872],{"class":1025},"},\n",[1008,1874,1875,1877,1879,1881,1884,1886,1888,1890,1893],{"class":1010,"line":1042},[1008,1876,1850],{"class":1025},[1008,1878,1853],{"class":1115},[1008,1880,1856],{"class":1025},[1008,1882,1883],{"class":1054},"\"user\"",[1008,1885,1496],{"class":1025},[1008,1887,1864],{"class":1115},[1008,1889,1856],{"class":1025},[1008,1891,1892],{"class":1054},"\"什么是高血压？\"",[1008,1894,1872],{"class":1025},[1008,1896,1897,1899,1901,1903,1906,1908,1910,1912,1915],{"class":1010,"line":1061},[1008,1898,1850],{"class":1025},[1008,1900,1853],{"class":1115},[1008,1902,1856],{"class":1025},[1008,1904,1905],{"class":1054},"\"assistant\"",[1008,1907,1496],{"class":1025},[1008,1909,1864],{"class":1115},[1008,1911,1856],{"class":1025},[1008,1913,1914],{"class":1054},"\"高血压是指动脉血压持续升高的慢性疾病...\"",[1008,1916,1917],{"class":1025},"}\n",[1008,1919,1920],{"class":1010,"line":1076},[1008,1921,1922],{"class":1025},"  ]\n",[1008,1924,1925],{"class":1010,"line":1081},[1008,1926,1917],{"class":1025},[896,1928,1929],{},"或者简单的指令格式：",[945,1931,1933],{"className":1828,"code":1932,"language":1830,"meta":11,"style":11},"{\n  \"instruction\": \"将以下文本翻译成英文\",\n  \"input\": \"今天天气很好\",\n  \"output\": \"The weather is very nice today.\"\n}\n",[952,1934,1935,1939,1951,1963,1973],{"__ignoreMap":11},[1008,1936,1937],{"class":1010,"line":1011},[1008,1938,1837],{"class":1025},[1008,1940,1941,1944,1946,1949],{"class":1010,"line":1018},[1008,1942,1943],{"class":1115},"  \"instruction\"",[1008,1945,1856],{"class":1025},[1008,1947,1948],{"class":1054},"\"将以下文本翻译成英文\"",[1008,1950,1104],{"class":1025},[1008,1952,1953,1956,1958,1961],{"class":1010,"line":1035},[1008,1954,1955],{"class":1115},"  \"input\"",[1008,1957,1856],{"class":1025},[1008,1959,1960],{"class":1054},"\"今天天气很好\"",[1008,1962,1104],{"class":1025},[1008,1964,1965,1968,1970],{"class":1010,"line":1042},[1008,1966,1967],{"class":1115},"  \"output\"",[1008,1969,1856],{"class":1025},[1008,1971,1972],{"class":1054},"\"The weather is very nice today.\"\n",[1008,1974,1975],{"class":1010,"line":1061},[1008,1976,1917],{"class":1025},[962,1978,1979],{"id":1979},"数据质量要点",[1277,1981,1982,1992],{},[1280,1983,1984],{},[1283,1985,1986,1989],{},[1286,1987,1988],{},"维度",[1286,1990,1991],{},"要求",[1296,1993,1994,2002,2010,2018,2026],{},[1283,1995,1996,1999],{},[1301,1997,1998],{},"数量",[1301,2000,2001],{},"通常 1K-100K 
条即可见效",[1283,2003,2004,2007],{},[1301,2005,2006],{},"多样性",[1301,2008,2009],{},"覆盖目标场景的各种情况",[1283,2011,2012,2015],{},[1301,2013,2014],{},"质量",[1301,2016,2017],{},"高质量的回答比大量低质量数据更重要",[1283,2019,2020,2023],{},[1301,2021,2022],{},"一致性",[1301,2024,2025],{},"风格、格式保持统一",[1283,2027,2028,2031],{},[1301,2029,2030],{},"长度分布",[1301,2032,2033],{},"包含不同长度的回答",[1574,2035,2036],{},"\n数据质量远比数量重要。研究表明，精心构建的 1000 条高质量数据的微调效果，可能优于 10 万条普通质量的数据。在准备数据时，应把大部分精力放在质量把控上。\n",[892,2038,2040],{"id":2039},"rlhf基于人类反馈的强化学习","RLHF（基于人类反馈的强化学习）",[896,2042,2043],{},"RLHF 是让模型更好地对齐人类偏好的关键技术，ChatGPT 的成功很大程度上归功于这项技术。",[962,2045,2046],{"id":2046},"三个阶段",[896,2048,2049],{},[909,2050,2051],{},"阶段一：SFT",[896,2053,2054],{},"在高质量指令数据上进行监督微调，得到初始的对话模型。",[896,2056,2057],{},[909,2058,2059],{},"阶段二：训练奖励模型（Reward Model）",[896,2061,2062],{},"收集人类标注数据，对模型的多个回答进行排序（如 A > B > C），训练一个奖励模型来自动评估回答质量。",[945,2064,2067],{"className":2065,"code":2066,"language":950},[948],"用户问题 + 模型回答 → 奖励模型 → 分数（0-1）\n",[952,2068,2066],{"__ignoreMap":11},[896,2070,2071],{},[909,2072,2073],{},"阶段三：PPO 强化学习训练",[896,2075,2076],{},"使用 PPO（Proximal Policy Optimization）算法，以奖励模型的打分为信号，进一步优化语言模型。",[945,2078,2081],{"className":2079,"code":2080,"language":950},[948],"目标：最大化 Reward(回答) - β × KL(新模型 || SFT模型)\n",[952,2082,2080],{"__ignoreMap":11},[896,2084,2085],{},"其中 KL 惩罚项防止模型偏离 SFT 模型太远。",[1241,2087,2088],{},"\nRLHF 流程复杂、训练不稳定，需要大量人类标注数据和丰富的调参经验。对于大多数实际项目，DPO 是一个更简单实用的替代方案。\n",[892,2090,2092],{"id":2091},"dpo直接偏好优化","DPO（直接偏好优化）",[896,2094,2095],{},"DPO（Direct Preference Optimization）是 RLHF 的简化替代方案，无需单独训练奖励模型。",[962,2097,2098],{"id":2098},"核心思想",[896,2100,2101],{},"DPO 直接利用偏好数据（chosen\u002Frejected 对）来优化模型，将 RLHF 的三步流程简化为一步：",[945,2103,2105],{"className":1828,"code":2104,"language":1830,"meta":11,"style":11},"{\n  \"prompt\": \"解释量子计算的基本原理\",\n  \"chosen\": \"量子计算利用量子力学的叠加态和纠缠态...\",\n  \"rejected\": \"量子计算就是很快的计算机...\"\n}\n",[952,2106,2107,2111,2123,2135,2145],{"__ignoreMap":11},[1008,2108,2109],{"class":1010,"line":1011},[1008,2110,1837],{"class":1025},[1008,2112,2113,2116,2118,2121],{"class":1010,"line":1018},[1008,2114,2115],{"class":1115},"  \"prompt\"",[1008,2117,1856],{"class":1025},[1008,2119,2120],{"class":1054},"\"解释量子计算的基本原理\"",[1008,2122,1104],{"class":1025},[1008,2124,2125,2128,2130,2133],{"class":1010,"line":1035},[1008,2126,2127],{"class":1115},"  \"chosen\"",[1008,2129,1856],{"class":1025},[1008,2131,2132],{"class":1054},"\"量子计算利用量子力学的叠加态和纠缠态...\"",[1008,2134,1104],{"class":1025},[1008,2136,2137,2140,2142],{"class":1010,"line":1042},[1008,2138,2139],{"class":1115},"  \"rejected\"",[1008,2141,1856],{"class":1025},[1008,2143,2144],{"class":1054},"\"量子计算就是很快的计算机...\"\n",[1008,2146,2147],{"class":1010,"line":1061},[1008,2148,1917],{"class":1025},[962,2150,2152],{"id":2151},"dpo-的优势","DPO 的优势",[1277,2154,2155,2168],{},[1280,2156,2157],{},[1283,2158,2159,2162,2165],{},[1286,2160,2161],{},"特性",[1286,2163,2164],{},"RLHF",[1286,2166,2167],{},"DPO",[1296,2169,2170,2181,2192,2203,2214],{},[1283,2171,2172,2175,2178],{},[1301,2173,2174],{},"训练步骤",[1301,2176,2177],{},"3 步",[1301,2179,2180],{},"1 
步",[1283,2182,2183,2186,2189],{},[1301,2184,2185],{},"是否需要奖励模型",[1301,2187,2188],{},"是",[1301,2190,2191],{},"否",[1283,2193,2194,2197,2200],{},[1301,2195,2196],{},"训练稳定性",[1301,2198,2199],{},"较差",[1301,2201,2202],{},"较好",[1283,2204,2205,2208,2211],{},[1301,2206,2207],{},"实现复杂度",[1301,2209,2210],{},"高",[1301,2212,2213],{},"低",[1283,2215,2216,2219,2222],{},[1301,2217,2218],{},"效果",[1301,2220,2221],{},"略优",[1301,2223,2224],{},"接近",[945,2226,2228],{"className":1002,"code":2227,"language":1004,"meta":11,"style":11},"from trl import DPOTrainer, DPOConfig\n\ndpo_config = DPOConfig(\n    output_dir=\".\u002Fdpo_output\",\n    num_train_epochs=1,\n    per_device_train_batch_size=4,\n    learning_rate=5e-7,\n    beta=0.1,  # KL 惩罚系数\n    bf16=True,\n)\n\ntrainer = DPOTrainer(\n    model=model,\n    ref_model=ref_model,   # SFT 后的参考模型\n    args=dpo_config,\n    train_dataset=dpo_dataset,\n    tokenizer=tokenizer,\n)\n\ntrainer.train()\n",[952,2229,2230,2242,2246,2256,2267,2278,2288,2299,2315,2325,2329,2333,2342,2350,2363,2372,2381,2391,2395,2399],{"__ignoreMap":11},[1008,2231,2232,2234,2237,2239],{"class":1010,"line":1011},[1008,2233,1022],{"class":1021},[1008,2235,2236],{"class":1025}," trl ",[1008,2238,1029],{"class":1021},[1008,2240,2241],{"class":1025}," DPOTrainer, DPOConfig\n",[1008,2243,2244],{"class":1010,"line":1018},[1008,2245,1039],{"emptyLinePlaceholder":1038},[1008,2247,2248,2251,2253],{"class":1010,"line":1035},[1008,2249,2250],{"class":1025},"dpo_config ",[1008,2252,1048],{"class":1021},[1008,2254,2255],{"class":1025}," DPOConfig(\n",[1008,2257,2258,2260,2262,2265],{"class":1010,"line":1042},[1008,2259,1096],{"class":1095},[1008,2261,1048],{"class":1021},[1008,2263,2264],{"class":1054},"\".\u002Fdpo_output\"",[1008,2266,1104],{"class":1025},[1008,2268,2269,2271,2273,2276],{"class":1010,"line":1061},[1008,2270,1110],{"class":1095},[1008,2272,1048],{"class":1021},[1008,2274,2275],{"class":1115},"1",[1008,2277,1104],{"class":1025},[1008,2279,2280,2282,2284,2286],{"class":1010,"line":1076},[1008,2281,1124],{"class":1095},[1008,2283,1048],{"class":1021},[1008,2285,1129],{"class":1115},[1008,2287,1104],{"class":1025},[1008,2289,2290,2292,2294,2297],{"class":1010,"line":1081},[1008,2291,1137],{"class":1095},[1008,2293,1048],{"class":1021},[1008,2295,2296],{"class":1115},"5e-7",[1008,2298,1104],{"class":1025},[1008,2300,2301,2304,2306,2309,2312],{"class":1010,"line":1092},[1008,2302,2303],{"class":1095},"    beta",[1008,2305,1048],{"class":1021},[1008,2307,2308],{"class":1115},"0.1",[1008,2310,2311],{"class":1025},",  ",[1008,2313,2314],{"class":1014},"# KL 惩罚系数\n",[1008,2316,2317,2319,2321,2323],{"class":1010,"line":1107},[1008,2318,1150],{"class":1095},[1008,2320,1048],{"class":1021},[1008,2322,1155],{"class":1115},[1008,2324,1104],{"class":1025},[1008,2326,2327],{"class":1010,"line":1121},[1008,2328,1058],{"class":1025},[1008,2330,2331],{"class":1010,"line":1134},[1008,2332,1039],{"emptyLinePlaceholder":1038},[1008,2334,2335,2337,2339],{"class":1010,"line":1147},[1008,2336,1185],{"class":1025},[1008,2338,1048],{"class":1021},[1008,2340,2341],{"class":1025}," DPOTrainer(\n",[1008,2343,2344,2346,2348],{"class":1010,"line":1160},[1008,2345,1196],{"class":1095},[1008,2347,1048],{"class":1021},[1008,2349,1201],{"class":1025},[1008,2351,2352,2355,2357,2360],{"class":1010,"line":1172},[1008,2353,2354],{"class":1095},"    ref_model",[1008,2356,1048],{"class":1021},[1008,2358,2359],{"class":1025},"ref_model,   ",[1008,2361,2362],{"class":1014},"# SFT 
后的参考模型\n",[1008,2364,2365,2367,2369],{"class":1010,"line":1177},[1008,2366,1207],{"class":1095},[1008,2368,1048],{"class":1021},[1008,2370,2371],{"class":1025},"dpo_config,\n",[1008,2373,2374,2376,2378],{"class":1010,"line":1182},[1008,2375,1218],{"class":1095},[1008,2377,1048],{"class":1021},[1008,2379,2380],{"class":1025},"dpo_dataset,\n",[1008,2382,2383,2386,2388],{"class":1010,"line":1193},[1008,2384,2385],{"class":1095},"    tokenizer",[1008,2387,1048],{"class":1021},[1008,2389,2390],{"class":1025},"tokenizer,\n",[1008,2392,2393],{"class":1010,"line":1204},[1008,2394,1058],{"class":1025},[1008,2396,2397],{"class":1010,"line":1215},[1008,2398,1039],{"emptyLinePlaceholder":1038},[1008,2400,2401],{"class":1010,"line":1226},[1008,2402,1239],{"class":1025},[892,2404,2405],{"id":2405},"常用工具",[962,2407,2409],{"id":2408},"hugging-face-生态","Hugging Face 生态",[896,2411,2412],{},"Hugging Face 提供了完整的微调工具链：",[1277,2414,2415,2425],{},[1280,2416,2417],{},[1283,2418,2419,2422],{},[1286,2420,2421],{},"库",[1286,2423,2424],{},"用途",[1296,2426,2427,2437,2447,2457,2467,2477],{},[1283,2428,2429,2434],{},[1301,2430,2431],{},[952,2432,2433],{},"transformers",[1301,2435,2436],{},"模型加载、训练流程",[1283,2438,2439,2444],{},[1301,2440,2441],{},[952,2442,2443],{},"peft",[1301,2445,2446],{},"LoRA、QLoRA 等 PEFT 方法",[1283,2448,2449,2454],{},[1301,2450,2451],{},[952,2452,2453],{},"trl",[1301,2455,2456],{},"SFT、DPO、PPO 训练",[1283,2458,2459,2464],{},[1301,2460,2461],{},[952,2462,2463],{},"datasets",[1301,2465,2466],{},"数据集加载和处理",[1283,2468,2469,2474],{},[1301,2470,2471],{},[952,2472,2473],{},"accelerate",[1301,2475,2476],{},"分布式训练",[1283,2478,2479,2484],{},[1301,2480,2481],{},[952,2482,2483],{},"bitsandbytes",[1301,2485,2486],{},"量化支持",[962,2488,2490],{"id":2489},"llama-factory","LLaMA-Factory",[896,2492,2493],{},"LLaMA-Factory 是一个非常流行的一站式微调框架，支持通过 Web UI 或命令行配置微调参数，大幅降低了使用门槛。",[945,2495,2499],{"className":2496,"code":2497,"language":2498,"meta":11,"style":11},"language-bash shiki shiki-themes github-light github-light github-dark","# 安装\ngit clone https:\u002F\u002Fgithub.com\u002Fhiyouga\u002FLLaMA-Factory.git\ncd LLaMA-Factory\npip install -e \".[torch,metrics]\"\n\n# 启动 Web UI\nllamafactory-cli webui\n\n# 命令行训练\nllamafactory-cli train examples\u002Ftrain_lora\u002Fllama3_lora_sft.yaml\n","bash",[952,2500,2501,2506,2518,2526,2540,2544,2549,2557,2561,2566],{"__ignoreMap":11},[1008,2502,2503],{"class":1010,"line":1011},[1008,2504,2505],{"class":1014},"# 安装\n",[1008,2507,2508,2512,2515],{"class":1010,"line":1018},[1008,2509,2511],{"class":2510},"snPdu","git",[1008,2513,2514],{"class":1054}," clone",[1008,2516,2517],{"class":1054}," https:\u002F\u002Fgithub.com\u002Fhiyouga\u002FLLaMA-Factory.git\n",[1008,2519,2520,2523],{"class":1010,"line":1035},[1008,2521,2522],{"class":1115},"cd",[1008,2524,2525],{"class":1054}," LLaMA-Factory\n",[1008,2527,2528,2531,2534,2537],{"class":1010,"line":1042},[1008,2529,2530],{"class":2510},"pip",[1008,2532,2533],{"class":1054}," install",[1008,2535,2536],{"class":1115}," -e",[1008,2538,2539],{"class":1054}," \".[torch,metrics]\"\n",[1008,2541,2542],{"class":1010,"line":1061},[1008,2543,1039],{"emptyLinePlaceholder":1038},[1008,2545,2546],{"class":1010,"line":1076},[1008,2547,2548],{"class":1014},"# 启动 Web UI\n",[1008,2550,2551,2554],{"class":1010,"line":1081},[1008,2552,2553],{"class":2510},"llamafactory-cli",[1008,2555,2556],{"class":1054}," 
webui\n",[1008,2558,2559],{"class":1010,"line":1092},[1008,2560,1039],{"emptyLinePlaceholder":1038},[1008,2562,2563],{"class":1010,"line":1107},[1008,2564,2565],{"class":1014},"# 命令行训练\n",[1008,2567,2568,2570,2573],{"class":1010,"line":1121},[1008,2569,2553],{"class":2510},[1008,2571,2572],{"class":1054}," train",[1008,2574,2575],{"class":1054}," examples\u002Ftrain_lora\u002Fllama3_lora_sft.yaml\n",[896,2577,2578],{},"LLaMA-Factory 的优势：",[903,2580,2581,2584,2587,2590,2593],{},[906,2582,2583],{},"支持 100+ 种模型",[906,2585,2586],{},"支持全量微调、LoRA、QLoRA",[906,2588,2589],{},"支持 SFT、DPO、PPO、ORPO 等训练方法",[906,2591,2592],{},"提供可视化的 Web UI",[906,2594,2595],{},"内置数据集管理",[962,2597,2598],{"id":2598},"其他工具",[903,2600,2601,2607,2613],{},[906,2602,2603,2606],{},[909,2604,2605],{},"Axolotl","：配置灵活的微调框架，适合高级用户",[906,2608,2609,2612],{},[909,2610,2611],{},"Unsloth","：专注于加速 LoRA 微调，速度可达 2-5 倍提升",[906,2614,2615,2618],{},[909,2616,2617],{},"OpenRLHF","：专注于 RLHF\u002FDPO 训练的框架",[892,2620,2621],{"id":2621},"实战工作流",[896,2623,2624],{},"一个完整的微调项目通常包含以下步骤：",[962,2626,2628],{"id":2627},"第一步明确目标","第一步：明确目标",[903,2630,2631,2634,2637],{},[906,2632,2633],{},"确定微调要解决的具体问题",[906,2635,2636],{},"评估是否真的需要微调（vs 提示词工程 \u002F RAG）",[906,2638,2639],{},"选择基础模型",[962,2641,2643],{"id":2642},"第二步准备数据","第二步：准备数据",[945,2645,2647],{"className":1002,"code":2646,"language":1004,"meta":11,"style":11},"# 数据处理示例\nimport json\n\ndef prepare_sft_data(raw_data):\n    \"\"\"将原始数据转换为 SFT 格式\"\"\"\n    formatted = []\n    for item in raw_data:\n        formatted.append({\n            \"messages\": [\n                {\"role\": \"system\", \"content\": \"你是一个专业的客服助手。\"},\n                {\"role\": \"user\", \"content\": item[\"question\"]},\n                {\"role\": \"assistant\", \"content\": item[\"answer\"]}\n            ]\n        })\n    return formatted\n\n# 数据划分\n# 训练集 : 验证集 = 9 : 1\n",[952,2648,2649,2654,2661,2665,2676,2681,2691,2705,2710,2717,2739,2762,2784,2789,2794,2802,2806,2811],{"__ignoreMap":11},[1008,2650,2651],{"class":1010,"line":1011},[1008,2652,2653],{"class":1014},"# 数据处理示例\n",[1008,2655,2656,2658],{"class":1010,"line":1018},[1008,2657,1029],{"class":1021},[1008,2659,2660],{"class":1025}," json\n",[1008,2662,2663],{"class":1010,"line":1035},[1008,2664,1039],{"emptyLinePlaceholder":1038},[1008,2666,2667,2670,2673],{"class":1010,"line":1042},[1008,2668,2669],{"class":1021},"def",[1008,2671,2672],{"class":2510}," prepare_sft_data",[1008,2674,2675],{"class":1025},"(raw_data):\n",[1008,2677,2678],{"class":1010,"line":1061},[1008,2679,2680],{"class":1054},"    \"\"\"将原始数据转换为 SFT 格式\"\"\"\n",[1008,2682,2683,2686,2688],{"class":1010,"line":1076},[1008,2684,2685],{"class":1025},"    formatted ",[1008,2687,1048],{"class":1021},[1008,2689,2690],{"class":1025}," []\n",[1008,2692,2693,2696,2699,2702],{"class":1010,"line":1081},[1008,2694,2695],{"class":1021},"    for",[1008,2697,2698],{"class":1025}," item ",[1008,2700,2701],{"class":1021},"in",[1008,2703,2704],{"class":1025}," raw_data:\n",[1008,2706,2707],{"class":1010,"line":1092},[1008,2708,2709],{"class":1025},"        formatted.append({\n",[1008,2711,2712,2715],{"class":1010,"line":1107},[1008,2713,2714],{"class":1054},"            \"messages\"",[1008,2716,1845],{"class":1025},[1008,2718,2719,2722,2724,2726,2728,2730,2732,2734,2737],{"class":1010,"line":1121},[1008,2720,2721],{"class":1025},"                
{",[1008,2723,1853],{"class":1054},[1008,2725,1856],{"class":1025},[1008,2727,1859],{"class":1054},[1008,2729,1496],{"class":1025},[1008,2731,1864],{"class":1054},[1008,2733,1856],{"class":1025},[1008,2735,2736],{"class":1054},"\"你是一个专业的客服助手。\"",[1008,2738,1872],{"class":1025},[1008,2740,2741,2743,2745,2747,2749,2751,2753,2756,2759],{"class":1010,"line":1134},[1008,2742,2721],{"class":1025},[1008,2744,1853],{"class":1054},[1008,2746,1856],{"class":1025},[1008,2748,1883],{"class":1054},[1008,2750,1496],{"class":1025},[1008,2752,1864],{"class":1054},[1008,2754,2755],{"class":1025},": item[",[1008,2757,2758],{"class":1054},"\"question\"",[1008,2760,2761],{"class":1025},"]},\n",[1008,2763,2764,2766,2768,2770,2772,2774,2776,2778,2781],{"class":1010,"line":1147},[1008,2765,2721],{"class":1025},[1008,2767,1853],{"class":1054},[1008,2769,1856],{"class":1025},[1008,2771,1905],{"class":1054},[1008,2773,1496],{"class":1025},[1008,2775,1864],{"class":1054},[1008,2777,2755],{"class":1025},[1008,2779,2780],{"class":1054},"\"answer\"",[1008,2782,2783],{"class":1025},"]}\n",[1008,2785,2786],{"class":1010,"line":1160},[1008,2787,2788],{"class":1025},"            ]\n",[1008,2790,2791],{"class":1010,"line":1172},[1008,2792,2793],{"class":1025},"        })\n",[1008,2795,2796,2799],{"class":1010,"line":1177},[1008,2797,2798],{"class":1021},"    return",[1008,2800,2801],{"class":1025}," formatted\n",[1008,2803,2804],{"class":1010,"line":1182},[1008,2805,1039],{"emptyLinePlaceholder":1038},[1008,2807,2808],{"class":1010,"line":1193},[1008,2809,2810],{"class":1014},"# 数据划分\n",[1008,2812,2813],{"class":1010,"line":1204},[1008,2814,2815],{"class":1014},"# 训练集 : 验证集 = 9 : 1\n",[962,2817,2819],{"id":2818},"第三步配置训练","第三步：配置训练",[945,2821,2825],{"className":2822,"code":2823,"language":2824,"meta":11,"style":11},"language-yaml shiki shiki-themes github-light github-light github-dark","# LLaMA-Factory 配置示例 (train_config.yaml)\nmodel_name_or_path: Qwen\u002FQwen2.5-7B\nstage: sft\nfinetuning_type: lora\n\n# LoRA 参数\nlora_rank: 16\nlora_alpha: 32\nlora_target: all\n\n# 训练参数\nnum_train_epochs: 3\nper_device_train_batch_size: 4\ngradient_accumulation_steps: 4\nlearning_rate: 1.0e-4\nlr_scheduler_type: cosine\nwarmup_ratio: 0.1\nbf16: true\n\n# 数据\ndataset: my_custom_dataset\ntemplate: qwen\n","yaml",[952,2826,2827,2832,2843,2853,2863,2867,2872,2882,2892,2902,2906,2911,2921,2931,2940,2950,2960,2970,2980,2984,2989,2999],{"__ignoreMap":11},[1008,2828,2829],{"class":1010,"line":1011},[1008,2830,2831],{"class":1014},"# LLaMA-Factory 配置示例 (train_config.yaml)\n",[1008,2833,2834,2838,2840],{"class":1010,"line":1018},[1008,2835,2837],{"class":2836},"sovSZ","model_name_or_path",[1008,2839,1856],{"class":1025},[1008,2841,2842],{"class":1054},"Qwen\u002FQwen2.5-7B\n",[1008,2844,2845,2848,2850],{"class":1010,"line":1035},[1008,2846,2847],{"class":2836},"stage",[1008,2849,1856],{"class":1025},[1008,2851,2852],{"class":1054},"sft\n",[1008,2854,2855,2858,2860],{"class":1010,"line":1042},[1008,2856,2857],{"class":2836},"finetuning_type",[1008,2859,1856],{"class":1025},[1008,2861,2862],{"class":1054},"lora\n",[1008,2864,2865],{"class":1010,"line":1061},[1008,2866,1039],{"emptyLinePlaceholder":1038},[1008,2868,2869],{"class":1010,"line":1076},[1008,2870,2871],{"class":1014},"# LoRA 
参数\n",[1008,2873,2874,2877,2879],{"class":1010,"line":1081},[1008,2875,2876],{"class":2836},"lora_rank",[1008,2878,1856],{"class":1025},[1008,2880,2881],{"class":1115},"16\n",[1008,2883,2884,2887,2889],{"class":1010,"line":1092},[1008,2885,2886],{"class":2836},"lora_alpha",[1008,2888,1856],{"class":1025},[1008,2890,2891],{"class":1115},"32\n",[1008,2893,2894,2897,2899],{"class":1010,"line":1107},[1008,2895,2896],{"class":2836},"lora_target",[1008,2898,1856],{"class":1025},[1008,2900,2901],{"class":1054},"all\n",[1008,2903,2904],{"class":1010,"line":1121},[1008,2905,1039],{"emptyLinePlaceholder":1038},[1008,2907,2908],{"class":1010,"line":1134},[1008,2909,2910],{"class":1014},"# 训练参数\n",[1008,2912,2913,2916,2918],{"class":1010,"line":1147},[1008,2914,2915],{"class":2836},"num_train_epochs",[1008,2917,1856],{"class":1025},[1008,2919,2920],{"class":1115},"3\n",[1008,2922,2923,2926,2928],{"class":1010,"line":1160},[1008,2924,2925],{"class":2836},"per_device_train_batch_size",[1008,2927,1856],{"class":1025},[1008,2929,2930],{"class":1115},"4\n",[1008,2932,2933,2936,2938],{"class":1010,"line":1172},[1008,2934,2935],{"class":2836},"gradient_accumulation_steps",[1008,2937,1856],{"class":1025},[1008,2939,2930],{"class":1115},[1008,2941,2942,2945,2947],{"class":1010,"line":1177},[1008,2943,2944],{"class":2836},"learning_rate",[1008,2946,1856],{"class":1025},[1008,2948,2949],{"class":1115},"1.0e-4\n",[1008,2951,2952,2955,2957],{"class":1010,"line":1182},[1008,2953,2954],{"class":2836},"lr_scheduler_type",[1008,2956,1856],{"class":1025},[1008,2958,2959],{"class":1054},"cosine\n",[1008,2961,2962,2965,2967],{"class":1010,"line":1193},[1008,2963,2964],{"class":2836},"warmup_ratio",[1008,2966,1856],{"class":1025},[1008,2968,2969],{"class":1115},"0.1\n",[1008,2971,2972,2975,2977],{"class":1010,"line":1204},[1008,2973,2974],{"class":2836},"bf16",[1008,2976,1856],{"class":1025},[1008,2978,2979],{"class":1115},"true\n",[1008,2981,2982],{"class":1010,"line":1215},[1008,2983,1039],{"emptyLinePlaceholder":1038},[1008,2985,2986],{"class":1010,"line":1226},[1008,2987,2988],{"class":1014},"# 数据\n",[1008,2990,2991,2994,2996],{"class":1010,"line":1231},[1008,2992,2993],{"class":2836},"dataset",[1008,2995,1856],{"class":1025},[1008,2997,2998],{"class":1054},"my_custom_dataset\n",[1008,3000,3001,3004,3006],{"class":1010,"line":1236},[1008,3002,3003],{"class":2836},"template",[1008,3005,1856],{"class":1025},[1008,3007,3008],{"class":1054},"qwen\n",[962,3010,3012],{"id":3011},"第四步训练与监控","第四步：训练与监控",[903,3014,3015,3018,3021],{},[906,3016,3017],{},"观察 loss 曲线是否正常下降",[906,3019,3020],{},"在验证集上监控指标",[906,3022,3023],{},"注意过拟合的迹象",[962,3025,3027],{"id":3026},"第五步评估与部署","第五步：评估与部署",[903,3029,3030,3033,3036],{},[906,3031,3032],{},"在测试集和真实场景中评估效果",[906,3034,3035],{},"人工评估生成质量",[906,3037,3038],{},"将 LoRA 适配器合并到基础模型中（可选）",[945,3040,3042],{"className":1002,"code":3041,"language":1004,"meta":11,"style":11},"# 合并 LoRA 适配器\nfrom peft import PeftModel\n\nbase_model = AutoModelForCausalLM.from_pretrained(\"Qwen\u002FQwen2.5-7B\")\nmodel = PeftModel.from_pretrained(base_model, \".\u002Flora_output\")\nmerged_model = model.merge_and_unload()\nmerged_model.save_pretrained(\".\u002Fmerged_model\")\n",[952,3043,3044,3049,3060,3064,3078,3092,3102],{"__ignoreMap":11},[1008,3045,3046],{"class":1010,"line":1011},[1008,3047,3048],{"class":1014},"# 合并 LoRA 适配器\n",[1008,3050,3051,3053,3055,3057],{"class":1010,"line":1018},[1008,3052,1022],{"class":1021},[1008,3054,1388],{"class":1025},[1008,3056,1029],{"class":1021},[1008,3058,3059],{"class":1025}," 
PeftModel\n",[1008,3061,3062],{"class":1010,"line":1035},[1008,3063,1039],{"emptyLinePlaceholder":1038},[1008,3065,3066,3069,3071,3073,3076],{"class":1010,"line":1042},[1008,3067,3068],{"class":1025},"base_model ",[1008,3070,1048],{"class":1021},[1008,3072,1051],{"class":1025},[1008,3074,3075],{"class":1054},"\"Qwen\u002FQwen2.5-7B\"",[1008,3077,1058],{"class":1025},[1008,3079,3080,3082,3084,3087,3090],{"class":1010,"line":1061},[1008,3081,1045],{"class":1025},[1008,3083,1048],{"class":1021},[1008,3085,3086],{"class":1025}," PeftModel.from_pretrained(base_model, ",[1008,3088,3089],{"class":1054},"\".\u002Flora_output\"",[1008,3091,1058],{"class":1025},[1008,3093,3094,3097,3099],{"class":1010,"line":1076},[1008,3095,3096],{"class":1025},"merged_model ",[1008,3098,1048],{"class":1021},[1008,3100,3101],{"class":1025}," model.merge_and_unload()\n",[1008,3103,3104,3107,3110],{"class":1010,"line":1081},[1008,3105,3106],{"class":1025},"merged_model.save_pretrained(",[1008,3108,3109],{"class":1054},"\".\u002Fmerged_model\"",[1008,3111,1058],{"class":1025},[892,3113,3114],{"id":3114},"常见问题与建议",[1277,3116,3117,3130],{},[1280,3118,3119],{},[1283,3120,3121,3124,3127],{},[1286,3122,3123],{},"问题",[1286,3125,3126],{},"可能原因",[1286,3128,3129],{},"解决方案",[1296,3131,3132,3143,3154,3164,3175],{},[1283,3133,3134,3137,3140],{},[1301,3135,3136],{},"Loss 不下降",[1301,3138,3139],{},"学习率过小\u002F过大",[1301,3141,3142],{},"调整学习率，尝试 1e-4 到 5e-5",[1283,3144,3145,3148,3151],{},[1301,3146,3147],{},"过拟合",[1301,3149,3150],{},"数据量不足",[1301,3152,3153],{},"增加数据、增大 dropout、减少 epoch",[1283,3155,3156,3158,3161],{},[1301,3157,993],{},[1301,3159,3160],{},"学习率过大\u002F训练过久",[1301,3162,3163],{},"降低学习率、减少训练步数",[1283,3165,3166,3169,3172],{},[1301,3167,3168],{},"生成质量差",[1301,3170,3171],{},"数据质量问题",[1301,3173,3174],{},"清洗数据、提高标注质量",[1283,3176,3177,3180,3183],{},[1301,3178,3179],{},"显存不足",[1301,3181,3182],{},"模型\u002F批次太大",[1301,3184,3185],{},"使用 QLoRA、减小 batch size、增大梯度累积",[892,3187,3188],{"id":3188},"小结",[896,3190,3191],{},"模型微调是将通用大模型转化为领域专家的关键技术。对于大多数开发者来说，LoRA\u002FQLoRA + SFT 是最实用的组合，它在效果和成本之间取得了良好平衡。在实践中，数据质量始终是决定微调效果的最关键因素。",[3193,3194,3195],"style",{},"html pre.shiki code .sCsY4, html code.shiki .sCsY4{--shiki-light:#6A737D;--shiki-default:#6A737D;--shiki-dark:#6A737D}html pre.shiki code .s8jYJ, html code.shiki .s8jYJ{--shiki-light:#D73A49;--shiki-default:#D73A49;--shiki-dark:#F97583}html pre.shiki code .sxrX7, html code.shiki .sxrX7{--shiki-light:#24292E;--shiki-default:#24292E;--shiki-dark:#E1E4E8}html pre.shiki code .sIIMD, html code.shiki .sIIMD{--shiki-light:#032F62;--shiki-default:#032F62;--shiki-dark:#9ECBFF}html pre.shiki code .sP4rz, html code.shiki .sP4rz{--shiki-light:#E36209;--shiki-default:#E36209;--shiki-dark:#FFAB70}html pre.shiki code .sBjJW, html code.shiki .sBjJW{--shiki-light:#005CC5;--shiki-default:#005CC5;--shiki-dark:#79B8FF}html .light .shiki span {color: var(--shiki-light);background: var(--shiki-light-bg);font-style: var(--shiki-light-font-style);font-weight: var(--shiki-light-font-weight);text-decoration: var(--shiki-light-text-decoration);}html.light .shiki span {color: var(--shiki-light);background: var(--shiki-light-bg);font-style: var(--shiki-light-font-style);font-weight: var(--shiki-light-font-weight);text-decoration: var(--shiki-light-text-decoration);}html .default .shiki span {color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html .shiki span 
{color: var(--shiki-default);background: var(--shiki-default-bg);font-style: var(--shiki-default-font-style);font-weight: var(--shiki-default-font-weight);text-decoration: var(--shiki-default-text-decoration);}html .dark .shiki span {color: var(--shiki-dark);background: var(--shiki-dark-bg);font-style: var(--shiki-dark-font-style);font-weight: var(--shiki-dark-font-weight);text-decoration: var(--shiki-dark-text-decoration);}html.dark .shiki span {color: var(--shiki-dark);background: var(--shiki-dark-bg);font-style: var(--shiki-dark-font-style);font-weight: var(--shiki-dark-font-weight);text-decoration: var(--shiki-dark-text-decoration);}html pre.shiki code .snPdu, html code.shiki .snPdu{--shiki-light:#6F42C1;--shiki-default:#6F42C1;--shiki-dark:#B392F0}html pre.shiki code .sovSZ, html code.shiki .sovSZ{--shiki-light:#22863A;--shiki-default:#22863A;--shiki-dark:#85E89D}",{"title":11,"searchDepth":1018,"depth":1018,"links":3197},[3198,3199,3200,3204,3210,3214,3218,3221,3225,3230,3237,3238],{"id":894,"depth":1018,"text":894},{"id":943,"depth":1018,"text":943},{"id":956,"depth":1018,"text":957,"children":3201},[3202,3203],{"id":964,"depth":1035,"text":964},{"id":975,"depth":1035,"text":975},{"id":1246,"depth":1018,"text":1247,"children":3205},[3206,3207,3208,3209],{"id":1253,"depth":1035,"text":1253},{"id":1268,"depth":1035,"text":1268},{"id":1345,"depth":1035,"text":1345},{"id":1376,"depth":1035,"text":1376},{"id":1579,"depth":1018,"text":1580,"children":3211},[3212,3213],{"id":1586,"depth":1035,"text":1586},{"id":1609,"depth":1035,"text":1609},{"id":1815,"depth":1018,"text":1816,"children":3215},[3216,3217],{"id":1822,"depth":1035,"text":1822},{"id":1979,"depth":1035,"text":1979},{"id":2039,"depth":1018,"text":2040,"children":3219},[3220],{"id":2046,"depth":1035,"text":2046},{"id":2091,"depth":1018,"text":2092,"children":3222},[3223,3224],{"id":2098,"depth":1035,"text":2098},{"id":2151,"depth":1035,"text":2152},{"id":2405,"depth":1018,"text":2405,"children":3226},[3227,3228,3229],{"id":2408,"depth":1035,"text":2409},{"id":2489,"depth":1035,"text":2490},{"id":2598,"depth":1035,"text":2598},{"id":2621,"depth":1018,"text":2621,"children":3231},[3232,3233,3234,3235,3236],{"id":2627,"depth":1035,"text":2628},{"id":2642,"depth":1035,"text":2643},{"id":2818,"depth":1035,"text":2819},{"id":3011,"depth":1035,"text":3012},{"id":3026,"depth":1035,"text":3027},{"id":3114,"depth":1018,"text":3114},{"id":3188,"depth":1018,"text":3188},"md",{},{"title":58,"description":59},"ai\u002Fllm\u002Ffine-tuning","HKXKxDK0l2dBPEgl_knZHtB6mCJv8qCnXrLaCwWddx8",1775474634745]
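
## 补充示例

以下几段代码是对正文的补充示意，均为假设性的草图，参数与文件名并非正文给出的内容，仅用于说明相应小节的做法。

SFT 一节给出了对话格式的数据示例，但没有展示对应的训练代码。下面是一个基于 trl 的 SFTTrainer 的最小示意（假设使用较新版本的 trl/peft；`SFTTrainer` 的参数名在不同版本间有变化，数据文件名 `sft_data.jsonl` 为假设）：

```python
# 最小化的 SFT + LoRA 训练示意（假设性示例，请以所用 trl 版本的文档为准）
from datasets import load_dataset
from peft import LoraConfig
from trl import SFTConfig, SFTTrainer

# 假设数据已整理为正文中的 messages 对话格式，保存为 JSONL 文件
dataset = load_dataset("json", data_files="sft_data.jsonl", split="train")

# LoRA 配置（参数含义见前文 LoRA 一节）
lora_config = LoraConfig(
    r=16,
    lora_alpha=32,
    target_modules=["q_proj", "v_proj"],
    task_type="CAUSAL_LM",
)

sft_config = SFTConfig(
    output_dir="./sft_output",
    num_train_epochs=3,
    per_device_train_batch_size=4,
    gradient_accumulation_steps=4,
    learning_rate=1e-4,
    bf16=True,
)

trainer = SFTTrainer(
    model="Qwen/Qwen2.5-7B",   # 也可以传入已加载好的模型对象
    args=sft_config,
    train_dataset=dataset,
    peft_config=lora_config,   # 传入 LoRA 配置即按 LoRA 方式微调
)

trainer.train()
```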
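
"数据质量要点"一节强调应把主要精力放在质量把控上。下面是一个简单的数据清洗示意脚本（纯示意：长度阈值、文件名和字段名均为假设，字段沿用正文中的 instruction/input/output 格式），演示最基本的去重与长度过滤：

```python
# 简单的 SFT 数据质量把控示意：长度过滤 + 按指令去重（阈值为假设值）
import hashlib
import json

def clean_sft_data(samples, min_len=10, max_len=4000):
    seen = set()
    cleaned = []
    for item in samples:
        answer = item["output"]
        # 过滤过短或过长的回答
        if not (min_len <= len(answer) <= max_len):
            continue
        # 按 instruction + input 去重，避免重复样本
        key = hashlib.md5(
            (item["instruction"] + item.get("input", "")).encode("utf-8")
        ).hexdigest()
        if key in seen:
            continue
        seen.add(key)
        cleaned.append(item)
    return cleaned

if __name__ == "__main__":
    with open("raw_data.json", encoding="utf-8") as f:
        raw = json.load(f)
    data = clean_sft_data(raw)
    print(f"清洗前 {len(raw)} 条，清洗后 {len(data)} 条")
```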
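
"第二步：准备数据"末尾提到按"训练集 : 验证集 = 9 : 1"划分数据，下面是与之对应的划分示意（文件名为假设，随机种子固定以便复现）：

```python
# 按 9:1 随机划分训练集和验证集（文件名仅作示意）
import json
import random

with open("formatted_data.json", encoding="utf-8") as f:
    data = json.load(f)

random.seed(42)          # 固定随机种子，保证划分可复现
random.shuffle(data)

split = int(len(data) * 0.9)
train_set, val_set = data[:split], data[split:]

with open("train.json", "w", encoding="utf-8") as f:
    json.dump(train_set, f, ensure_ascii=False, indent=2)
with open("val.json", "w", encoding="utf-8") as f:
    json.dump(val_set, f, ensure_ascii=False, indent=2)

print(f"训练集 {len(train_set)} 条，验证集 {len(val_set)} 条")
```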
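
"第五步：评估与部署"中合并 LoRA 适配器之后，可以先做一次简单的推理冒烟测试，确认合并后的模型能够正常生成（模型路径沿用正文示例；提示词和生成参数为假设值，tokenizer 仍从基础模型加载，除非你也把 tokenizer 保存到了合并目录）：

```python
# 加载合并后的模型做一次简单推理，验证合并是否成功
from transformers import AutoModelForCausalLM, AutoTokenizer

# 正文的合并示例只保存了模型权重，tokenizer 这里从基础模型加载
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-7B")
model = AutoModelForCausalLM.from_pretrained("./merged_model", device_map="auto")

prompt = "什么是高血压？"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```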