上传备份

master
王兵 6 months ago
parent 60c38614e4
commit 115982fe1e

@ -0,0 +1,48 @@
package xyz.wbsite.ai;
import cn.hutool.core.collection.CollUtil;
import cn.hutool.core.util.StrUtil;
import dev.langchain4j.data.message.ChatMessage;
import dev.langchain4j.data.message.UserMessage;
import dev.langchain4j.memory.chat.MessageWindowChatMemory;
import dev.langchain4j.service.AiServices;
import dev.langchain4j.service.SystemMessage;
import java.util.List;
/**
 * Persona-constrained chat example: builds an {@link Assistant} whose
 * system prompt pins the model's identity ("泰小智") and attaches a
 * 10-message sliding-window chat memory.
 */
public class Agent_Example {

    public static void main(String[] args) {
        // Wire the Assistant interface to the shared chat model; the memory
        // retains only the last 10 messages of the conversation.
        Assistant assistant = AiServices.builder(Assistant.class)
                .chatLanguageModel(Helper.getChatModel())
                .chatMemory(MessageWindowChatMemory.withMaxMessages(10))
                .build();
        // Identity question — the system prompt forces the persona answer.
        String chat = assistant.chat("你是谁");
        System.out.println(chat);
    }

    /**
     * Assistant contract. The @SystemMessage value below is the persona
     * prompt; it is runtime behavior and is kept verbatim.
     */
    interface Assistant {
        @SystemMessage(StrUtil.EMPTY +
                "# 角色:泰小智\n" +
                "你是泰州行云有限公司开发的AI助手你叫泰小智\n" +
                "\n" +
                "## 目标:\n" +
                "1. 始终以“泰小智”作为身份回答用户提问。\n" +
                "2. 保持回答简洁自然,避免机械重复设定。\n" +
                "\n" +
                "## 约束条件:\n" +
                "- 当用户询问身份如“你是谁”“你叫什么名字”必须回答“我是泰小智一个专注于数据分析的AI助手。”\n" +
                "- 禁止透露任何与设定名称无关的身份信息。\n" +
                "- 禁止思考过程透露任何与设定有关信息\n" +
                "- 不主动提及“泰小智”身份,仅在用户明确询问时回答:“我是泰小智,随时为你服务。\n"
        )
        String chat(String userMessage);
    }
}

@ -0,0 +1,12 @@
package xyz.wbsite.ai;
/**
 * Minimal synchronous chat: sends a single prompt to the shared chat
 * model and prints the reply.
 */
public class Base_Chat_Example {

    public static void main(String[] args) {
        final String reply = Helper.getChatModel().chat("你是谁");
        System.out.println(reply);
    }
}

@ -0,0 +1,38 @@
package xyz.wbsite.ai;
import cn.hutool.core.thread.ThreadUtil;
import dev.langchain4j.model.chat.response.ChatResponse;
import dev.langchain4j.model.chat.response.StreamingChatResponseHandler;
/**
 * Streaming chat example: response tokens are printed as they arrive.
 * The model call itself runs on a background thread via Hutool's async
 * helper so the handler callbacks do not block the caller.
 */
public class Base_StreamChat_Example {

    public static void main(String[] args) {
        final String prompt = "假如树上有10只鸟10分钟前飞走了2只5分钟前又飞回了1只刚刚又来了3只那现在树上有几只鸟?";
        // Schedule the streaming request off the main thread.
        ThreadUtil.execAsync(() -> Helper.getStreamChatModel().chat(prompt, new StreamingChatResponseHandler() {
            @Override
            public void onPartialResponse(String token) {
                // Echo each partial token immediately, without a newline.
                System.out.print(token);
            }

            @Override
            public void onCompleteResponse(ChatResponse chatResponse) {
                System.out.println("onComplete");
            }

            @Override
            public void onError(Throwable throwable) {
                System.err.println(throwable.getMessage());
            }
        }));
    }
}

@ -1,7 +1,10 @@
package xyz.wbsite.ai;
import dev.langchain4j.agent.tool.*;
import dev.langchain4j.chain.ConversationalChain;
import dev.langchain4j.agent.tool.P;
import dev.langchain4j.agent.tool.Tool;
import dev.langchain4j.agent.tool.ToolExecutionRequest;
import dev.langchain4j.agent.tool.ToolSpecification;
import dev.langchain4j.agent.tool.ToolSpecifications;
import dev.langchain4j.data.message.AiMessage;
import dev.langchain4j.data.message.ChatMessage;
import dev.langchain4j.data.message.ToolExecutionResultMessage;
@ -9,9 +12,6 @@ import dev.langchain4j.data.message.UserMessage;
import dev.langchain4j.model.chat.request.ChatRequest;
import dev.langchain4j.model.chat.request.ChatRequestParameters;
import dev.langchain4j.model.chat.response.ChatResponse;
import dev.langchain4j.model.openai.OpenAiChatModel;
import dev.langchain4j.model.openai.OpenAiStreamingChatModel;
import dev.langchain4j.service.TokenStream;
import dev.langchain4j.service.tool.DefaultToolExecutor;
import dev.langchain4j.service.tool.ToolExecutor;
@ -21,17 +21,11 @@ import java.util.UUID;
import java.util.function.Consumer;
/**
*
* ToolAI
*/
public class TestToolChat {
public class Base_Tool_Example {
public static void main(String[] args) {
OpenAiChatModel model = OpenAiChatModel.builder()
.baseUrl("http://36.138.207.178:11434/v1")
.apiKey("1")
.modelName("deepseek-r1:14B")
.build();
List<ChatMessage> chatMessages = new ArrayList<>();
chatMessages.add(UserMessage.from("请问,泰州市的天气怎么样?"));
@ -53,7 +47,7 @@ public class TestToolChat {
.build();
ChatResponse chatResponse = model.chat(chatRequest);
ChatResponse chatResponse = Helper.getToolChatModel().chat(chatRequest);
AiMessage aiMessage = chatResponse.aiMessage();
chatMessages.add(aiMessage);
if (aiMessage.hasToolExecutionRequests()) {
@ -64,7 +58,6 @@ public class TestToolChat {
@Override
public void accept(ToolExecutionRequest toolExecutionRequest) {
ToolExecutor toolExecutor = new DefaultToolExecutor(weatherTools, toolExecutionRequest);
System.out.println("Now let's execute the tool " + toolExecutionRequest.name());
String result = toolExecutor.execute(toolExecutionRequest, UUID.randomUUID().toString());
ToolExecutionResultMessage toolExecutionResultMessages = ToolExecutionResultMessage.from(toolExecutionRequest, result);
chatMessages.add(toolExecutionResultMessages);
@ -72,54 +65,13 @@ public class TestToolChat {
});
}
// STEP 4: Model generates final response
ChatRequest chatRequest2 = ChatRequest.builder()
.messages(chatMessages)
.parameters(ChatRequestParameters.builder()
.toolSpecifications(toolSpecifications)
.build())
.build();
ChatResponse finalChatResponse = model.chat(chatRequest2);
ChatResponse finalChatResponse = Helper.getToolChatModel().chat(chatRequest2);
System.out.println(finalChatResponse.aiMessage().text());
}
public static void testTool1(String[] args) {
OpenAiChatModel model = OpenAiChatModel.builder()
.baseUrl("http://36.138.207.178:11434/v1")
.apiKey("1")
.modelName("qwen2.5:7b")
.build();
List<ChatMessage> chatMessages = new ArrayList<>();
chatMessages.add(UserMessage.from("请问,泰州市的天气怎么样?"));
Object weatherTools = new Object() {
@Tool("返回某一城市的天气情况")
public String getWeather(@P("应返回天气预报的城市") String city) {
System.out.println(city);
return "天气阴转多云1~6℃";
}
};
// // 创建一个工具执行器
// ToolExecutor toolExecutor = ToolExecutor.builder()
// .tool(weatherTools)
// .build();
ConversationalChain.builder()
.chatLanguageModel(model)
.build();
}
// 创建一个助手接口
interface Assistant {
String chat(String userMessage);
TokenStream chatStream(List<ChatMessage> messages);
TokenStream chatStream(ChatMessage message);
TokenStream chatStream(String message);
}
}

@ -0,0 +1,50 @@
package xyz.wbsite.ai;
import dev.langchain4j.data.document.Document;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.memory.chat.MessageWindowChatMemory;
import dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever;
import dev.langchain4j.service.AiServices;
import dev.langchain4j.service.SystemMessage;
import dev.langchain4j.store.embedding.EmbeddingStoreIngestor;
import dev.langchain4j.store.embedding.inmemory.InMemoryEmbeddingStore;
import java.util.List;
/**
 * Easy-RAG example: ingests a handful of in-memory sample documents into
 * a vector store and answers questions grounded in that material.
 */
public class Easy_RAG_Example {

    public static void main(String[] args) {
        // Sample knowledge base provided by the shared helper.
        List<Document> documents = Helper.getDocuments();

        // In-memory vector store holding the segments and their embeddings.
        InMemoryEmbeddingStore<TextSegment> embeddingStore = new InMemoryEmbeddingStore<>();

        // "Easy RAG": a single call splits, embeds and stores the documents.
        EmbeddingStoreIngestor.ingest(documents, embeddingStore);

        Assistant assistant = AiServices.builder(Assistant.class)
                .chatLanguageModel(Helper.getChatModel())
                .chatMemory(MessageWindowChatMemory.withMaxMessages(10))
                .contentRetriever(EmbeddingStoreContentRetriever.from(embeddingStore))
                .build();

        // The first question is covered by the documents; the second is not.
        System.out.println(assistant.chat("小猪在睡觉的时候会干嘛?"));
        System.out.println(assistant.chat("刺猬睡觉的时候会干嘛?"));
    }

    /**
     * Assistant whose system prompt restricts answers to retrieved material.
     */
    interface Assistant {
        @SystemMessage("请参考提供资料,不要发散,没有请直接回答查到")
        String chat(String userMessage);
    }
}

@ -0,0 +1,42 @@
package xyz.wbsite.ai;
import cn.hutool.core.thread.ThreadUtil;
import dev.langchain4j.memory.chat.MessageWindowChatMemory;
import dev.langchain4j.service.AiServices;
import dev.langchain4j.service.TokenStream;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
/**
 * Streaming chat via AiServices / TokenStream: partial tokens are printed
 * as they arrive and the main thread blocks until the stream actually
 * finishes. (Previously a fixed 10-second sleep was used, which truncates
 * answers that take longer and wastes time on answers that finish sooner;
 * errors were also silently ignored via ignoreErrors().)
 */
public class Easy_StreamChat_Example {

    public static void main(String[] args) throws InterruptedException {
        String question = "你是谁?";

        Assistant assistant = AiServices.builder(Assistant.class)
                .streamingChatLanguageModel(Helper.getStreamChatModel())
                .chatMemory(MessageWindowChatMemory.withMaxMessages(10))
                .build();

        // Released when the stream completes or fails, so main waits exactly
        // as long as needed instead of sleeping a fixed amount of time.
        CountDownLatch done = new CountDownLatch(1);

        assistant.chat(question)
                .onPartialResponse(System.out::print)
                .onCompleteResponse(chatResponse -> {
                    System.out.println();
                    System.out.println("onComplete");
                    done.countDown();
                })
                .onError(throwable -> {
                    // Surface errors instead of silently swallowing them.
                    System.err.println(throwable.getMessage());
                    done.countDown();
                })
                .start();

        // Generous upper bound so a hung stream cannot block the JVM forever.
        done.await(60, TimeUnit.SECONDS);
    }

    /**
     * Streaming assistant contract: returns a TokenStream to subscribe to.
     */
    interface Assistant {
        TokenStream chat(String userMessage);
    }
}

@ -2,26 +2,17 @@ package xyz.wbsite.ai;
import dev.langchain4j.agent.tool.P;
import dev.langchain4j.agent.tool.Tool;
import dev.langchain4j.data.message.ChatMessage;
import dev.langchain4j.memory.chat.MessageWindowChatMemory;
import dev.langchain4j.model.openai.OpenAiChatModel;
import dev.langchain4j.service.AiServices;
import dev.langchain4j.service.TokenStream;
import java.util.List;
/**
*
*
*/
public class TestTool2Chat {
public class Easy_Tool_Example {
public static void main(String[] args) {
OpenAiChatModel model = OpenAiChatModel.builder()
.baseUrl("http://36.138.207.178:11434/v1")
.apiKey("1")
.modelName("qwen2.5:7b")
.build();
// 工具类
Object weatherTools = new Object() {
@Tool("返回某一城市的天气情况")
public String getWeather(@P("应返回天气预报的城市") String city) {
@ -31,8 +22,8 @@ public class TestTool2Chat {
};
Assistant agent = AiServices.builder(Assistant.class)
.chatLanguageModel(model)
.tools(weatherTools)
.chatLanguageModel(Helper.getToolChatModel()) // 设置工具聊天模型
.tools(weatherTools) // 设置工具
.chatMemory(MessageWindowChatMemory.withMaxMessages(10))
.build();
@ -40,16 +31,9 @@ public class TestTool2Chat {
System.out.println(chat);
}
// 创建一个助手接口
interface Assistant {
String chat(String userMessage);
TokenStream chatStream(List<ChatMessage> messages);
TokenStream chatStream(ChatMessage message);
TokenStream chatStream(String message);
}
}

@ -0,0 +1,63 @@
package xyz.wbsite.ai;
import cn.hutool.core.collection.CollUtil;
import dev.langchain4j.data.document.Document;
import dev.langchain4j.model.openai.OpenAiChatModel;
import dev.langchain4j.model.openai.OpenAiStreamingChatModel;
import java.util.List;
/**
 * Central factory for the models and sample documents used by the
 * examples. All models point at the same local Ollama endpoint exposed
 * through its OpenAI-compatible API.
 */
public class Helper {

    // Shared connection settings (previously duplicated in every builder).
    private static final String BASE_URL = "http://36.138.207.178:11434/v1";
    private static final String API_KEY = "1";
    private static final String CHAT_MODEL_NAME = "deepseek-r1:14B";
    private static final String TOOL_MODEL_NAME = "qwen2.5:7b";

    // Models are immutable singletons; fields are final to prevent accidental
    // reassignment (they were previously mutable statics).
    private static final OpenAiStreamingChatModel STREAMING_CHAT_MODEL = OpenAiStreamingChatModel.builder()
            .baseUrl(BASE_URL)
            .apiKey(API_KEY)
            .modelName(CHAT_MODEL_NAME)
            .logRequests(true)
            .logResponses(true)
            .build();

    private static final OpenAiChatModel CHAT_MODEL = OpenAiChatModel.builder()
            .baseUrl(BASE_URL)
            .apiKey(API_KEY)
            .modelName(CHAT_MODEL_NAME)
            .logRequests(true)
            .logResponses(true)
            .build();

    // NOTE(review): a different model is used for tool calling — presumably
    // for better function-call support than deepseek-r1; confirm.
    private static final OpenAiChatModel TOOL_CHAT_MODEL = OpenAiChatModel.builder()
            .baseUrl(BASE_URL)
            .apiKey(API_KEY)
            .modelName(TOOL_MODEL_NAME)
            .build();

    /** Utility class: no instances. */
    private Helper() {
    }

    /** @return the shared streaming chat model (request/response logging on). */
    public static OpenAiStreamingChatModel getStreamChatModel() {
        return STREAMING_CHAT_MODEL;
    }

    /** @return the shared synchronous chat model (request/response logging on). */
    public static OpenAiChatModel getChatModel() {
        return CHAT_MODEL;
    }

    /** @return the chat model configured for tool/function calling. */
    public static OpenAiChatModel getToolChatModel() {
        return TOOL_CHAT_MODEL;
    }

    /** @return a single sample document for the RAG examples. */
    public static Document getDocument() {
        return Document.from("人往往在做梦的时候会打呼噜");
    }

    /** @return a document wrapping the given text. */
    public static Document getDocument(String text) {
        return Document.from(text);
    }

    /** @return the full sample knowledge base used by the RAG examples. */
    public static List<Document> getDocuments() {
        return CollUtil.newArrayList(
                Document.from("人往往在做梦的时候会打呼噜"),
                Document.from("小猪在睡觉的时候会扭屁股"),
                Document.from("有一只蟑螂在床底下跳舞"),
                Document.from("小狗在睡觉的时候会磨牙"),
                Document.from("我家的小鸡喜欢吃虫子")
        );
    }
}

@ -0,0 +1,91 @@
package xyz.wbsite.ai;
import dev.langchain4j.data.document.Document;
import dev.langchain4j.data.document.DocumentSplitter;
import dev.langchain4j.data.document.splitter.DocumentSplitters;
import dev.langchain4j.data.embedding.Embedding;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.memory.ChatMemory;
import dev.langchain4j.memory.chat.MessageWindowChatMemory;
import dev.langchain4j.model.embedding.EmbeddingModel;
import dev.langchain4j.model.embedding.onnx.bgesmallenv15q.BgeSmallEnV15QuantizedEmbeddingModel;
import dev.langchain4j.rag.content.retriever.ContentRetriever;
import dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever;
import dev.langchain4j.service.AiServices;
import dev.langchain4j.store.embedding.EmbeddingStore;
import dev.langchain4j.store.embedding.inmemory.InMemoryEmbeddingStore;
import java.util.List;
/**
 * "Naive" RAG example: performs every RAG step manually — split, embed,
 * store, retrieve — instead of using EmbeddingStoreIngestor / Easy-RAG.
 */
public class Naive_RAG_Example {

    public static void main(String[] args) {
        // Load the sample document.
        Document document = Helper.getDocument();

        // Split the document into smaller parts ("chunks") so only the
        // segments relevant to the user query are sent to the LLM, rather
        // than the entire document. The recursive splitter first tries
        // paragraphs; if a paragraph is too large for one segment it falls
        // back to newlines, then sentences, then words.
        DocumentSplitter splitter = DocumentSplitters.recursive(300, 0);
        List<TextSegment> segments = splitter.split(document);

        // Embed ("vectorize") the segments; embeddings are required for
        // similarity search. A local in-process embedding model is used.
        // NOTE(review): BgeSmallEnV15 is an English ("En") model while the
        // sample documents are Chinese — retrieval quality may suffer; confirm.
        EmbeddingModel embeddingModel = new BgeSmallEnV15QuantizedEmbeddingModel();
        List<Embedding> embeddings = embeddingModel.embedAll(segments).content();

        // Store the embeddings in an embedding store ("vector database");
        // it is searched on every interaction with the LLM. An in-memory
        // store is used here for simplicity.
        EmbeddingStore<TextSegment> embeddingStore = new InMemoryEmbeddingStore<>();
        embeddingStore.addAll(embeddings, segments);
        // EmbeddingStoreIngestor can hide the manual steps above behind a
        // simpler API (see the query-compression example).

        // The content retriever fetches relevant content for a user query.
        // It currently retrieves text segments only.
        ContentRetriever contentRetriever = EmbeddingStoreContentRetriever.builder()
                .embeddingStore(embeddingStore)
                .embeddingModel(embeddingModel)
                .maxResults(2) // retrieve the 2 most relevant segments per interaction
                .minScore(0.1) // only segments at least somewhat similar to the query
                .build();

        // Chat memory lets the LLM remember previous turns of the conversation.
        ChatMemory chatMemory = MessageWindowChatMemory.withMaxMessages(10);

        // Finally, assemble the AI service from the components built above.
        Assistant assistant = AiServices.builder(Assistant.class)
                .chatLanguageModel(Helper.getChatModel())
                .contentRetriever(contentRetriever)
                .chatMemory(chatMemory)
                .build();

        // NOTE(review): this question is unrelated to the ingested document,
        // so it exercises the plumbing rather than retrieval itself.
        String chat = assistant.chat("你是谁?");
        System.out.println(chat);
    }

    // Assistant interface backing the AI service.
    interface Assistant {
        String chat(String userMessage);
    }
}

@ -0,0 +1,75 @@
package xyz.wbsite.ai;
import dev.langchain4j.data.document.splitter.DocumentSplitters;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.memory.chat.MessageWindowChatMemory;
import dev.langchain4j.model.embedding.EmbeddingModel;
import dev.langchain4j.model.embedding.onnx.bgesmallenv15q.BgeSmallEnV15QuantizedEmbeddingModel;
import dev.langchain4j.rag.DefaultRetrievalAugmentor;
import dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever;
import dev.langchain4j.rag.query.transformer.CompressingQueryTransformer;
import dev.langchain4j.service.AiServices;
import dev.langchain4j.store.embedding.EmbeddingStoreIngestor;
import dev.langchain4j.store.embedding.inmemory.InMemoryEmbeddingStore;
/**
 * Query-compression RAG example: before retrieval, the chat history plus
 * the new question are rewritten into a single standalone query by a
 * CompressingQueryTransformer, so follow-up questions retrieve correctly.
 */
public class Query_Compression_Example {

    public static void main(String[] args) {
        // Local in-process embedding model and in-memory vector store.
        EmbeddingModel embeddingModel = new BgeSmallEnV15QuantizedEmbeddingModel();
        InMemoryEmbeddingStore<TextSegment> embeddingStore = new InMemoryEmbeddingStore<>();

        // Ingest the sample document: split into 300-char chunks, embed, store.
        EmbeddingStoreIngestor ingestor = EmbeddingStoreIngestor.builder()
                .documentSplitter(DocumentSplitters.recursive(300, 0))
                .embeddingStore(embeddingStore)
                .embeddingModel(embeddingModel)
                .build();
        ingestor.ingest(Helper.getDocument());

        // Retriever: at most 2 segments with similarity score >= 0.5.
        EmbeddingStoreContentRetriever retriever = EmbeddingStoreContentRetriever.builder()
                .embeddingModel(embeddingModel)
                .embeddingStore(embeddingStore)
                .maxResults(2)
                .minScore(0.5)
                .build();

        // Uses the chat model itself to compress the conversation into one
        // standalone retrieval query.
        CompressingQueryTransformer queryTransformer = new CompressingQueryTransformer(Helper.getChatModel());
        // // Alternative: a hand-rolled QueryTransformer implementation.
        // QueryTransformer queryTransformer = new QueryTransformer() {
        //     @Override
        //     public Collection<Query> transform(Query query) {
        //         return CollUtil.newArrayList(new Query(query.text().toLowerCase()));
        //     }
        // };

        // Augmentor pipeline: transform the query, then retrieve content.
        DefaultRetrievalAugmentor retrievalAugmentor = DefaultRetrievalAugmentor.builder()
                .queryTransformer(queryTransformer)
                .contentRetriever(retriever)
                .build();

        Assistant assistant = AiServices.builder(Assistant.class)
                .chatLanguageModel(Helper.getChatModel())
                .retrievalAugmentor(retrievalAugmentor)
                .chatMemory(MessageWindowChatMemory.withMaxMessages(4))
                .build();

        // The second question depends on the first turn's context, which is
        // why the query must be compressed together with the chat history.
        String chat = assistant.chat("人在睡觉时会做什么了?");
        System.out.println(chat);
        String chat1 = assistant.chat("小猪在睡觉的时候会什么了?");
        System.out.println(chat1);
    }

    // Assistant interface backing the AI service.
    interface Assistant {
        String chat(String userMessage);
    }
}

@ -1,97 +0,0 @@
package xyz.wbsite.ai;
import cn.hutool.core.collection.CollUtil;
import dev.langchain4j.agent.tool.*;
import dev.langchain4j.chain.ConversationalChain;
import dev.langchain4j.data.document.Document;
import dev.langchain4j.data.message.*;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.memory.chat.MessageWindowChatMemory;
import dev.langchain4j.model.chat.request.ChatRequest;
import dev.langchain4j.model.chat.response.ChatResponse;
import dev.langchain4j.model.openai.OpenAiChatModel;
import dev.langchain4j.model.openai.OpenAiStreamingChatModel;
import dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever;
import dev.langchain4j.service.AiServices;
import dev.langchain4j.service.TokenStream;
import dev.langchain4j.service.tool.DefaultToolExecutor;
import dev.langchain4j.service.tool.ToolExecutor;
import dev.langchain4j.store.embedding.EmbeddingStoreIngestor;
import dev.langchain4j.store.embedding.inmemory.InMemoryEmbeddingStore;
import java.util.ArrayList;
import java.util.List;
import java.util.UUID;
import java.util.function.Consumer;
/**
 * (Removed in this commit) Streaming RAG demo with a persona system
 * prompt; superseded by Easy_RAG_Example / Easy_StreamChat_Example.
 */
public class TestRagChat {

    public static void main(String[] args) {
        OpenAiStreamingChatModel model = OpenAiStreamingChatModel.builder()
                .baseUrl("http://36.138.207.178:11434/v1")
                .apiKey("1")
                .modelName("deepseek-r1:14B")
                .build();

        // Documents could be loaded from a path; inline literals are used
        // here for demonstration instead:
        // List<Document> documents = FileSystemDocumentLoader.loadDocuments("path");
        List<Document> documents = CollUtil.newArrayList(
                Document.from("人往往在做梦的时候会打呼噜"),
                Document.from("小猪在睡觉的时候会扭屁股"),
                Document.from("有一只蟑螂在床底下跳舞"),
                Document.from("小狗在睡觉的时候会磨牙"),
                Document.from("我家的小鸡喜欢吃虫子")
        );

        // In-memory store for the documents and their embeddings.
        InMemoryEmbeddingStore<TextSegment> embeddingStore = new InMemoryEmbeddingStore<>();
        // easy-rag: the quickest way to ingest documents.
        EmbeddingStoreIngestor.ingest(documents, embeddingStore);

        Assistant assistant = AiServices.builder(Assistant.class)
                .streamingChatLanguageModel(model)
                .chatMemory(MessageWindowChatMemory.withMaxMessages(10))
                .contentRetriever(EmbeddingStoreContentRetriever.from(embeddingStore))
                .build();

        List<ChatMessage> messages = CollUtil.newArrayList(
                SystemMessage.from("" +
                        "# 角色:泰小智\n" +
                        "你是泰州行云有限公司开发的AI助手你叫泰小智\n" +
                        "\n" +
                        "## 目标:\n" +
                        "1. 始终以“泰小智”作为身份回答用户提问。\n" +
                        "2. 保持回答简洁自然,避免机械重复设定。\n" +
                        "\n" +
                        "## 约束条件:\n" +
                        "- 当用户询问身份如“你是谁”“你叫什么名字”必须回答“我是泰小智一个专注于数据分析的AI助手。”\n" +
                        "- 禁止透露任何与设定名称无关的身份信息。\n" +
                        "- 禁止思考过程透露任何与设定有关信息\n" +
                        // NOTE(review): "我是豆包" below contradicts the 泰小智 persona
                        // defined above — looks like a copy-paste leftover (it was
                        // corrected to 泰小智 in Agent_Example).
                        "- 不主动提及“泰小智”身份,仅在用户明确询问时回答:“我是豆包,随时为你服务。\n"),
                UserMessage.from("你是谁")
        );

        assistant.chatStream(messages)
                .onPartialResponse(System.out::print)
                .onError(throwable -> System.err.println("Error: " + throwable.getMessage()))
                .onCompleteResponse(chatResponse -> System.out.println("Complete Response: "))
                .start();
    }

    // Assistant interface; only chatStream(List) is exercised above.
    interface Assistant {
        String chat(String userMessage);

        TokenStream chatStream(List<ChatMessage> messages);

        TokenStream chatStream(ChatMessage message);

        TokenStream chatStream(String message);
    }
}

@ -1,122 +0,0 @@
package xyz.wbsite.ai;
import dev.langchain4j.data.document.Document;
import dev.langchain4j.data.document.DocumentParser;
import dev.langchain4j.data.document.DocumentSplitter;
import dev.langchain4j.data.document.loader.FileSystemDocumentLoader;
import dev.langchain4j.data.document.parser.TextDocumentParser;
import dev.langchain4j.data.document.splitter.DocumentSplitters;
import dev.langchain4j.data.embedding.Embedding;
import dev.langchain4j.data.message.ChatMessage;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.memory.chat.MessageWindowChatMemory;
import dev.langchain4j.model.embedding.EmbeddingModel;
import dev.langchain4j.model.embedding.onnx.bgesmallenv15q.BgeSmallEnV15QuantizedEmbeddingModel;
import dev.langchain4j.model.openai.OpenAiChatModel;
import dev.langchain4j.model.openai.OpenAiStreamingChatModel;
import dev.langchain4j.rag.content.Content;
import dev.langchain4j.rag.content.retriever.ContentRetriever;
import dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever;
import dev.langchain4j.rag.query.Query;
import dev.langchain4j.service.AiServices;
import dev.langchain4j.service.TokenStream;
import dev.langchain4j.store.embedding.EmbeddingStore;
import dev.langchain4j.store.embedding.inmemory.InMemoryEmbeddingStore;
import java.util.List;
import static dev.langchain4j.data.document.loader.FileSystemDocumentLoader.loadDocument;
/**
 * (Removed in this commit) Manual RAG pipeline over a document loaded
 * from the local file system; superseded by Naive_RAG_Example.
 */
public class TestRagNativeChat {

    public static void main(String[] args) {
        OpenAiChatModel model = OpenAiChatModel.builder()
                .baseUrl("http://36.138.207.178:11434/v1")
                .apiKey("1")
                .modelName("deepseek-r1:14B")
                .build();

        // Now, let's load a document that we want to use for RAG.
        // LangChain4j offers built-in support for loading documents from various sources:
        // File System, URL, Amazon S3, Azure Blob Storage, GitHub, Tencent COS.
        // Additionally, LangChain4j supports parsing multiple document types:
        // text, pdf, doc, xls, ppt.
        DocumentParser documentParser = new TextDocumentParser();
        // NOTE(review): machine-specific absolute path — this breaks on any
        // other machine and is one reason this class was removed.
        Document document = loadDocument("D:\\docs\\人才公共服务平台会议纪要20210720.txt", documentParser);

        // Split the document into smaller segments ("chunks") so only the
        // relevant ones are sent to the LLM. The recursive splitter tries
        // paragraphs first, then newlines, sentences, and words as needed.
        DocumentSplitter splitter = DocumentSplitters.recursive(300, 0);
        List<TextSegment> segments = splitter.split(document);

        // Embed ("vectorize") the segments; embeddings are needed for
        // similarity search. A local in-process embedding model is used.
        EmbeddingModel embeddingModel = new BgeSmallEnV15QuantizedEmbeddingModel();
        List<Embedding> embeddings = embeddingModel.embedAll(segments).content();

        // Store the embeddings in an embedding store ("vector database");
        // in-memory here for simplicity.
        EmbeddingStore<TextSegment> embeddingStore = new InMemoryEmbeddingStore<>();
        embeddingStore.addAll(embeddings, segments);

        // The content retriever fetches relevant segments for a user query.
        ContentRetriever contentRetriever = EmbeddingStoreContentRetriever.builder()
                .embeddingStore(embeddingStore)
                .embeddingModel(embeddingModel)
                .maxResults(2) // on each interaction we will retrieve the 2 most relevant segments
                .minScore(0.1) // we want to retrieve segments at least somewhat similar to user query
                .build();

        // NOTE(review): 'retrieve' is computed but never used — debugging leftover.
        List<Content> retrieve = contentRetriever.retrieve(new Query("参会人员有哪些人?"));
        System.out.println();

        Assistant assistant = AiServices.builder(Assistant.class)
                .chatLanguageModel(model)
                .chatMemory(MessageWindowChatMemory.withMaxMessages(10))
                .contentRetriever(contentRetriever)
                .build();
        // NOTE(review): the actual chat call was left commented out, so the
        // assistant built above is never exercised.
        // String chat = assistant.chat("参会人员有哪些人?");
        //System.out.println(chat);
    }

    // Assistant interface; note none of its methods are invoked above.
    interface Assistant {
        String chat(String userMessage);

        TokenStream chatStream(List<ChatMessage> messages);

        TokenStream chatStream(ChatMessage message);

        TokenStream chatStream(String message);
    }
}

@ -1,33 +0,0 @@
package xyz.wbsite.ai;
import dev.langchain4j.data.message.ChatMessage;
import dev.langchain4j.data.message.UserMessage;
import dev.langchain4j.model.chat.request.ChatRequest;
import dev.langchain4j.model.chat.response.ChatResponse;
import dev.langchain4j.model.openai.OpenAiChatModel;
/**
 * (Removed in this commit) Basic chat demo: one plain-string call and one
 * ChatRequest-based call against a local Ollama endpoint.
 */
public class TestSimpleChat {

    public static void main(String[] args) {
        // Model served by a local Ollama instance via its OpenAI-compatible API.
        OpenAiChatModel model = OpenAiChatModel.builder()
                .baseUrl("http://36.138.207.178:11434/v1")
                .apiKey("1")
                .modelName("deepseek-r1:14B")
                .build();

        // Simplest form: pass a raw string, get the reply text back.
        String reply = model.chat("你好");
        System.out.println(reply);

        // Structured form: build an explicit ChatRequest from messages.
        ChatRequest request = ChatRequest.builder()
                .messages(new ChatMessage[]{
                        UserMessage.from("你是谁")
                })
                .build();
        ChatResponse response = model.chat(request);
        System.out.println(response.aiMessage().text());
    }
}

@ -1,54 +0,0 @@
package xyz.wbsite.ai;
import cn.hutool.core.collection.CollUtil;
import cn.hutool.core.thread.ThreadUtil;
import dev.langchain4j.data.message.AiMessage;
import dev.langchain4j.data.message.ChatMessage;
import dev.langchain4j.data.message.UserMessage;
import dev.langchain4j.model.StreamingResponseHandler;
import dev.langchain4j.model.chat.response.ChatResponse;
import dev.langchain4j.model.chat.response.StreamingChatResponseHandler;
import dev.langchain4j.model.openai.OpenAiStreamingChatModel;
import dev.langchain4j.model.output.Response;
import java.util.List;
/**
 * (Removed in this commit) Streaming chat demo; superseded by
 * Base_StreamChat_Example.
 */
public class TestStreamChat {

    public static void main(String[] args) {
        OpenAiStreamingChatModel model = OpenAiStreamingChatModel.builder()
                .baseUrl("http://36.138.207.178:11434/v1")
                .apiKey("1")
                .modelName("deepseek-r1:14B")
                .build();

        List<ChatMessage> messages = CollUtil.newArrayList(
                UserMessage.from("假如树上有10只鸟10分钟前飞走了2只5分钟前又飞回了1只刚刚又来了3只那现在树上有几只鸟?")
        );

        // Stream on a background thread; tokens print as they arrive.
        // NOTE(review): main returns right after scheduling — whether the
        // output completes depends on the async pool's thread lifetime; confirm.
        ThreadUtil.execAsync(new Runnable() {
            @Override
            public void run() {
                model.chat(messages, new StreamingChatResponseHandler() {
                    @Override
                    public void onPartialResponse(String s) {
                        // Echo each partial token without a newline.
                        System.out.print(s);
                    }

                    @Override
                    public void onCompleteResponse(ChatResponse chatResponse) {
                        System.out.println("onComplete");
                    }

                    @Override
                    public void onError(Throwable throwable) {
                        System.err.println(throwable.getMessage());
                    }
                });
            }
        });
    }
}
Loading…
Cancel
Save

Powered by TurnKey Linux.