pub struct ChatSession { /* private fields */ }
A chat session that manages conversation history with context window limits.
ChatSession maintains a list of messages and automatically trims old messages
when the total token count exceeds the configured maximum context size. The system
prompt (first message) is always preserved.
§Examples
use mojentic::llm::{ChatSession, LlmBroker};
use mojentic::llm::gateways::OllamaGateway;
use std::sync::Arc;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let gateway = Arc::new(OllamaGateway::default());
let broker = LlmBroker::new("qwen3:32b", gateway);
let mut session = ChatSession::new(broker);
let response = session.send("What is Rust?").await?;
println!("Response: {}", response);
Ok(())
}

§Implementations

impl ChatSession

pub fn new(broker: LlmBroker) -> Self
Create a new chat session with default settings.
§Arguments
broker - The LLM broker to use for generating responses
§Examples
use mojentic::llm::{ChatSession, LlmBroker};
use mojentic::llm::gateways::OllamaGateway;
use std::sync::Arc;
let gateway = Arc::new(OllamaGateway::default());
let broker = LlmBroker::new("qwen3:32b", gateway);
let session = ChatSession::new(broker);

pub fn builder(broker: LlmBroker) -> ChatSessionBuilder
Create a chat session builder for custom configuration.
§Arguments
broker - The LLM broker to use for generating responses
§Examples
use mojentic::llm::ChatSession;
let session = ChatSession::builder(broker)
.system_prompt("You are a helpful coding assistant.")
.temperature(0.7)
.max_context(16384)
.build();

pub async fn send(&mut self, query: &str) -> Result<String>
Send a message to the LLM and get a response.
This method:
- Adds the user message to the conversation history
- Generates a response using the LLM
- Adds the assistant’s response to the history
- Automatically trims old messages if context window is exceeded
§Arguments
query - The user’s message
§Returns
The LLM’s response as a string
§Examples
let response = session.send("What is 2 + 2?").await?;
println!("Answer: {}", response);

pub fn send_stream<'a>(
    &'a mut self,
    query: &str,
) -> Pin<Box<dyn Stream<Item = Result<String>> + 'a>>
Send a message to the LLM and get a streaming response.
This method:
- Adds the user message to the conversation history
- Streams the response from the LLM, yielding chunks as they arrive
- After the stream is fully consumed, adds the assembled response to history
- Automatically trims old messages if context window is exceeded
§Arguments
query - The user’s message
§Returns
A stream of string chunks from the LLM response
§Examples
use futures::stream::StreamExt;
let mut stream = session.send_stream("Tell me a story");
while let Some(result) = stream.next().await {
print!("{}", result?);
}

pub fn insert_message(&mut self, message: LlmMessage)
Insert a message into the conversation history.
If the total token count exceeds max_context, the oldest messages
are removed until the total is under the limit. The system prompt
(index 0) is always preserved.
§Arguments
message - The message to add
pub fn messages(&self) -> &[SizedLlmMessage]
Get the current conversation history
pub fn total_tokens(&self) -> usize
Get the total token count of the current conversation