Coverage for src/ollamapy/chat_session.py: 27%
52 statements
« prev ^ index » next coverage.py v7.10.6, created at 2025-09-01 12:29 -0400
« prev ^ index » next coverage.py v7.10.6, created at 2025-09-01 12:29 -0400
1"""Chat session management for conversation state and response generation."""
3from typing import List, Dict
4from .ollama_client import OllamaClient
7class ChatSession:
8 """Manages conversation state and AI response generation."""
10 def __init__(self, model: str, client: OllamaClient, system_message: str):
11 """Initialize the chat session.
13 Args:
14 model: The model to use for chat responses
15 client: The OllamaClient instance
16 system_message: The system message to set context
17 """
18 self.model = model
19 self.client = client
20 self.system_message = (
21 system_message
22 if system_message
23 else "You are a straight forward and powerful assistant. You are basically a Janet from the Good Place but just a tad sassy to stay engaging. Make sure the user"
24 )
25 self.conversation: List[Dict[str, str]] = []
27 def add_user_message(self, message: str):
28 """Add a user message to the conversation history.
30 Args:
31 message: The user's message
32 """
33 self.conversation.append({"role": "user", "content": message})
35 def add_assistant_message(self, message: str):
36 """Add an assistant message to the conversation history.
38 Args:
39 message: The assistant's message
40 """
41 self.conversation.append({"role": "assistant", "content": message})
43 def clear_conversation(self):
44 """Clear the conversation history."""
45 self.conversation.clear_conversation()
47 def get_conversation_history(self) -> List[Dict[str, str]]:
48 """Get the current conversation history.
50 Returns:
51 List of conversation messages
52 """
53 return self.conversation.copy()
55 def generate_response_with_context(
56 self, user_input: str, action_logs: str = None
57 ) -> str:
58 """Generate AI response with optional action context from logs.
60 Args:
61 user_input: The original user input
62 action_logs: Optional combined log output from executed actions
64 Returns:
65 The generated response content
66 """
67 # Add user message to conversation
68 self.add_user_message(user_input)
70 # Build the AI's context message
71 if action_logs:
72 # Actions produced logs - include them as context
73 context_message = f"""<context>
74The following information was gathered from various tools and actions:
76{action_logs}
78Use this information to provide a comprehensive and accurate response to the user.
79</context>"""
80 else:
81 # No actions executed - just normal chat
82 context_message = None
84 # Prepare messages for the AI
85 messages_for_ai = self.conversation.copy()
87 # If we have action context, add it as a system-like message
88 if context_message:
89 messages_for_ai.append({"role": "system", "content": context_message})
91 response_content = ""
92 try:
93 for chunk in self.client.chat_stream(
94 model=self.model, messages=messages_for_ai, system=self.system_message
95 ):
96 response_content += chunk
98 # Add AI response to conversation (without the action context)
99 self.add_assistant_message(response_content)
101 return response_content
103 except Exception as e:
104 error_msg = f"Error generating response: {e}"
105 print(f"\n❌ {error_msg}")
106 return error_msg
108 def stream_response_with_context(self, user_input: str, action_logs: str = None):
109 """Stream AI response with optional action context, yielding chunks.
111 Args:
112 user_input: The original user input
113 action_logs: Optional combined log output from executed actions
115 Yields:
116 Response chunks as they arrive
117 """
118 # Add user message to conversation
119 self.add_user_message(user_input)
121 # Build the AI's context message
122 if action_logs:
123 # Actions produced logs - include them as context
124 context_message = f"""<context>
125Snap judgements made by a reasoning model has led to multiple responses to get triggered automatically. The logs to all of the actions that were executed are below. Please keep in mind the user's intent and understand that some of these responses that get triggered may in fact not be helpful to crafting the response to the user. Here are the complete logs of the last snap judgement's response logs:
126\n
127{action_logs}
128\n
129If this information is useful to the user's request then use this information to help you. If there is information that is not helpful to the user's request then ignore it completely and do not remark on it. This is only possibly helpful context.\nThis is likely the last thing before responding to the user you will get. Respond to the user now, and apologies for repeat instructions. Do not respond to this context, respond to the oringinal user input: {user_input}.
130</context>"""
131 else:
132 # No actions executed - just normal chat
133 context_message = None
135 # Prepare messages for the AI
136 messages_for_ai = self.conversation.copy()
138 # If we have action context, add it as a system-like message
139 if context_message:
140 messages_for_ai.append({"role": "system", "content": context_message})
142 response_content = ""
143 try:
144 for chunk in self.client.chat_stream(
145 model=self.model, messages=messages_for_ai, system=self.system_message
146 ):
147 response_content += chunk
148 yield chunk
150 # Add AI response to conversation (without the action context)
151 self.add_assistant_message(response_content)
153 except Exception as e:
154 error_msg = f"Error generating response: {e}"
155 print(f"\n❌ {error_msg}")
156 yield error_msg