Coverage for src/ollamapy/terminal_interface.py: 12% (146 statements)
coverage.py v7.10.6, created at 2025-09-01 12:29 -0400
1"""Terminal-based chat interface for Ollama."""
3import sys
4from typing import List, Tuple, Dict, Any
5from .model_manager import ModelManager
6from .analysis_engine import AnalysisEngine
7from .chat_session import ChatSession
8from .skills import (
9 get_available_actions,
10 execute_action,
11 get_action_logs,
12 select_and_execute_action,
13 SKILL_REGISTRY,
14)
15from .ai_query import AIQuery

class TerminalInterface:
    """Terminal-based chat interface with AI meta-reasoning."""

    def __init__(
        self,
        model_manager: ModelManager,
        analysis_engine: AnalysisEngine,
        chat_session: ChatSession,
        ai_query: AIQuery,
    ):
        """Initialize the terminal interface.

        Args:
            model_manager: The ModelManager instance
            analysis_engine: The AnalysisEngine instance
            chat_session: The ChatSession instance
            ai_query: The AIQuery instance for structured queries
        """
        self.model_manager = model_manager
        self.analysis_engine = analysis_engine
        self.chat_session = chat_session
        self.ai_query = ai_query
        self.actions = get_available_actions()

    def setup(self) -> bool:
        """Set up the chat environment and ensure models are available."""
        print("🤖 OllamaPy Multi-Action Chat Interface")
        print("=" * 50)

        # Check if Ollama is running and ensure models are available
        success, main_status, analysis_status = (
            self.model_manager.ensure_models_available(
                self.chat_session.model, self.analysis_engine.analysis_model
            )
        )

        if not success:
            print("❌ Error: Ollama server is not running!")
            print("Please start Ollama with: ollama serve")
            return False

        # Display model status
        self.model_manager.display_model_status(
            self.chat_session.model, self.analysis_engine.analysis_model
        )

        print(
            f"\n🧠 Multi-action system: AI evaluates ALL {len(self.actions)} actions for every query"
        )
        for action_name, action_info in self.actions.items():
            params = action_info.get("parameters", {})
            if params:
                param_list = ", ".join(
                    [f"{p}: {info['type']}" for p, info in params.items()]
                )
                print(f"  • {action_name} ({param_list})")
            else:
                print(f"  • {action_name}")
        print(
            "\n💬 Chat started! Type 'quit', 'exit', or 'bye' to end the conversation."
        )
        print("   Type 'clear' to clear conversation history.")
        print("   Type 'help' for more commands.\n")

        return True

    def print_help(self):
        """Print help information."""
        print("\n📖 Available commands:")
        print("  quit, exit, bye - End the conversation")
        print("  clear - Clear conversation history")
        print("  help - Show this help message")
        print("  model - Show current models")
        print("  models - List available models")
        print("  actions - Show available actions the AI can choose")
        print("  action - Manually trigger AI action selection based on context")
        print(
            "\n🧠 Multi-action: The AI evaluates ALL actions and can run multiple per query."
        )
        print()

    def handle_command(self, user_input: str) -> bool:
        """Handle special commands. Returns True if command was handled and should exit."""
        command = user_input.lower().strip()

        if command in ["quit", "exit", "bye"]:
            print("\n👋 Goodbye! Thanks for chatting!")
            return True

        elif command == "clear":
            self.chat_session.clear_conversation()
            print("🧹 Conversation history cleared!")
            return False

        elif command == "help":
            self.print_help()
            return False

        elif command == "model":
            print(f"🎯 Chat model: {self.chat_session.model}")
            if self.analysis_engine.analysis_model != self.chat_session.model:
                print(f"🔍 Analysis model: {self.analysis_engine.analysis_model}")
            else:
                print("🔍 Using same model for analysis and chat")
            return False

        elif command == "models":
            models = self.model_manager.list_available_models()
            if models:
                print(f"📚 Available models: {', '.join(models)}")
            else:
                print("❌ No models found")
            return False

        elif command == "actions":
            print(f"🔧 Available actions ({len(self.actions)}):")
            for name, info in self.actions.items():
                params = info.get("parameters", {})
                if params:
                    param_list = ", ".join(
                        [f"{p}: {spec['type']}" for p, spec in params.items()]
                    )
                    print(f"  • {name}({param_list}): {info['description']}")
                else:
                    print(f"  • {name}: {info['description']}")
            return False

        elif command == "action":
            print("🤖 Selecting an action based on conversation context...")
            context = "\n".join(
                [
                    f"{m['role']}: {m['content']}"
                    for m in self.chat_session.messages[-5:]
                ]
            )
            select_and_execute_action(self.ai_query, context)
            logs = get_action_logs()
            if logs:
                print("📝 Action Logs:")
                for log_entry in logs:
                    print(f"  - {log_entry}")
            else:
                print("No actions were executed or no logs were produced.")
            print()
            return False

        return False

    def get_user_input(self) -> str:
        """Get user input with a nice prompt."""
        try:
            return input("👤 You: ").strip()
        except (KeyboardInterrupt, EOFError):
            print("\n\n👋 Goodbye! Thanks for chatting!")
            sys.exit(0)

    def execute_multiple_actions(
        self,
        actions_with_params: List[Tuple[str, Dict[str, Any]]],
        user_input: str = "",
    ) -> str:
        """Execute multiple actions and collect their log outputs.

        Args:
            actions_with_params: List of (action_name, parameters) tuples
            user_input: The original user input, used when generating custom scripts

        Returns:
            Combined log output from all actions
        """
        if not actions_with_params:
            return ""

        # Clear any previous logs
        # clear_action_logs()

        print(f"🚀 Executing {len(actions_with_params)} action(s)...")

        for action_name, parameters in actions_with_params:
            # Special handling for the custom Python shell
            if action_name == "customPythonShell":
                print(f"  Running {action_name} - generating custom script...")
                # Generate a custom Python script using AI
                script = self.analysis_engine.generate_custom_python_script(user_input)
                print("  Executing generated script...")
                # Execute the generated script
                print(f"  Script to be executed: {script}")
                result = SKILL_REGISTRY.execute_custom_python_script(script)
                print(f"  Script output: {result}")

            else:
                print(f"  Running {action_name}", end="")
                if parameters:
                    print(f" with {parameters}", end="")
                print("...")

                # Execute the action (it will log internally)
                execute_action(action_name, parameters)

        # Get all the logs that were generated
        combined_logs = get_action_logs()

        if combined_logs:
            print(f"📝 Actions generated {len(combined_logs)} log entries")

        return "\n".join(combined_logs)

    def generate_ai_response_with_context(self, user_input: str, action_logs: str):
        """Generate AI response with action context from logs.

        Args:
            user_input: The original user input
            action_logs: The combined log output from all executed actions
        """
        # Show which model is being used for the chat response
        chat_model_display = self.chat_session.model
        if self.analysis_engine.analysis_model != self.chat_session.model:
            print(f"🤖 Chat model ({chat_model_display}): ", end="", flush=True)
        else:
            print("🤖 AI: ", end="", flush=True)

        try:
            for chunk in self.chat_session.stream_response_with_context(
                user_input, action_logs
            ):
                print(chunk, end="", flush=True)

            print()  # New line after response

        except Exception as e:
            print(f"\n❌ Error generating response: {e}")

    def chat_loop(self):
        """Main chat loop with multi-action execution."""
        while True:
            user_input = self.get_user_input()

            if not user_input:
                continue

            # Handle commands
            if self.handle_command(user_input):
                break

            # Select ALL applicable actions and extract their parameters
            selected_actions = self.analysis_engine.select_all_applicable_actions(
                user_input
            )

            # Execute all selected actions and collect logs
            action_logs = self.execute_multiple_actions(selected_actions, user_input)

            # Generate AI response with action context from logs
            self.generate_ai_response_with_context(user_input, action_logs)

            print()  # Extra line for readability

    def run(self):
        """Run the chat interface."""
        if self.setup():
            self.chat_loop()
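

# --- Usage sketch (illustrative only, not part of the covered module) ---
# A minimal wiring example showing how TerminalInterface might be started.
# The TerminalInterface keyword arguments and run() call match the class above;
# the constructor arguments for ModelManager, AnalysisEngine, ChatSession, and
# AIQuery are assumptions and may differ from the real classes in ollamapy.
#
# if __name__ == "__main__":
#     manager = ModelManager()                          # assumed no-arg constructor
#     engine = AnalysisEngine(analysis_model="gemma3")  # assumed keyword argument
#     session = ChatSession(model="gemma3")             # assumed keyword argument
#     query = AIQuery(model_manager=manager)            # assumed keyword argument
#
#     interface = TerminalInterface(
#         model_manager=manager,
#         analysis_engine=engine,
#         chat_session=session,
#         ai_query=query,
#     )
#     interface.run()  # blocks in the chat loop until the user quits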