Coverage for src/ollamapy/main.py: 78%
89 statements
« prev ^ index » next coverage.py v7.10.6, created at 2025-09-01 12:29 -0400
1"""Main module with Ollama chat functionality."""
3import argparse
4import sys
5from typing import Optional
6from .ollama_client import OllamaClient
7from .model_manager import ModelManager
8from .analysis_engine import AnalysisEngine
9from .chat_session import ChatSession
10from .terminal_interface import TerminalInterface
11from .ai_query import AIQuery
def hello():
    """Return the canonical hello-world greeting string."""
    message = "Hello, World!"
    return message
def greet(name):
    """Return a personalized greeting for *name*."""
    return "Hello, " + name + "!"
def chat(
    model: str = "gemma3:4b",
    system: str = "You are a helpful assistant.",
    analysis_model: str = "gemma3:4b",
):
    """Start an interactive chat session with Ollama.

    Args:
        model: The model to use for chat (default: gemma3:4b)
        system: Optional system message to set context
        analysis_model: Optional separate model for action analysis (defaults to main model)
    """
    # All collaborators share a single Ollama client connection.
    ollama = OllamaClient()

    # Build the terminal UI around its four collaborators and hand over control.
    interface = TerminalInterface(
        ModelManager(ollama),
        AnalysisEngine(analysis_model, ollama),
        ChatSession(model, ollama, system),
        AIQuery(ollama, model=analysis_model),
    )
    interface.run()
def run_vibe_tests(
    model: str = "gemma3:4b", iterations: int = 1, analysis_model: str = "gemma3:4b"
):
    """Run built-in vibe tests.

    Args:
        model: The model to use for testing (default: gemma3:4b)
        iterations: Number of iterations per test (default: 1)
        analysis_model: Optional separate model for action analysis (defaults to main model)
    """
    # Imported lazily so importing this module stays cheap.
    from .vibe_tests import run_vibe_tests as _run

    return _run(
        model=model,
        iterations=iterations,
        analysis_model=analysis_model,
    )
def run_multi_model_vibe_tests(iterations: int = 5, output_path: Optional[str] = None):
    """Run vibe tests across multiple configured models for comprehensive comparison.

    Args:
        iterations: Number of iterations per test (default: 5)
        output_path: Path to save detailed JSON results for GitHub Pages,
            or None to skip saving.

    Returns:
        Whatever ``run_multi_model_tests`` returns (used by main() as a
        truthy success flag).
    """
    # Fix: the parameter defaulted to None but was annotated plain `str`;
    # PEP 484 requires Optional[str] for a None default.
    from .multi_model_vibe_tests import run_multi_model_tests

    return run_multi_model_tests(iterations=iterations, output_path=output_path)
def run_skill_gen(
    model: str = "gemma3:4b",
    analysis_model: Optional[str] = None,
    count: int = 1,
    ideas: Optional[list] = None,
):
    """Run automated skill generation.

    Args:
        model: The model to use for generation (default: gemma3:4b)
        analysis_model: Optional model for vibe testing (defaults to main model)
        count: Number of skills to generate (default: 1)
        ideas: Optional list of specific skill ideas
    """
    # Deferred import keeps module import time low.
    from .skill_generator import run_skill_generation as _generate

    return _generate(
        model=model,
        analysis_model=analysis_model,
        count=count,
        ideas=ideas,
    )
def run_skill_editor(port: int = 5000, skills_directory: Optional[str] = None):
    """Run the interactive skill editor server.

    Args:
        port: Port to run the server on (default: 5000)
        skills_directory: Directory containing skill files (default: auto-detect)

    Returns:
        False if the optional Flask dependencies are missing; True after the
        server loop exits normally.
    """
    try:
        from .skill_editor.api import SkillEditorAPI
    except ImportError:
        # Fix: the original bound the exception to an unused `e` and used
        # f-strings with no placeholders; messages are unchanged.
        print("❌ Error: Missing dependencies for skill editor.")
        print("Please install Flask and flask-cors:")
        print(" pip install flask flask-cors")
        return False

    api = SkillEditorAPI(skills_directory=skills_directory, port=port)
    api.run()  # Blocks until the server shuts down.
    return True
def main():
    """CLI entry point.

    Parses command-line arguments and dispatches to exactly one mode:
    hello smoke test, vibe tests, multi-model vibe tests, skill generation,
    the skill-editor web UI, or (default) an interactive chat session.
    All test/generation modes exit with status 0 on success, 1 on failure.
    """
    parser = argparse.ArgumentParser(
        description="OllamaPy v0.8.0 - Terminal chat interface for Ollama with AI skills system",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
 ollamapy # Start chat with default model (gemma3:4b)
 ollamapy --model llama3.2:3b # Use a specific model
 ollamapy --analysis-model gemma2:2b --model llama3.2:7b # Use small model for analysis, large for chat
 ollamapy --system "You are a helpful coding assistant" # Set system message
 ollamapy --vibetest # Run vibe tests with default settings
 ollamapy --vibetest -n 5 # Run vibe tests with 5 iterations each
 ollamapy --vibetest --model llama3.2:3b -n 3 # Custom model and iterations
 ollamapy --vibetest --analysis-model gemma2:2b --model llama3.2:7b # Separate models for testing
 ollamapy --skillgen # Generate a new skill automatically
 ollamapy --skillgen --count 5 # Generate 5 new skills
 ollamapy --skillgen --idea "analyze CSV data" # Generate specific skill
 ollamapy --skillgen --count 3 --model llama3.2:7b # Use specific model
 ollamapy --skill-editor # Launch interactive skill editor web interface
 ollamapy --skill-editor --port 8080 # Use custom port for skill editor
 """,
    )

    parser.add_argument(
        "--model",
        "-m",
        default="gemma3:4b",
        help="Model to use for chat responses (default: gemma3:4b)",
    )

    parser.add_argument(
        "--analysis-model",
        "-a",
        help="Model to use for action analysis (defaults to main model if not specified). Use a smaller, faster model for better performance.",
    )

    parser.add_argument(
        "--system", "-s", help="System message to set context for the AI"
    )

    parser.add_argument(
        "--hello", action="store_true", help="Just print hello and exit (for testing)"
    )

    parser.add_argument(
        "--vibetest",
        action="store_true",
        help="Run built-in vibe tests to evaluate AI decision-making consistency",
    )

    parser.add_argument(
        "--multi-model-vibetest",
        action="store_true",
        help="Run vibe tests across multiple configured models for comprehensive comparison",
    )

    parser.add_argument(
        "--skillgen",
        action="store_true",
        help="Generate new skills automatically using AI",
    )

    parser.add_argument(
        "--count",
        "-c",
        type=int,
        default=1,
        help="Number of skills to generate (default: 1, used with --skillgen)",
    )

    # `append` lets --idea be repeated; args.idea is a list (or None if unused).
    parser.add_argument(
        "--idea",
        "-i",
        action="append",
        help="Specific skill idea to generate (can be used multiple times)",
    )

    # Shared by --vibetest and --multi-model-vibetest.
    parser.add_argument(
        "-n",
        "--iterations",
        type=int,
        default=1,
        help="Number of iterations for vibe tests (default: 1)",
    )

    parser.add_argument(
        "--skill-editor",
        action="store_true",
        help="Launch interactive skill editor web interface",
    )

    parser.add_argument(
        "--port",
        "-p",
        type=int,
        default=5000,
        help="Port for skill editor server (default: 5000)",
    )

    parser.add_argument(
        "--skills-dir",
        help="Directory containing skill files (auto-detected if not specified)",
    )

    args = parser.parse_args()

    # Dispatch: the first matching flag wins; flags are mutually exclusive in
    # practice but not enforced by argparse.
    if args.hello:
        # Smoke-test path: print greetings and return without an explicit exit code.
        print(hello())
        print(greet("Python"))
    elif args.vibetest:
        success = run_vibe_tests(
            model=args.model,
            iterations=args.iterations,
            analysis_model=args.analysis_model,
        )
        sys.exit(0 if success else 1)
    elif args.multi_model_vibetest:
        # Save results to docs for GitHub Pages integration
        from pathlib import Path

        # NOTE(review): assumes this file lives at src/ollamapy/main.py so that
        # three .parent hops reach the project root — confirm if the layout changes.
        project_root = Path(__file__).parent.parent.parent
        output_path = project_root / "docs" / "vibe_test_results.json"

        success = run_multi_model_vibe_tests(
            iterations=args.iterations, output_path=str(output_path)
        )

        if success:
            # Best-effort post-processing step; failures are reported but do
            # not change the exit status.
            print(f"🚀 Running vibe test showcase generator...")
            try:
                import subprocess

                result = subprocess.run(
                    [
                        "python3",
                        str(
                            project_root / "scripts" / "generate_vibe_test_showcase.py"
                        ),
                    ],
                    capture_output=True,
                    text=True,
                )

                if result.returncode == 0:
                    print("✅ Vibe test showcase generated successfully!")
                else:
                    print(f"❌ Error generating showcase: {result.stderr}")
            except Exception as e:
                print(f"❌ Error running showcase generator: {e}")

        sys.exit(0 if success else 1)
    elif args.skillgen:
        # Fall back to the chat model when no dedicated analysis model is given.
        analysis_model = args.analysis_model if args.analysis_model else args.model
        success = run_skill_gen(
            model=args.model,
            analysis_model=analysis_model,
            count=args.count,
            ideas=args.idea,
        )
        sys.exit(0 if success else 1)
    elif args.skill_editor:
        success = run_skill_editor(port=args.port, skills_directory=args.skills_dir)
        sys.exit(0 if success else 1)
    else:
        # Default mode: interactive chat.
        chat(
            model=args.model,
            system=args.system,
            analysis_model=args.analysis_model if args.analysis_model else "gemma3:4b",
        )
if __name__ == "__main__":
    # Allow direct execution (python -m ollamapy.main) in addition to the
    # console-script entry point.
    main()