Coverage for src/ollamapy/main.py: 78%

89 statements  

coverage.py v7.10.6, created at 2025-09-01 12:29 -0400

1"""Main module with Ollama chat functionality.""" 

2 

3import argparse 

4import sys 

5from typing import Optional 

6from .ollama_client import OllamaClient 

7from .model_manager import ModelManager 

8from .analysis_engine import AnalysisEngine 

9from .chat_session import ChatSession 

10from .terminal_interface import TerminalInterface 

11from .ai_query import AIQuery 

12 

13 

def hello():
    """Return a hello message."""
    return "Hello, World!"


def greet(name):
    """Greet someone by name."""
    return f"Hello, {name}!"


def chat(
    model: str = "gemma3:4b",
    system: str = "You are a helpful assistant.",
    analysis_model: str = "gemma3:4b",
):
    """Start a chat session with Ollama.

    Args:
        model: The model to use for chat (default: gemma3:4b)
        system: Optional system message to set context
        analysis_model: Optional separate model for action analysis (defaults to main model)
    """
    # Create the components
    client = OllamaClient()
    model_manager = ModelManager(client)
    analysis_engine = AnalysisEngine(analysis_model, client)
    chat_session = ChatSession(model, client, system)
    ai_query = AIQuery(client, model=analysis_model)

    # Create and run the terminal interface
    terminal_interface = TerminalInterface(
        model_manager, analysis_engine, chat_session, ai_query
    )
    terminal_interface.run()

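# Example: a minimal sketch of calling chat() directly from Python instead of via
# the CLI; the model name and system prompt below are placeholders and assume the
# model is already pulled in the local Ollama installation.
#
#     from ollamapy.main import chat
#     chat(model="gemma3:4b", system="You are a concise coding assistant.")
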

def run_vibe_tests(
    model: str = "gemma3:4b", iterations: int = 1, analysis_model: str = "gemma3:4b"
):
    """Run built-in vibe tests.

    Args:
        model: The model to use for testing (default: gemma3:4b)
        iterations: Number of iterations per test (default: 1)
        analysis_model: Optional separate model for action analysis (defaults to main model)
    """
    from .vibe_tests import run_vibe_tests as run_tests

    return run_tests(model=model, iterations=iterations, analysis_model=analysis_model)

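# Example: a sketch of running the vibe tests programmatically with a smaller model
# for analysis; both model names are placeholders, and the return value is treated
# as a truthy success flag (as main() does below).
#
#     success = run_vibe_tests(model="llama3.2:3b", iterations=3, analysis_model="gemma2:2b")
#     print("passed" if success else "failed")
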

def run_multi_model_vibe_tests(iterations: int = 5, output_path: Optional[str] = None):
    """Run vibe tests across multiple configured models for comprehensive comparison.

    Args:
        iterations: Number of iterations per test (default: 5)
        output_path: Path to save detailed JSON results for GitHub Pages
    """
    from .multi_model_vibe_tests import run_multi_model_tests

    return run_multi_model_tests(iterations=iterations, output_path=output_path)


def run_skill_gen(
    model: str = "gemma3:4b",
    analysis_model: Optional[str] = None,
    count: int = 1,
    ideas: Optional[list] = None,
):
    """Run automated skill generation.

    Args:
        model: The model to use for generation (default: gemma3:4b)
        analysis_model: Optional model for vibe testing (defaults to main model)
        count: Number of skills to generate (default: 1)
        ideas: Optional list of specific skill ideas
    """
    from .skill_generator import run_skill_generation

    return run_skill_generation(
        model=model, analysis_model=analysis_model, count=count, ideas=ideas
    )

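# Example: a sketch of generating skills from explicit ideas; the idea strings are
# placeholders and the default model is assumed to be available locally.
#
#     run_skill_gen(count=2, ideas=["analyze CSV data", "summarize a web page"])
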

def run_skill_editor(port: int = 5000, skills_directory: Optional[str] = None):
    """Run the interactive skill editor server.

    Args:
        port: Port to run the server on (default: 5000)
        skills_directory: Directory containing skill files (default: auto-detect)
    """
    try:
        from .skill_editor.api import SkillEditorAPI
    except ImportError as e:
        print(f"❌ Error: Missing dependencies for skill editor: {e}")
        print("Please install Flask and flask-cors:")
        print("    pip install flask flask-cors")
        return False

    api = SkillEditorAPI(skills_directory=skills_directory, port=port)
    api.run()
    return True

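# Example: a sketch of launching the skill editor on a non-default port, assuming
# Flask and flask-cors are installed; the port value is a placeholder.
#
#     run_skill_editor(port=8080)
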

def main():
    """CLI entry point."""
    parser = argparse.ArgumentParser(
        description="OllamaPy v0.8.0 - Terminal chat interface for Ollama with AI skills system",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  ollamapy                                               # Start chat with default model (gemma3:4b)
  ollamapy --model llama3.2:3b                           # Use a specific model
  ollamapy --analysis-model gemma2:2b --model llama3.2:7b    # Use small model for analysis, large for chat
  ollamapy --system "You are a helpful coding assistant"     # Set system message
  ollamapy --vibetest                                    # Run vibe tests with default settings
  ollamapy --vibetest -n 5                               # Run vibe tests with 5 iterations each
  ollamapy --vibetest --model llama3.2:3b -n 3           # Custom model and iterations
  ollamapy --vibetest --analysis-model gemma2:2b --model llama3.2:7b   # Separate models for testing
  ollamapy --skillgen                                    # Generate a new skill automatically
  ollamapy --skillgen --count 5                          # Generate 5 new skills
  ollamapy --skillgen --idea "analyze CSV data"          # Generate specific skill
  ollamapy --skillgen --count 3 --model llama3.2:7b      # Use specific model
  ollamapy --skill-editor                                # Launch interactive skill editor web interface
  ollamapy --skill-editor --port 8080                    # Use custom port for skill editor
""",
    )


    parser.add_argument(
        "--model",
        "-m",
        default="gemma3:4b",
        help="Model to use for chat responses (default: gemma3:4b)",
    )

    parser.add_argument(
        "--analysis-model",
        "-a",
        help="Model to use for action analysis (defaults to main model if not specified). Use a smaller, faster model for better performance.",
    )

    parser.add_argument(
        "--system", "-s", help="System message to set context for the AI"
    )

    parser.add_argument(
        "--hello", action="store_true", help="Just print hello and exit (for testing)"
    )

    parser.add_argument(
        "--vibetest",
        action="store_true",
        help="Run built-in vibe tests to evaluate AI decision-making consistency",
    )

    parser.add_argument(
        "--multi-model-vibetest",
        action="store_true",
        help="Run vibe tests across multiple configured models for comprehensive comparison",
    )

    parser.add_argument(
        "--skillgen",
        action="store_true",
        help="Generate new skills automatically using AI",
    )

    parser.add_argument(
        "--count",
        "-c",
        type=int,
        default=1,
        help="Number of skills to generate (default: 1, used with --skillgen)",
    )

    parser.add_argument(
        "--idea",
        "-i",
        action="append",
        help="Specific skill idea to generate (can be used multiple times)",
    )

    parser.add_argument(
        "-n",
        "--iterations",
        type=int,
        default=1,
        help="Number of iterations for vibe tests (default: 1)",
    )

    parser.add_argument(
        "--skill-editor",
        action="store_true",
        help="Launch interactive skill editor web interface",
    )

    parser.add_argument(
        "--port",
        "-p",
        type=int,
        default=5000,
        help="Port for skill editor server (default: 5000)",
    )

    parser.add_argument(
        "--skills-dir",
        help="Directory containing skill files (auto-detected if not specified)",
    )

    args = parser.parse_args()


    if args.hello:
        print(hello())
        print(greet("Python"))
    elif args.vibetest:
        success = run_vibe_tests(
            model=args.model,
            iterations=args.iterations,
            analysis_model=args.analysis_model,
        )
        sys.exit(0 if success else 1)
    elif args.multi_model_vibetest:
        # Save results to docs for GitHub Pages integration
        from pathlib import Path

        project_root = Path(__file__).parent.parent.parent
        output_path = project_root / "docs" / "vibe_test_results.json"

        success = run_multi_model_vibe_tests(
            iterations=args.iterations, output_path=str(output_path)
        )

        if success:
            print("🚀 Running vibe test showcase generator...")
            try:
                import subprocess

                result = subprocess.run(
                    [
                        "python3",
                        str(
                            project_root / "scripts" / "generate_vibe_test_showcase.py"
                        ),
                    ],
                    capture_output=True,
                    text=True,
                )

                if result.returncode == 0:
                    print("✅ Vibe test showcase generated successfully!")
                else:
                    print(f"❌ Error generating showcase: {result.stderr}")
            except Exception as e:
                print(f"❌ Error running showcase generator: {e}")

        sys.exit(0 if success else 1)
    elif args.skillgen:
        analysis_model = args.analysis_model if args.analysis_model else args.model
        success = run_skill_gen(
            model=args.model,
            analysis_model=analysis_model,
            count=args.count,
            ideas=args.idea,
        )
        sys.exit(0 if success else 1)
    elif args.skill_editor:
        success = run_skill_editor(port=args.port, skills_directory=args.skills_dir)
        sys.exit(0 if success else 1)
    else:
        chat(
            model=args.model,
            system=args.system if args.system else "You are a helpful assistant.",
            analysis_model=args.analysis_model if args.analysis_model else "gemma3:4b",
        )


if __name__ == "__main__":
    main()