updates
This commit is contained in:
7
.stylua.toml
Normal file
7
.stylua.toml
Normal file
@@ -0,0 +1,7 @@
|
||||
column_width = 120
|
||||
line_endings = "Unix"
|
||||
indent_type = "Tabs"
|
||||
indent_width = 4
|
||||
quote_style = "AutoPreferDouble"
|
||||
call_parentheses = "Input"
|
||||
collapse_simple_statement = "Never"
|
||||
250
DOCUMENTATION_INDEX.md
Normal file
250
DOCUMENTATION_INDEX.md
Normal file
@@ -0,0 +1,250 @@
|
||||
# 📑 Documentation Index
|
||||
|
||||
## 🎯 Quick Navigation
|
||||
|
||||
### 🚀 Getting Started (Start Here!)
|
||||
1. **[START_HERE.md](START_HERE.md)** - 5-minute quick start guide
|
||||
2. **[IMPLEMENTATION_SUMMARY.md](IMPLEMENTATION_SUMMARY.md)** - Overview of what was done
|
||||
|
||||
### 📖 Main Documentation
|
||||
3. **[README_OLLAMA_INTEGRATION.md](README_OLLAMA_INTEGRATION.md)** - Complete overview and features
|
||||
|
||||
### 🔧 Setup & Configuration
|
||||
4. **[docs/OLLAMA_SETUP.md](docs/OLLAMA_SETUP.md)** - Comprehensive setup guide
|
||||
5. **[docs/OLLAMA_QUICK_SETUP.md](docs/OLLAMA_QUICK_SETUP.md)** - Quick setup for other machines
|
||||
6. **[docs/ollama_env_example.sh](docs/ollama_env_example.sh)** - Shell configuration example
|
||||
|
||||
### 📚 Reference Materials
|
||||
7. **[docs/QUICK_REFERENCE.md](docs/QUICK_REFERENCE.md)** - Quick reference card (print this!)
|
||||
8. **[docs/ARCHITECTURE.md](docs/ARCHITECTURE.md)** - Network diagrams and data flow
|
||||
9. **[docs/IMPLEMENTATION_CHECKLIST.md](docs/IMPLEMENTATION_CHECKLIST.md)** - Step-by-step checklist
|
||||
|
||||
### 🆘 Troubleshooting & Help
|
||||
10. **[docs/TROUBLESHOOTING.md](docs/TROUBLESHOOTING.md)** - Common issues and solutions
|
||||
11. **[docs/IMPLEMENTATION_COMPLETE.md](docs/IMPLEMENTATION_COMPLETE.md)** - Implementation details
|
||||
12. **[docs/INTEGRATION_SUMMARY.md](docs/INTEGRATION_SUMMARY.md)** - Summary of changes
|
||||
|
||||
---
|
||||
|
||||
## 📋 Reading Guide by Use Case
|
||||
|
||||
### "I just want to get it working quickly"
|
||||
1. Read: [START_HERE.md](START_HERE.md)
|
||||
2. Follow the 5-minute setup
|
||||
3. Done!
|
||||
|
||||
### "I want to understand how it works"
|
||||
1. Read: [README_OLLAMA_INTEGRATION.md](README_OLLAMA_INTEGRATION.md)
|
||||
2. Review: [docs/ARCHITECTURE.md](docs/ARCHITECTURE.md)
|
||||
3. Check: [docs/QUICK_REFERENCE.md](docs/QUICK_REFERENCE.md)
|
||||
|
||||
### "I'm setting up for the first time"
|
||||
1. Read: [docs/OLLAMA_SETUP.md](docs/OLLAMA_SETUP.md)
|
||||
2. Follow: [docs/IMPLEMENTATION_CHECKLIST.md](docs/IMPLEMENTATION_CHECKLIST.md)
|
||||
3. Test: Use the testing section in [docs/OLLAMA_SETUP.md](docs/OLLAMA_SETUP.md)
|
||||
|
||||
### "I'm setting up other machines"
|
||||
1. Read: [docs/OLLAMA_QUICK_SETUP.md](docs/OLLAMA_QUICK_SETUP.md)
|
||||
2. Use: [docs/ollama_env_example.sh](docs/ollama_env_example.sh)
|
||||
3. Test: Follow the testing section
|
||||
|
||||
### "Something isn't working"
|
||||
1. Check: [docs/TROUBLESHOOTING.md](docs/TROUBLESHOOTING.md)
|
||||
2. Find your issue and follow the solution
|
||||
3. If still stuck, check [docs/ARCHITECTURE.md](docs/ARCHITECTURE.md) to understand the flow
|
||||
|
||||
### "I need a quick reference"
|
||||
1. Print: [docs/QUICK_REFERENCE.md](docs/QUICK_REFERENCE.md)
|
||||
2. Keep it handy while setting up
|
||||
|
||||
---
|
||||
|
||||
## 🗂️ File Organization
|
||||
|
||||
```
|
||||
Root Level
|
||||
├── START_HERE.md ........................ 5-minute quick start
|
||||
├── IMPLEMENTATION_SUMMARY.md ........... Overview of changes
|
||||
├── README_OLLAMA_INTEGRATION.md ........ Complete guide
|
||||
├── DOCUMENTATION_INDEX.md ............. This file
|
||||
│
|
||||
└── docs/
|
||||
├── OLLAMA_SETUP.md ................ Full setup guide
|
||||
├── OLLAMA_QUICK_SETUP.md .......... Quick setup for other machines
|
||||
├── QUICK_REFERENCE.md ............ Quick reference card
|
||||
├── ARCHITECTURE.md ............... Network diagrams
|
||||
├── TROUBLESHOOTING.md ............ Common issues
|
||||
├── IMPLEMENTATION_CHECKLIST.md ... Step-by-step checklist
|
||||
├── IMPLEMENTATION_COMPLETE.md .... Implementation details
|
||||
├── INTEGRATION_SUMMARY.md ........ Summary of changes
|
||||
└── ollama_env_example.sh ......... Shell config example
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 🎯 Document Purposes
|
||||
|
||||
| Document | Purpose | Length | Audience |
|
||||
|----------|---------|--------|----------|
|
||||
| START_HERE.md | Quick start | 2 min | Everyone |
|
||||
| IMPLEMENTATION_SUMMARY.md | Overview | 5 min | Everyone |
|
||||
| README_OLLAMA_INTEGRATION.md | Complete guide | 15 min | Everyone |
|
||||
| OLLAMA_SETUP.md | Detailed setup | 20 min | First-time setup |
|
||||
| OLLAMA_QUICK_SETUP.md | Quick setup | 5 min | Other machines |
|
||||
| QUICK_REFERENCE.md | Reference card | 2 min | Quick lookup |
|
||||
| ARCHITECTURE.md | Technical details | 10 min | Understanding flow |
|
||||
| TROUBLESHOOTING.md | Problem solving | 15 min | When issues occur |
|
||||
| IMPLEMENTATION_CHECKLIST.md | Step-by-step | 20 min | Following setup |
|
||||
| IMPLEMENTATION_COMPLETE.md | Details | 10 min | Understanding changes |
|
||||
| INTEGRATION_SUMMARY.md | Summary | 5 min | Overview |
|
||||
| ollama_env_example.sh | Config example | 2 min | Setting env vars |
|
||||
|
||||
---
|
||||
|
||||
## 🔍 Find What You Need
|
||||
|
||||
### By Topic
|
||||
|
||||
**Setup & Installation**
|
||||
- [docs/OLLAMA_SETUP.md](docs/OLLAMA_SETUP.md) - Full setup
|
||||
- [docs/OLLAMA_QUICK_SETUP.md](docs/OLLAMA_QUICK_SETUP.md) - Quick setup
|
||||
- [docs/IMPLEMENTATION_CHECKLIST.md](docs/IMPLEMENTATION_CHECKLIST.md) - Checklist
|
||||
|
||||
**Configuration**
|
||||
- [docs/ollama_env_example.sh](docs/ollama_env_example.sh) - Environment variables
|
||||
- [README_OLLAMA_INTEGRATION.md](README_OLLAMA_INTEGRATION.md) - Configuration details
|
||||
|
||||
**Understanding**
|
||||
- [docs/ARCHITECTURE.md](docs/ARCHITECTURE.md) - How it works
|
||||
- [docs/QUICK_REFERENCE.md](docs/QUICK_REFERENCE.md) - Quick facts
|
||||
- [IMPLEMENTATION_SUMMARY.md](IMPLEMENTATION_SUMMARY.md) - What changed
|
||||
|
||||
**Troubleshooting**
|
||||
- [docs/TROUBLESHOOTING.md](docs/TROUBLESHOOTING.md) - Common issues
|
||||
- [docs/QUICK_REFERENCE.md](docs/QUICK_REFERENCE.md) - Quick fixes
|
||||
|
||||
**Reference**
|
||||
- [docs/QUICK_REFERENCE.md](docs/QUICK_REFERENCE.md) - Print this!
|
||||
- [docs/ARCHITECTURE.md](docs/ARCHITECTURE.md) - Diagrams
|
||||
|
||||
---
|
||||
|
||||
## ⏱️ Time Estimates
|
||||
|
||||
| Task | Time | Document |
|
||||
|------|------|----------|
|
||||
| Quick start | 5 min | START_HERE.md |
|
||||
| Full setup | 20 min | docs/OLLAMA_SETUP.md |
|
||||
| Other machines | 5 min | docs/OLLAMA_QUICK_SETUP.md |
|
||||
| Understanding | 15 min | docs/ARCHITECTURE.md |
|
||||
| Troubleshooting | 10 min | docs/TROUBLESHOOTING.md |
|
||||
| Reference lookup | 2 min | docs/QUICK_REFERENCE.md |
|
||||
|
||||
---
|
||||
|
||||
## 🎓 Learning Path
|
||||
|
||||
### Beginner (Just want it to work)
|
||||
```
|
||||
START_HERE.md
|
||||
↓
|
||||
Follow 5-minute setup
|
||||
↓
|
||||
Test with <leader>cll
|
||||
↓
|
||||
Done!
|
||||
```
|
||||
|
||||
### Intermediate (Want to understand)
|
||||
```
|
||||
START_HERE.md
|
||||
↓
|
||||
README_OLLAMA_INTEGRATION.md
|
||||
↓
|
||||
docs/ARCHITECTURE.md
|
||||
↓
|
||||
docs/QUICK_REFERENCE.md
|
||||
↓
|
||||
Ready to use and troubleshoot
|
||||
```
|
||||
|
||||
### Advanced (Want all details)
|
||||
```
|
||||
IMPLEMENTATION_SUMMARY.md
|
||||
↓
|
||||
docs/OLLAMA_SETUP.md
|
||||
↓
|
||||
docs/ARCHITECTURE.md
|
||||
↓
|
||||
docs/TROUBLESHOOTING.md
|
||||
↓
|
||||
docs/IMPLEMENTATION_CHECKLIST.md
|
||||
↓
|
||||
Full understanding and mastery
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 🔗 Cross-References
|
||||
|
||||
### From START_HERE.md
|
||||
- See full guide: [README_OLLAMA_INTEGRATION.md](README_OLLAMA_INTEGRATION.md)
|
||||
- Quick reference: [docs/QUICK_REFERENCE.md](docs/QUICK_REFERENCE.md)
|
||||
- Troubleshooting: [docs/TROUBLESHOOTING.md](docs/TROUBLESHOOTING.md)
|
||||
|
||||
### From README_OLLAMA_INTEGRATION.md
|
||||
- Quick start: [START_HERE.md](START_HERE.md)
|
||||
- Architecture: [docs/ARCHITECTURE.md](docs/ARCHITECTURE.md)
|
||||
- Troubleshooting: [docs/TROUBLESHOOTING.md](docs/TROUBLESHOOTING.md)
|
||||
|
||||
### From docs/OLLAMA_SETUP.md
|
||||
- Quick setup: [docs/OLLAMA_QUICK_SETUP.md](docs/OLLAMA_QUICK_SETUP.md)
|
||||
- Checklist: [docs/IMPLEMENTATION_CHECKLIST.md](docs/IMPLEMENTATION_CHECKLIST.md)
|
||||
- Troubleshooting: [docs/TROUBLESHOOTING.md](docs/TROUBLESHOOTING.md)
|
||||
|
||||
### From docs/TROUBLESHOOTING.md
|
||||
- Full setup: [docs/OLLAMA_SETUP.md](docs/OLLAMA_SETUP.md)
|
||||
- Architecture: [docs/ARCHITECTURE.md](docs/ARCHITECTURE.md)
|
||||
- Quick reference: [docs/QUICK_REFERENCE.md](docs/QUICK_REFERENCE.md)
|
||||
|
||||
---
|
||||
|
||||
## 📊 Documentation Statistics
|
||||
|
||||
- **Total Documents**: 12
|
||||
- **Total Pages**: ~100
|
||||
- **Setup Guides**: 3
|
||||
- **Reference Materials**: 3
|
||||
- **Troubleshooting**: 1
|
||||
- **Checklists**: 1
|
||||
- **Examples**: 1
|
||||
- **Summaries**: 2
|
||||
|
||||
---
|
||||
|
||||
## ✅ Checklist for Using Documentation
|
||||
|
||||
- [ ] Read START_HERE.md first
|
||||
- [ ] Bookmark docs/QUICK_REFERENCE.md
|
||||
- [ ] Print docs/QUICK_REFERENCE.md
|
||||
- [ ] Follow docs/OLLAMA_SETUP.md for setup
|
||||
- [ ] Use docs/TROUBLESHOOTING.md if issues occur
|
||||
- [ ] Review docs/ARCHITECTURE.md to understand flow
|
||||
- [ ] Keep docs/ollama_env_example.sh handy for other machines
|
||||
|
||||
---
|
||||
|
||||
## 🎯 Next Steps
|
||||
|
||||
1. **Start Here**: [START_HERE.md](START_HERE.md)
|
||||
2. **Full Guide**: [README_OLLAMA_INTEGRATION.md](README_OLLAMA_INTEGRATION.md)
|
||||
3. **Setup**: [docs/OLLAMA_SETUP.md](docs/OLLAMA_SETUP.md)
|
||||
4. **Reference**: [docs/QUICK_REFERENCE.md](docs/QUICK_REFERENCE.md)
|
||||
|
||||
---
|
||||
|
||||
**Last Updated**: 2026-02-05
|
||||
|
||||
**Total Documentation**: 12 files
|
||||
|
||||
**Status**: ✅ Complete and Ready to Use
|
||||
190
FINAL_SUMMARY.md
Normal file
190
FINAL_SUMMARY.md
Normal file
@@ -0,0 +1,190 @@
|
||||
# 🎉 Implementation Complete!
|
||||
|
||||
## Summary
|
||||
|
||||
Your CodeCompanion configuration has been successfully updated to support **Ollama** with **Tailscale** network access.
|
||||
|
||||
## What You Get
|
||||
|
||||
✅ **Local Ollama Access** - Use Ollama on your main machine
|
||||
✅ **Remote Access** - Access Ollama from other machines via Tailscale
|
||||
✅ **Easy Switching** - Switch between Claude and Ollama with keymaps
|
||||
✅ **Secure** - All traffic encrypted via Tailscale
|
||||
✅ **Flexible** - Works with any Ollama model
|
||||
✅ **Well Documented** - 14 comprehensive documentation files
|
||||
|
||||
## Files Modified
|
||||
|
||||
### Configuration
|
||||
- `lua/shelbybark/plugins/codecompanion.lua` - Added Ollama adapter and keymaps
|
||||
|
||||
### Documentation (14 files)
|
||||
- `START_HERE.md` - 5-minute quick start
|
||||
- `IMPLEMENTATION_SUMMARY.md` - Overview of changes
|
||||
- `README_OLLAMA_INTEGRATION.md` - Complete guide
|
||||
- `DOCUMENTATION_INDEX.md` - Navigation guide
|
||||
- `docs/OLLAMA_SETUP.md` - Full setup guide
|
||||
- `docs/OLLAMA_QUICK_SETUP.md` - Quick setup for other machines
|
||||
- `docs/QUICK_REFERENCE.md` - Quick reference card
|
||||
- `docs/ARCHITECTURE.md` - Network diagrams
|
||||
- `docs/TROUBLESHOOTING.md` - Common issues and solutions
|
||||
- `docs/IMPLEMENTATION_CHECKLIST.md` - Step-by-step checklist
|
||||
- `docs/IMPLEMENTATION_COMPLETE.md` - Implementation details
|
||||
- `docs/INTEGRATION_SUMMARY.md` - Summary of changes
|
||||
- `docs/ollama_env_example.sh` - Shell configuration example
|
||||
|
||||
## Quick Start (5 Minutes)
|
||||
|
||||
### Step 1: Configure Ollama Server
|
||||
```bash
|
||||
sudo systemctl edit ollama
|
||||
# Add: Environment="OLLAMA_HOST=0.0.0.0:11434"
|
||||
sudo systemctl restart ollama
|
||||
ollama pull mistral
|
||||
tailscale ip -4 # Note the IP
|
||||
```
|
||||
|
||||
### Step 2: Configure Other Machines
|
||||
```bash
|
||||
export OLLAMA_ENDPOINT="http://100.123.45.67:11434"
|
||||
# Add to ~/.zshrc or ~/.bashrc
|
||||
```
|
||||
|
||||
### Step 3: Use in Neovim
|
||||
```vim
|
||||
" Press <leader>cll to chat with Ollama
|
||||
```
|
||||
|
||||
## Key Features
|
||||
|
||||
| Feature | Benefit |
|
||||
|---------|---------|
|
||||
| Environment-Based | No code changes on other machines |
|
||||
| Fallback Support | Works locally without configuration |
|
||||
| Network-Aware | Automatically uses Tailscale |
|
||||
| Easy Switching | Use keymaps to switch models |
|
||||
| Secure | Encrypted via Tailscale |
|
||||
| Flexible | Supports multiple models |
|
||||
|
||||
## Keymaps
|
||||
|
||||
```
|
||||
<leader>cll → Chat with Ollama
|
||||
<leader>cc → Chat with Claude Haiku
|
||||
<leader>cs → Chat with Claude Sonnet
|
||||
<leader>co → Chat with Claude Opus
|
||||
<leader>ca → Show CodeCompanion actions
|
||||
```
|
||||
|
||||
## Documentation
|
||||
|
||||
### Start Here
|
||||
1. **[START_HERE.md](START_HERE.md)** - 5-minute quick start
|
||||
2. **[DOCUMENTATION_INDEX.md](DOCUMENTATION_INDEX.md)** - Navigation guide
|
||||
|
||||
### Setup
|
||||
3. **[docs/OLLAMA_SETUP.md](docs/OLLAMA_SETUP.md)** - Full setup guide
|
||||
4. **[docs/OLLAMA_QUICK_SETUP.md](docs/OLLAMA_QUICK_SETUP.md)** - Quick setup
|
||||
|
||||
### Reference
|
||||
5. **[docs/QUICK_REFERENCE.md](docs/QUICK_REFERENCE.md)** - Quick reference (print this!)
|
||||
6. **[docs/ARCHITECTURE.md](docs/ARCHITECTURE.md)** - Network diagrams
|
||||
|
||||
### Help
|
||||
7. **[docs/TROUBLESHOOTING.md](docs/TROUBLESHOOTING.md)** - Common issues
|
||||
8. **[README_OLLAMA_INTEGRATION.md](README_OLLAMA_INTEGRATION.md)** - Complete guide
|
||||
|
||||
## Architecture
|
||||
|
||||
```
|
||||
Your Machines (Tailscale Network)
|
||||
│
|
||||
├─ Machine A (Ollama Server)
|
||||
│ └─ Ollama Service :11434
|
||||
│ └─ Tailscale IP: 100.123.45.67
|
||||
│
|
||||
├─ Machine B (Laptop)
|
||||
│ └─ Neovim + CodeCompanion
|
||||
│ └─ OLLAMA_ENDPOINT=http://100.123.45.67:11434
|
||||
│
|
||||
└─ Machine C (Desktop)
|
||||
└─ Neovim + CodeCompanion
|
||||
└─ OLLAMA_ENDPOINT=http://100.123.45.67:11434
|
||||
```
|
||||
|
||||
## Recommended Models
|
||||
|
||||
| Model | Size | Speed | Quality | Best For |
|
||||
|-------|------|-------|---------|----------|
|
||||
| **mistral** | 7B | ⚡⚡ | ⭐⭐⭐ | **Recommended** |
|
||||
| neural-chat | 7B | ⚡⚡ | ⭐⭐⭐ | Conversation |
|
||||
| orca-mini | 3B | ⚡⚡⚡ | ⭐⭐ | Quick answers |
|
||||
| llama2 | 7B | ⚡⚡ | ⭐⭐⭐ | General purpose |
|
||||
| dolphin-mixtral | 8x7B | ⚡ | ⭐⭐⭐⭐ | Complex tasks |
|
||||
|
||||
## Testing
|
||||
|
||||
```bash
|
||||
# Test Ollama is running
|
||||
curl http://localhost:11434/api/tags
|
||||
|
||||
# Test remote access
|
||||
curl http://100.x.x.x:11434/api/tags
|
||||
|
||||
# Test in Neovim
|
||||
nvim
|
||||
# Press <leader>cll
|
||||
# Type a message and press Enter
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
| Issue | Solution |
|
||||
|-------|----------|
|
||||
| Connection refused | Check Ollama: `ps aux \| grep ollama` |
|
||||
| Model not found | Pull it: `ollama pull mistral` |
|
||||
| Can't reach remote | Check Tailscale: `tailscale status` |
|
||||
| Env var not working | Reload shell: `source ~/.zshrc` |
|
||||
| Slow responses | Try smaller model: `ollama pull orca-mini` |
|
||||
|
||||
**Full troubleshooting**: See [docs/TROUBLESHOOTING.md](docs/TROUBLESHOOTING.md)
|
||||
|
||||
## Next Steps
|
||||
|
||||
1. ✅ Read [START_HERE.md](START_HERE.md)
|
||||
2. ✅ Follow the 5-minute setup
|
||||
3. ✅ Test with `<leader>cll` in Neovim
|
||||
4. ✅ Enjoy local LLM access across your network!
|
||||
|
||||
## Support
|
||||
|
||||
- **Setup Issues**: See [docs/OLLAMA_SETUP.md](docs/OLLAMA_SETUP.md)
|
||||
- **Troubleshooting**: See [docs/TROUBLESHOOTING.md](docs/TROUBLESHOOTING.md)
|
||||
- **Understanding**: See [docs/ARCHITECTURE.md](docs/ARCHITECTURE.md)
|
||||
- **Quick Reference**: See [docs/QUICK_REFERENCE.md](docs/QUICK_REFERENCE.md)
|
||||
|
||||
## Status
|
||||
|
||||
| Component | Status |
|
||||
|-----------|--------|
|
||||
| Configuration | ✅ Complete |
|
||||
| Documentation | ✅ Complete (14 files) |
|
||||
| Keymaps | ✅ Added |
|
||||
| Environment Support | ✅ Implemented |
|
||||
| Testing | ⏳ Ready for testing |
|
||||
|
||||
---
|
||||
|
||||
## 🚀 Ready to Go!
|
||||
|
||||
**Start with**: [START_HERE.md](START_HERE.md)
|
||||
|
||||
**Questions?**: Check [DOCUMENTATION_INDEX.md](DOCUMENTATION_INDEX.md)
|
||||
|
||||
**Issues?**: Check [docs/TROUBLESHOOTING.md](docs/TROUBLESHOOTING.md)
|
||||
|
||||
---
|
||||
|
||||
**Date**: 2026-02-05
|
||||
**Status**: ✅ Ready to Use
|
||||
**Configuration Version**: 1.0
|
||||
243
IMPLEMENTATION_SUMMARY.md
Normal file
243
IMPLEMENTATION_SUMMARY.md
Normal file
@@ -0,0 +1,243 @@
|
||||
# 📊 Implementation Summary
|
||||
|
||||
## ✅ What Was Done
|
||||
|
||||
Your CodeCompanion configuration has been successfully updated to support **Ollama** with **Tailscale** network access.
|
||||
|
||||
## 📝 Files Modified
|
||||
|
||||
### 1. Configuration File (Modified)
|
||||
```
|
||||
lua/shelbybark/plugins/codecompanion.lua
|
||||
├─ Added Ollama adapter (lines 30-45)
|
||||
├─ Configured environment variable support
|
||||
└─ Added Ollama keymaps <leader>cll (lines 223-237)
|
||||
```
|
||||
|
||||
**Key Changes:**
|
||||
- Ollama adapter reads `OLLAMA_ENDPOINT` environment variable
|
||||
- Falls back to `http://localhost:11434` if not set
|
||||
- Default model: `mistral` (configurable)
|
||||
|
||||
## 📚 Documentation Created
|
||||
|
||||
### Main Entry Points
|
||||
1. **`START_HERE.md`** ← Begin here! (5-minute setup)
|
||||
2. **`README_OLLAMA_INTEGRATION.md`** ← Full overview
|
||||
|
||||
### Setup & Configuration
|
||||
3. **`docs/OLLAMA_SETUP.md`** - Comprehensive setup guide
|
||||
4. **`docs/OLLAMA_QUICK_SETUP.md`** - Quick reference for other machines
|
||||
5. **`docs/ollama_env_example.sh`** - Shell configuration example
|
||||
|
||||
### Reference & Troubleshooting
|
||||
6. **`docs/QUICK_REFERENCE.md`** - Quick reference card
|
||||
7. **`docs/ARCHITECTURE.md`** - Network diagrams and data flow
|
||||
8. **`docs/TROUBLESHOOTING.md`** - Common issues and solutions
|
||||
9. **`docs/IMPLEMENTATION_CHECKLIST.md`** - Step-by-step checklist
|
||||
10. **`docs/IMPLEMENTATION_COMPLETE.md`** - Implementation details
|
||||
11. **`docs/INTEGRATION_SUMMARY.md`** - Overview of changes
|
||||
|
||||
## 🎯 How to Use
|
||||
|
||||
### On Your Ollama Server Machine
|
||||
```bash
|
||||
# 1. Configure Ollama to listen on network
|
||||
sudo systemctl edit ollama
|
||||
# Add: Environment="OLLAMA_HOST=0.0.0.0:11434"
|
||||
sudo systemctl restart ollama
|
||||
|
||||
# 2. Pull a model
|
||||
ollama pull mistral
|
||||
|
||||
# 3. Find your Tailscale IP
|
||||
tailscale ip -4
|
||||
# Note: 100.123.45.67 (example)
|
||||
```
|
||||
|
||||
### On Other Machines
|
||||
```bash
|
||||
# 1. Set environment variable
|
||||
export OLLAMA_ENDPOINT="http://100.123.45.67:11434"
|
||||
|
||||
# 2. Add to shell config (~/.zshrc, ~/.bashrc, etc.)
|
||||
echo 'export OLLAMA_ENDPOINT="http://100.123.45.67:11434"' >> ~/.zshrc
|
||||
source ~/.zshrc
|
||||
|
||||
# 3. Test connection
|
||||
curl $OLLAMA_ENDPOINT/api/tags
|
||||
```
|
||||
|
||||
### In Neovim
|
||||
```vim
|
||||
" Press <leader>cll to chat with Ollama
|
||||
" Press <leader>cc to chat with Claude
|
||||
" Press <leader>ca to see all actions
|
||||
```
|
||||
|
||||
## 🔑 Key Features
|
||||
|
||||
| Feature | Benefit |
|
||||
|---------|---------|
|
||||
| **Environment-Based** | No code changes needed on other machines |
|
||||
| **Fallback Support** | Works locally without any configuration |
|
||||
| **Network-Aware** | Automatically uses Tailscale for remote access |
|
||||
| **Easy Switching** | Use keymaps to switch between Claude and Ollama |
|
||||
| **Secure** | All traffic encrypted via Tailscale |
|
||||
| **Flexible** | Supports multiple models and configurations |
|
||||
|
||||
## 📊 Architecture
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────┐
|
||||
│ TAILSCALE NETWORK │
|
||||
│ (Encrypted VPN Tunnel) │
|
||||
└─────────────────────────────────────────────────────────┘
|
||||
│ │
|
||||
┌────▼──────────────┐ ┌─────────▼────────┐
|
||||
│ OLLAMA SERVER │ │ OTHER MACHINES │
|
||||
│ (Main Machine) │ │ (Laptop, etc.) │
|
||||
│ │ │ │
|
||||
│ Ollama :11434 │◄─────────│ Neovim + │
|
||||
│ Tailscale IP: │ Encrypted│ CodeCompanion │
|
||||
│ 100.123.45.67 │ Tunnel │ │
|
||||
└───────────────────┘ └──────────────────┘
|
||||
```
|
||||
|
||||
## ⌨️ Keymaps
|
||||
|
||||
```
|
||||
<leader>cll → Chat with Ollama
|
||||
<leader>cc → Chat with Claude Haiku
|
||||
<leader>cs → Chat with Claude Sonnet
|
||||
<leader>co → Chat with Claude Opus
|
||||
<leader>ca → Show CodeCompanion actions
|
||||
<leader>cm → Show current model
|
||||
```
|
||||
|
||||
## 🧪 Quick Test
|
||||
|
||||
```bash
|
||||
# Test 1: Ollama is running
|
||||
curl http://localhost:11434/api/tags
|
||||
|
||||
# Test 2: Remote access works
|
||||
curl http://100.x.x.x:11434/api/tags
|
||||
|
||||
# Test 3: Neovim integration
|
||||
nvim
|
||||
# Press <leader>cll
|
||||
# Type a message and press Enter
|
||||
```
|
||||
|
||||
## 📋 Recommended Models
|
||||
|
||||
| Model | Size | Speed | Quality | Use Case |
|
||||
|-------|------|-------|---------|----------|
|
||||
| **mistral** | 7B | ⚡⚡ | ⭐⭐⭐ | **Recommended** |
|
||||
| neural-chat | 7B | ⚡⚡ | ⭐⭐⭐ | Conversation |
|
||||
| orca-mini | 3B | ⚡⚡⚡ | ⭐⭐ | Quick answers |
|
||||
| llama2 | 7B | ⚡⚡ | ⭐⭐⭐ | General purpose |
|
||||
| dolphin-mixtral | 8x7B | ⚡ | ⭐⭐⭐⭐ | Complex tasks |
|
||||
|
||||
## 🚀 Getting Started
|
||||
|
||||
### Step 1: Read Documentation
|
||||
- Start with: `START_HERE.md`
|
||||
- Then read: `README_OLLAMA_INTEGRATION.md`
|
||||
|
||||
### Step 2: Configure Ollama Server
|
||||
- Follow: `docs/OLLAMA_SETUP.md`
|
||||
- Or quick version: `docs/OLLAMA_QUICK_SETUP.md`
|
||||
|
||||
### Step 3: Configure Other Machines
|
||||
- Use: `docs/ollama_env_example.sh`
|
||||
- Or follow: `docs/OLLAMA_QUICK_SETUP.md`
|
||||
|
||||
### Step 4: Test & Use
|
||||
- Test with: `curl $OLLAMA_ENDPOINT/api/tags`
|
||||
- Use in Neovim: Press `<leader>cll`
|
||||
|
||||
## 🆘 Troubleshooting
|
||||
|
||||
| Issue | Solution |
|
||||
|-------|----------|
|
||||
| Connection refused | Check Ollama is running: `ps aux \| grep ollama` |
|
||||
| Model not found | Pull the model: `ollama pull mistral` |
|
||||
| Can't reach remote | Verify Tailscale: `tailscale status` |
|
||||
| Env var not working | Reload shell: `source ~/.zshrc` |
|
||||
| Slow responses | Try smaller model: `ollama pull orca-mini` |
|
||||
|
||||
**Full troubleshooting**: See `docs/TROUBLESHOOTING.md`
|
||||
|
||||
## 📁 File Structure
|
||||
|
||||
```
|
||||
neovim_config/
|
||||
├── START_HERE.md (NEW) ← Start here!
|
||||
├── README_OLLAMA_INTEGRATION.md (NEW)
|
||||
├── lua/shelbybark/plugins/
|
||||
│ └── codecompanion.lua (MODIFIED)
|
||||
└── docs/
|
||||
├── OLLAMA_SETUP.md (NEW)
|
||||
├── OLLAMA_QUICK_SETUP.md (NEW)
|
||||
├── QUICK_REFERENCE.md (NEW)
|
||||
├── ARCHITECTURE.md (NEW)
|
||||
├── TROUBLESHOOTING.md (NEW)
|
||||
├── IMPLEMENTATION_CHECKLIST.md (NEW)
|
||||
├── IMPLEMENTATION_COMPLETE.md (NEW)
|
||||
├── INTEGRATION_SUMMARY.md (NEW)
|
||||
└── ollama_env_example.sh (NEW)
|
||||
```
|
||||
|
||||
## 💡 Pro Tips
|
||||
|
||||
1. **Use mistral model** - Best balance of speed and quality
|
||||
2. **Monitor network latency** - `ping 100.x.x.x` should be < 50ms
|
||||
3. **Keep Tailscale updated** - Better performance and security
|
||||
4. **Run Ollama on GPU** - Much faster inference if available
|
||||
5. **Use smaller models** - orca-mini for quick answers
|
||||
|
||||
## 🔐 Security Features
|
||||
|
||||
✅ **Encrypted Traffic** - All data encrypted via Tailscale
|
||||
✅ **Private IPs** - Uses Tailscale private IP addresses (100.x.x.x)
|
||||
✅ **No Public Exposure** - Ollama only accessible via Tailscale
|
||||
✅ **Network Isolation** - Separate from public internet
|
||||
✅ **End-to-End** - Secure connection from client to server
|
||||
|
||||
## 📞 Support Resources
|
||||
|
||||
- **Ollama**: https://github.com/ollama/ollama
|
||||
- **Tailscale**: https://tailscale.com/kb/
|
||||
- **CodeCompanion**: https://github.com/olimorris/codecompanion.nvim
|
||||
- **Neovim**: https://neovim.io/
|
||||
|
||||
## ✨ What's Next?
|
||||
|
||||
1. ✅ Read `START_HERE.md`
|
||||
2. ✅ Follow the 5-minute setup
|
||||
3. ✅ Test with `<leader>cll` in Neovim
|
||||
4. ✅ Enjoy local LLM access across your network!
|
||||
|
||||
---
|
||||
|
||||
## 📊 Status
|
||||
|
||||
| Component | Status |
|
||||
|-----------|--------|
|
||||
| Configuration | ✅ Complete |
|
||||
| Documentation | ✅ Complete |
|
||||
| Keymaps | ✅ Added |
|
||||
| Environment Support | ✅ Implemented |
|
||||
| Testing | ⏳ Ready for testing |
|
||||
|
||||
---
|
||||
|
||||
**Implementation Date**: 2026-02-05
|
||||
|
||||
**Configuration Version**: 1.0
|
||||
|
||||
**Status**: ✅ Ready to Use
|
||||
|
||||
**Next Step**: Read `START_HERE.md`
|
||||
264
README_OLLAMA_INTEGRATION.md
Normal file
264
README_OLLAMA_INTEGRATION.md
Normal file
@@ -0,0 +1,264 @@
|
||||
# CodeCompanion + Ollama + Tailscale Integration
|
||||
|
||||
## 🎯 What This Does
|
||||
|
||||
This setup allows you to use Ollama (local LLM) with CodeCompanion across your entire Tailscale network. You can:
|
||||
|
||||
- ✅ Use Ollama locally on your main machine
|
||||
- ✅ Access Ollama from other machines via Tailscale (no local Ollama needed)
|
||||
- ✅ Switch between Claude and Ollama models instantly
|
||||
- ✅ Keep your configuration synced across machines
|
||||
- ✅ Maintain privacy with encrypted Tailscale connections
|
||||
|
||||
## 🚀 Quick Start
|
||||
|
||||
### Step 1: On Your Ollama Server (Main Machine)
|
||||
|
||||
```bash
|
||||
# Ensure Ollama listens on all interfaces
|
||||
sudo systemctl edit ollama
|
||||
# Add: Environment="OLLAMA_HOST=0.0.0.0:11434"
|
||||
# Save and exit
|
||||
sudo systemctl restart ollama
|
||||
|
||||
# Pull a model
|
||||
ollama pull mistral
|
||||
|
||||
# Find your Tailscale IP
|
||||
tailscale ip -4
|
||||
# Note this down (e.g., 100.123.45.67)
|
||||
```
|
||||
|
||||
### Step 2: On Other Machines
|
||||
|
||||
Add to your shell config (`~/.zshrc`, `~/.bashrc`, etc.):
|
||||
|
||||
```bash
|
||||
export OLLAMA_ENDPOINT="http://100.123.45.67:11434"
|
||||
```
|
||||
|
||||
Replace `100.123.45.67` with your actual Tailscale IP.
|
||||
|
||||
### Step 3: Use in Neovim
|
||||
|
||||
```vim
|
||||
" Press <leader>cll to chat with Ollama
|
||||
" Press <leader>cc to chat with Claude
|
||||
" Press <leader>ca to see all actions
|
||||
```
|
||||
|
||||
## 📁 Files Changed/Created
|
||||
|
||||
### Modified
|
||||
- `lua/shelbybark/plugins/codecompanion.lua` - Added Ollama adapter and keymaps
|
||||
|
||||
### Created Documentation
|
||||
- `docs/OLLAMA_SETUP.md` - Comprehensive setup guide
|
||||
- `docs/OLLAMA_QUICK_SETUP.md` - Quick reference
|
||||
- `docs/ARCHITECTURE.md` - Network architecture diagrams
|
||||
- `docs/TROUBLESHOOTING.md` - Common issues and solutions
|
||||
- `docs/IMPLEMENTATION_CHECKLIST.md` - Step-by-step checklist
|
||||
- `docs/INTEGRATION_SUMMARY.md` - Overview of changes
|
||||
- `docs/ollama_env_example.sh` - Shell configuration example
|
||||
|
||||
## 🔑 Key Features
|
||||
|
||||
### Environment-Based Configuration
|
||||
```lua
|
||||
-- Automatically reads OLLAMA_ENDPOINT environment variable
|
||||
local ollama_endpoint = os.getenv("OLLAMA_ENDPOINT") or "http://localhost:11434"
|
||||
```
|
||||
|
||||
### Easy Model Switching
|
||||
- `<leader>cll` - Ollama
|
||||
- `<leader>cc` - Claude Haiku
|
||||
- `<leader>cs` - Claude Sonnet
|
||||
- `<leader>co` - Claude Opus
|
||||
|
||||
### Network-Aware
|
||||
- Works locally without any configuration
|
||||
- Works remotely with just one environment variable
|
||||
- Secure via Tailscale encryption
|
||||
|
||||
## 🏗️ Architecture
|
||||
|
||||
```
|
||||
Your Machines (Tailscale Network)
|
||||
│
|
||||
├─ Machine A (Ollama Server)
|
||||
│ └─ Ollama Service :11434
|
||||
│ └─ Tailscale IP: 100.123.45.67
|
||||
│
|
||||
├─ Machine B (Laptop)
|
||||
│ └─ Neovim + CodeCompanion
|
||||
│ └─ OLLAMA_ENDPOINT=http://100.123.45.67:11434
|
||||
│
|
||||
└─ Machine C (Desktop)
|
||||
└─ Neovim + CodeCompanion
|
||||
└─ OLLAMA_ENDPOINT=http://100.123.45.67:11434
|
||||
```
|
||||
|
||||
## 📋 Configuration Details
|
||||
|
||||
### Ollama Adapter
|
||||
- **Location**: `lua/shelbybark/plugins/codecompanion.lua` (lines 30-45)
|
||||
- **Default Model**: `mistral` (7B, fast and capable)
|
||||
- **Endpoint**: Reads from `OLLAMA_ENDPOINT` env var
|
||||
- **Fallback**: `http://localhost:11434`
|
||||
|
||||
### Available Models
|
||||
| Model | Size | Speed | Quality | Best For |
|
||||
|-------|------|-------|---------|----------|
|
||||
| mistral | 7B | ⚡⚡ | ⭐⭐⭐ | General coding |
|
||||
| neural-chat | 7B | ⚡⚡ | ⭐⭐⭐ | Conversation |
|
||||
| orca-mini | 3B | ⚡⚡⚡ | ⭐⭐ | Quick answers |
|
||||
| llama2 | 7B/13B | ⚡⚡ | ⭐⭐⭐ | General purpose |
|
||||
| dolphin-mixtral | 8x7B | ⚡ | ⭐⭐⭐⭐ | Complex tasks |
|
||||
|
||||
## 🔧 Customization
|
||||
|
||||
### Change Default Model
|
||||
Edit `lua/shelbybark/plugins/codecompanion.lua` line 40:
|
||||
```lua
|
||||
default = "neural-chat", -- Change this
|
||||
```
|
||||
|
||||
### Add More Adapters
|
||||
```lua
|
||||
ollama_fast = function()
|
||||
return require("codecompanion.adapters").extend("ollama", {
|
||||
env = { url = os.getenv("OLLAMA_ENDPOINT") or "http://localhost:11434" },
|
||||
schema = { model = { default = "orca-mini" } },
|
||||
})
|
||||
end,
|
||||
```
|
||||
|
||||
## 🧪 Testing
|
||||
|
||||
### Test 1: Ollama is Running
|
||||
```bash
|
||||
curl http://localhost:11434/api/tags
|
||||
```
|
||||
|
||||
### Test 2: Network Access
|
||||
```bash
|
||||
export OLLAMA_ENDPOINT="http://100.x.x.x:11434"
|
||||
curl $OLLAMA_ENDPOINT/api/tags
|
||||
```
|
||||
|
||||
### Test 3: Neovim Integration
|
||||
```vim
|
||||
:CodeCompanionChat ollama Toggle
|
||||
" Type a message and press Enter
|
||||
```
|
||||
|
||||
## 🆘 Troubleshooting
|
||||
|
||||
### Connection Refused
|
||||
```bash
|
||||
# Check Ollama is running
|
||||
ps aux | grep ollama
|
||||
|
||||
# Check it's listening on all interfaces
|
||||
sudo netstat -tlnp | grep 11434
|
||||
# Should show 0.0.0.0:11434, not 127.0.0.1:11434
|
||||
```
|
||||
|
||||
### Model Not Found
|
||||
```bash
|
||||
# List available models
|
||||
ollama list
|
||||
|
||||
# Pull the model
|
||||
ollama pull mistral
|
||||
```
|
||||
|
||||
### Can't Reach Remote Server
|
||||
```bash
|
||||
# Verify Tailscale
|
||||
tailscale status
|
||||
|
||||
# Test connectivity
|
||||
ping 100.x.x.x
|
||||
curl http://100.x.x.x:11434/api/tags
|
||||
```
|
||||
|
||||
See `docs/TROUBLESHOOTING.md` for more detailed solutions.
|
||||
|
||||
## 📚 Documentation
|
||||
|
||||
- **OLLAMA_SETUP.md** - Full setup guide with all details
|
||||
- **OLLAMA_QUICK_SETUP.md** - Quick reference for other machines
|
||||
- **ARCHITECTURE.md** - Network diagrams and data flow
|
||||
- **TROUBLESHOOTING.md** - Common issues and solutions
|
||||
- **IMPLEMENTATION_CHECKLIST.md** - Step-by-step checklist
|
||||
- **INTEGRATION_SUMMARY.md** - Overview of all changes
|
||||
|
||||
## 🎓 How It Works
|
||||
|
||||
1. **Local Machine**: CodeCompanion connects to `http://localhost:11434`
|
||||
2. **Remote Machine**: CodeCompanion connects to `http://100.x.x.x:11434` via Tailscale
|
||||
3. **Tailscale**: Provides encrypted VPN tunnel between machines
|
||||
4. **Ollama**: Runs on server, serves models to all connected machines
|
||||
|
||||
## ⚙️ System Requirements
|
||||
|
||||
### Ollama Server Machine
|
||||
- 8GB+ RAM (for 7B models)
|
||||
- Modern CPU or GPU
|
||||
- Tailscale installed and running
|
||||
- Ollama installed and running
|
||||
|
||||
### Client Machines
|
||||
- Neovim 0.11.6+
|
||||
- CodeCompanion plugin
|
||||
- Tailscale installed and running
|
||||
- No Ollama needed!
|
||||
|
||||
## 🔐 Security
|
||||
|
||||
- **Tailscale**: All traffic is encrypted end-to-end
|
||||
- **Private IPs**: Uses Tailscale private IP addresses
|
||||
- **No Port Exposure**: Ollama only accessible via Tailscale
|
||||
- **Network Isolation**: Separate from public internet
|
||||
|
||||
## 💡 Tips
|
||||
|
||||
1. **Use smaller models** for faster responses (mistral, neural-chat)
|
||||
2. **Monitor network latency** with `ping 100.x.x.x`
|
||||
3. **Keep Tailscale updated** for best performance
|
||||
4. **Run Ollama on GPU** if available for faster inference
|
||||
5. **Use Claude for complex tasks**, Ollama for quick answers
|
||||
|
||||
## 🚨 Common Mistakes
|
||||
|
||||
❌ **Don't**: Forget to set `OLLAMA_HOST=0.0.0.0:11434` on server
|
||||
✅ **Do**: Bind Ollama to all interfaces so it's accessible from network
|
||||
|
||||
❌ **Don't**: Use localhost IP (127.0.0.1) for remote access
|
||||
✅ **Do**: Use Tailscale IP (100.x.x.x) for remote access
|
||||
|
||||
❌ **Don't**: Forget to export environment variable
|
||||
✅ **Do**: Add to shell config and reload shell
|
||||
|
||||
## 📞 Support
|
||||
|
||||
- **Ollama Issues**: https://github.com/ollama/ollama/issues
|
||||
- **Tailscale Help**: https://tailscale.com/kb/
|
||||
- **CodeCompanion**: https://github.com/olimorris/codecompanion.nvim
|
||||
|
||||
## 📝 Next Steps
|
||||
|
||||
1. Follow the checklist in `docs/IMPLEMENTATION_CHECKLIST.md`
|
||||
2. Set up Ollama on your server
|
||||
3. Configure environment variables on other machines
|
||||
4. Test with `<leader>cll` in Neovim
|
||||
5. Enjoy local LLM access across your network!
|
||||
|
||||
---
|
||||
|
||||
**Status**: ✅ Ready to use!
|
||||
|
||||
**Last Updated**: 2026-02-05
|
||||
|
||||
**Configuration Version**: 1.0
|
||||
201
START_HERE.md
Normal file
201
START_HERE.md
Normal file
@@ -0,0 +1,201 @@
|
||||
# 🎉 Setup Complete - Start Here!
|
||||
|
||||
## What You Now Have
|
||||
|
||||
Your CodeCompanion is now configured to work with **Ollama** across your **Tailscale network**. This means:
|
||||
|
||||
- ✅ Use local Ollama on your main machine
|
||||
- ✅ Access Ollama from other machines via Tailscale (no local Ollama needed)
|
||||
- ✅ Switch between Claude and Ollama instantly
|
||||
- ✅ Secure, encrypted connections via Tailscale
|
||||
|
||||
## 🚀 Get Started in 5 Minutes
|
||||
|
||||
### Step 1: Configure Your Ollama Server (5 min)
|
||||
|
||||
On the machine running Ollama:
|
||||
|
||||
```bash
|
||||
# Make Ollama accessible from network
|
||||
sudo systemctl edit ollama
|
||||
```
|
||||
|
||||
Add this line in the `[Service]` section:
|
||||
```ini
|
||||
Environment="OLLAMA_HOST=0.0.0.0:11434"
|
||||
```
|
||||
|
||||
Save and exit, then:
|
||||
```bash
|
||||
sudo systemctl restart ollama
|
||||
|
||||
# Pull a model
|
||||
ollama pull mistral
|
||||
|
||||
# Find your Tailscale IP
|
||||
tailscale ip -4
|
||||
# You'll see something like: 100.123.45.67
|
||||
```
|
||||
|
||||
### Step 2: Configure Other Machines (2 min)
|
||||
|
||||
On each machine that needs to access Ollama:
|
||||
|
||||
```bash
|
||||
# Add to ~/.zshrc (or ~/.bashrc)
|
||||
echo 'export OLLAMA_ENDPOINT="http://100.123.45.67:11434"' >> ~/.zshrc
|
||||
|
||||
# Reload shell
|
||||
source ~/.zshrc
|
||||
|
||||
# Test it works
|
||||
curl $OLLAMA_ENDPOINT/api/tags
|
||||
```
|
||||
|
||||
### Step 3: Use in Neovim (1 min)
|
||||
|
||||
```vim
|
||||
" Start Neovim
|
||||
nvim
|
||||
|
||||
" Press <leader>cll to chat with Ollama
|
||||
" Type a message and press Enter
|
||||
" You should get a response!
|
||||
```
|
||||
|
||||
## 📚 Documentation
|
||||
|
||||
Start with these in order:
|
||||
|
||||
1. **`README_OLLAMA_INTEGRATION.md`** ← Read this first for overview
|
||||
2. **`docs/QUICK_REFERENCE.md`** ← Quick reference card
|
||||
3. **`docs/OLLAMA_SETUP.md`** ← Full setup guide
|
||||
4. **`docs/TROUBLESHOOTING.md`** ← If something doesn't work
|
||||
|
||||
## ⌨️ Keymaps
|
||||
|
||||
| Keymap | What It Does |
|
||||
|--------|--------------|
|
||||
| `<leader>cll` | Chat with Ollama |
|
||||
| `<leader>cc` | Chat with Claude Haiku |
|
||||
| `<leader>cs` | Chat with Claude Sonnet |
|
||||
| `<leader>co` | Chat with Claude Opus |
|
||||
| `<leader>ca` | Show all CodeCompanion actions |
|
||||
|
||||
## 🔧 What Was Changed
|
||||
|
||||
### Modified
|
||||
- `lua/shelbybark/plugins/codecompanion.lua` - Added Ollama adapter and keymaps
|
||||
|
||||
### Created
|
||||
- 8 comprehensive documentation files in `docs/`
|
||||
- 1 main README file
|
||||
|
||||
## 🎯 Common Tasks
|
||||
|
||||
### Pull a Different Model
|
||||
```bash
|
||||
ollama pull neural-chat
|
||||
ollama pull llama2
|
||||
ollama pull dolphin-mixtral
|
||||
```
|
||||
|
||||
### Change Default Model
|
||||
Edit `lua/shelbybark/plugins/codecompanion.lua` line 40:
|
||||
```lua
|
||||
default = "neural-chat", -- Change this
|
||||
```
|
||||
|
||||
### Test Connection
|
||||
```bash
|
||||
# Local
|
||||
curl http://localhost:11434/api/tags
|
||||
|
||||
# Remote
|
||||
curl http://100.x.x.x:11434/api/tags
|
||||
```
|
||||
|
||||
### List Available Models
|
||||
```bash
|
||||
ollama list
|
||||
```
|
||||
|
||||
## 🆘 Something Not Working?
|
||||
|
||||
1. **Check Ollama is running**: `ps aux | grep ollama`
|
||||
2. **Check it's listening**: `sudo netstat -tlnp | grep 11434`
|
||||
3. **Check Tailscale**: `tailscale status`
|
||||
4. **Read troubleshooting**: `docs/TROUBLESHOOTING.md`
|
||||
|
||||
## 📋 Checklist
|
||||
|
||||
- [ ] Ollama server configured with `OLLAMA_HOST=0.0.0.0:11434`
|
||||
- [ ] Ollama restarted: `sudo systemctl restart ollama`
|
||||
- [ ] Model pulled: `ollama pull mistral`
|
||||
- [ ] Tailscale IP found: `tailscale ip -4`
|
||||
- [ ] Environment variable set on other machines
|
||||
- [ ] Shell reloaded: `source ~/.zshrc`
|
||||
- [ ] Connection tested: `curl $OLLAMA_ENDPOINT/api/tags`
|
||||
- [ ] Neovim tested: Press `<leader>cll`
|
||||
|
||||
## 💡 Pro Tips
|
||||
|
||||
1. **Use mistral** - Fast, good quality, recommended
|
||||
2. **Monitor latency** - `ping 100.x.x.x` should be < 50ms
|
||||
3. **Keep Tailscale updated** - Better performance
|
||||
4. **Use GPU if available** - Much faster inference
|
||||
5. **Try smaller models** - orca-mini for quick answers
|
||||
|
||||
## 📞 Need Help?
|
||||
|
||||
- **Setup issues**: See `docs/OLLAMA_SETUP.md`
|
||||
- **Troubleshooting**: See `docs/TROUBLESHOOTING.md`
|
||||
- **Architecture**: See `docs/ARCHITECTURE.md`
|
||||
- **Quick reference**: See `docs/QUICK_REFERENCE.md`
|
||||
|
||||
## 🎓 How It Works (Simple Version)
|
||||
|
||||
```
|
||||
Your Machine A (Ollama Server)
|
||||
↓
|
||||
Ollama Service (localhost:11434)
|
||||
↓
|
||||
Tailscale Network (Encrypted)
|
||||
↓
|
||||
Your Machine B (Laptop)
|
||||
↓
|
||||
Neovim + CodeCompanion
|
||||
↓
|
||||
Press <leader>cll
|
||||
↓
|
||||
Chat with Ollama!
|
||||
```
|
||||
|
||||
## 🔐 Security
|
||||
|
||||
- All traffic encrypted via Tailscale
|
||||
- Uses private Tailscale IPs (100.x.x.x)
|
||||
- Not exposed to public internet
|
||||
- Secure end-to-end
|
||||
|
||||
## 🚀 Next Steps
|
||||
|
||||
1. ✅ Read `README_OLLAMA_INTEGRATION.md`
|
||||
2. ✅ Follow the 5-minute setup above
|
||||
3. ✅ Test with `<leader>cll` in Neovim
|
||||
4. ✅ Enjoy local LLM access across your network!
|
||||
|
||||
---
|
||||
|
||||
**Everything is ready to go!**
|
||||
|
||||
**Start with**: `README_OLLAMA_INTEGRATION.md`
|
||||
|
||||
**Questions?**: Check `docs/QUICK_REFERENCE.md`
|
||||
|
||||
**Issues?**: Check `docs/TROUBLESHOOTING.md`
|
||||
|
||||
---
|
||||
|
||||
**Date**: 2026-02-05
|
||||
**Status**: ✅ Ready to Use
|
||||
229
docs/ARCHITECTURE.md
Normal file
229
docs/ARCHITECTURE.md
Normal file
@@ -0,0 +1,229 @@
|
||||
# Network Architecture Diagram
|
||||
|
||||
## Setup Overview
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────────┐
|
||||
│ TAILSCALE NETWORK │
|
||||
│ (Encrypted VPN Tunnel) │
|
||||
└─────────────────────────────────────────────────────────────────┘
|
||||
│ │
|
||||
│ │
|
||||
┌────▼─────────────────┐ ┌────────────▼──────────┐
|
||||
│ OLLAMA SERVER │ │ OTHER MACHINES │
|
||||
│ (Main Machine) │ │ (Laptop, Desktop) │
|
||||
│ │ │ │
|
||||
│ ┌────────────────┐ │ │ ┌────────────────┐ │
|
||||
│ │ Ollama Service │ │ │ │ Neovim + │ │
|
||||
│ │ :11434 │ │ │ │ CodeCompanion │ │
|
||||
│ └────────────────┘ │ │ └────────────────┘ │
|
||||
│ ▲ │ │ ▲ │
|
||||
│ │ │ │ │ │
|
||||
│ ┌──────┴──────────┐ │ │ ┌──────┴──────────┐ │
|
||||
│ │ Tailscale IP: │ │ │ │ OLLAMA_ENDPOINT │ │
|
||||
│ │ 100.123.45.67 │ │ │ │ env variable │ │
|
||||
│ └─────────────────┘ │ │ │ 100.123.45.67 │ │
|
||||
│ │ │ └─────────────────┘ │
|
||||
└──────────────────────┘ └──────────────────────┘
|
||||
│ │
|
||||
│ │
|
||||
└──────────────────┬───────────────────────────┘
|
||||
│
|
||||
┌───────▼────────┐
|
||||
│ Tailscale VPN │
|
||||
│ Encrypted Link │
|
||||
└────────────────┘
|
||||
```
|
||||
|
||||
## Data Flow
|
||||
|
||||
### Scenario 1: Local Ollama Access (Main Machine)
|
||||
|
||||
```
|
||||
Neovim (localhost)
|
||||
│
|
||||
├─ <leader>cll pressed
|
||||
│
|
||||
├─ CodeCompanion loads Ollama adapter
|
||||
│
|
||||
├─ Reads OLLAMA_ENDPOINT env var
|
||||
│ (not set, uses default)
|
||||
│
|
||||
├─ Connects to http://localhost:11434
|
||||
│
|
||||
└─ Ollama Service
|
||||
│
|
||||
├─ Loads model (mistral)
|
||||
│
|
||||
└─ Returns response
|
||||
```
|
||||
|
||||
### Scenario 2: Remote Ollama Access (Other Machine)
|
||||
|
||||
```
|
||||
Neovim (other machine)
|
||||
│
|
||||
├─ <leader>cll pressed
|
||||
│
|
||||
├─ CodeCompanion loads Ollama adapter
|
||||
│
|
||||
├─ Reads OLLAMA_ENDPOINT env var
|
||||
│ (set to http://100.123.45.67:11434)
|
||||
│
|
||||
├─ Connects via Tailscale VPN
|
||||
│
|
||||
├─ Tailscale Network
|
||||
│ (Encrypted tunnel)
|
||||
│
|
||||
└─ Ollama Service (on main machine)
|
||||
│
|
||||
├─ Loads model (mistral)
|
||||
│
|
||||
└─ Returns response
|
||||
```
|
||||
|
||||
## Configuration Hierarchy
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ CodeCompanion Ollama Adapter Configuration │
|
||||
└─────────────────────────────────────────────────────────────┘
|
||||
│
|
||||
▼
|
||||
┌─────────────────────────────────┐
|
||||
│ Check OLLAMA_ENDPOINT env var │
|
||||
└─────────────────────────────────┘
|
||||
│
|
||||
┌─────────┴─────────┐
|
||||
│ │
|
||||
Set? Not Set?
|
||||
│ │
|
||||
▼ ▼
|
||||
┌──────────────┐ ┌──────────────────┐
|
||||
│ Use env var │ │ Use default: │
|
||||
│ value │ │ localhost:11434 │
|
||||
└──────────────┘ └──────────────────┘
|
||||
│ │
|
||||
└─────────┬─────────┘
|
||||
│
|
||||
▼
|
||||
┌─────────────────────────────────┐
|
||||
│ Connect to Ollama Service │
|
||||
└─────────────────────────────────┘
|
||||
```
|
||||
|
||||
## Model Selection Flow
|
||||
|
||||
```
|
||||
User presses <leader>cll
|
||||
│
|
||||
▼
|
||||
CodeCompanion opens chat window
|
||||
│
|
||||
▼
|
||||
Loads Ollama adapter
|
||||
│
|
||||
├─ Checks schema.model.default
|
||||
│ (currently: "mistral")
|
||||
│
|
||||
▼
|
||||
Connects to Ollama endpoint
|
||||
│
|
||||
├─ Requests model: mistral
|
||||
│
|
||||
▼
|
||||
Ollama loads model into memory
|
||||
│
|
||||
├─ If not loaded, pulls from registry
|
||||
│
|
||||
▼
|
||||
Ready for chat
|
||||
│
|
||||
├─ User types message
|
||||
│
|
||||
▼
|
||||
Ollama generates response
|
||||
│
|
||||
▼
|
||||
Response displayed in Neovim
|
||||
```
|
||||
|
||||
## Environment Variable Resolution
|
||||
|
||||
```
|
||||
Machine A (Ollama Server)
|
||||
├─ OLLAMA_ENDPOINT not set
|
||||
├─ CodeCompanion uses: http://localhost:11434
|
||||
└─ Connects to local Ollama
|
||||
|
||||
Machine B (Other Machine)
|
||||
├─ OLLAMA_ENDPOINT="http://100.123.45.67:11434"
|
||||
├─ CodeCompanion uses: http://100.123.45.67:11434
|
||||
└─ Connects via Tailscale to Machine A's Ollama
|
||||
|
||||
Machine C (Another Machine)
|
||||
├─ OLLAMA_ENDPOINT="http://100.123.45.67:11434"
|
||||
├─ CodeCompanion uses: http://100.123.45.67:11434
|
||||
└─ Connects via Tailscale to Machine A's Ollama
|
||||
```
|
||||
|
||||
## Adapter Priority
|
||||
|
||||
```
|
||||
CodeCompanion Strategies
|
||||
│
|
||||
├─ Chat Strategy
|
||||
│ └─ adapter: "anthropic_haiku" (default)
|
||||
│ └─ Can switch to "ollama" with <leader>cll
|
||||
│
|
||||
├─ Inline Strategy
|
||||
│ └─ adapter: "anthropic_haiku" (default)
|
||||
│ └─ Can switch to "ollama" if needed
|
||||
│
|
||||
└─ Available Adapters
|
||||
├─ anthropic (Claude Sonnet)
|
||||
├─ anthropic_opus (Claude Opus)
|
||||
├─ anthropic_haiku (Claude Haiku)
|
||||
└─ ollama (Local or Remote)
|
||||
```
|
||||
|
||||
## Tailscale Network Benefits
|
||||
|
||||
```
|
||||
Without Tailscale:
|
||||
┌──────────────┐ ┌──────────────┐
|
||||
│ Machine A │ │ Machine B │
|
||||
│ (Ollama) │ │ (Neovim) │
|
||||
└──────────────┘ └──────────────┘
|
||||
│ │
|
||||
└────────────────────────┘
|
||||
Direct IP (exposed, insecure)
|
||||
|
||||
With Tailscale:
|
||||
┌──────────────┐ ┌──────────────┐
|
||||
│ Machine A │ │ Machine B │
|
||||
│ (Ollama) │ │ (Neovim) │
|
||||
└──────────────┘ └──────────────┘
|
||||
│ │
|
||||
└────────────────────────┘
|
||||
Encrypted VPN Tunnel (secure)
|
||||
Private Tailscale IPs only
|
||||
```
|
||||
|
||||
## Recommended Model Sizes
|
||||
|
||||
```
|
||||
Network Latency Impact:
|
||||
┌─────────────────────────────────────────┐
|
||||
│ Model Size │ Speed │ Quality │ Latency │
|
||||
├─────────────────────────────────────────┤
|
||||
│ 3B │ ⚡⚡⚡ │ ⭐⭐ │ Low │
|
||||
│ 7B │ ⚡⚡ │ ⭐⭐⭐ │ Medium │
|
||||
│ 13B │ ⚡ │ ⭐⭐⭐ │ Medium │
|
||||
│ 8x7B (MoE) │ ⚡ │ ⭐⭐⭐⭐│ High │
|
||||
└─────────────────────────────────────────┘
|
||||
|
||||
For Tailscale (network latency):
|
||||
Recommended: 7B models (mistral, neural-chat)
|
||||
Good balance of speed and quality
|
||||
```
|
||||
121
docs/IMPLEMENTATION_CHECKLIST.md
Normal file
121
docs/IMPLEMENTATION_CHECKLIST.md
Normal file
@@ -0,0 +1,121 @@
|
||||
# Implementation Checklist
|
||||
|
||||
## ✅ Completed
|
||||
|
||||
- [x] Added Ollama adapter to CodeCompanion configuration
|
||||
- [x] Configured environment variable support (`OLLAMA_ENDPOINT`)
|
||||
- [x] Added keymaps for Ollama (`<leader>cll`)
|
||||
- [x] Created comprehensive documentation
|
||||
- [x] Created quick setup guide for other machines
|
||||
- [x] Created shell configuration example
|
||||
|
||||
## 📋 To Do
|
||||
|
||||
### On Your Ollama Server Machine
|
||||
|
||||
- [ ] Verify Ollama is installed: `ollama --version`
|
||||
- [ ] Ensure Ollama listens on all interfaces:
|
||||
```bash
|
||||
sudo systemctl edit ollama
|
||||
# Add: Environment="OLLAMA_HOST=0.0.0.0:11434"
|
||||
sudo systemctl restart ollama
|
||||
```
|
||||
- [ ] Pull your preferred model:
|
||||
```bash
|
||||
ollama pull mistral
|
||||
# or: ollama pull neural-chat
|
||||
```
|
||||
- [ ] Find your Tailscale IP:
|
||||
```bash
|
||||
tailscale ip -4
|
||||
# Note this down: 100.x.x.x
|
||||
```
|
||||
- [ ] Test Ollama is accessible:
|
||||
```bash
|
||||
curl http://localhost:11434/api/tags
|
||||
```
|
||||
|
||||
### On Your Main Machine (with Ollama)
|
||||
|
||||
- [ ] Reload Neovim config: `:source ~/.config/nvim/init.lua` or restart Neovim
|
||||
- [ ] Test Ollama integration:
|
||||
```vim
|
||||
:CodeCompanionChat ollama Toggle
|
||||
```
|
||||
- [ ] Verify it works by sending a message
|
||||
|
||||
### On Other Machines (without Ollama)
|
||||
|
||||
- [ ] Add to shell config (`~/.zshrc`, `~/.bashrc`, etc.):
|
||||
```bash
|
||||
export OLLAMA_ENDPOINT="http://100.x.x.x:11434"
|
||||
# Replace 100.x.x.x with your Tailscale IP
|
||||
```
|
||||
- [ ] Reload shell: `source ~/.zshrc` (or your shell config)
|
||||
- [ ] Test connection:
|
||||
```bash
|
||||
curl http://100.x.x.x:11434/api/tags
|
||||
```
|
||||
- [ ] Start Neovim and test:
|
||||
```vim
|
||||
:CodeCompanionChat ollama Toggle
|
||||
```
|
||||
|
||||
## 🔧 Optional Customizations
|
||||
|
||||
- [ ] Change default Ollama model in `lua/shelbybark/plugins/codecompanion.lua` (line 40)
|
||||
- [ ] Add more Ollama adapters for different models
|
||||
- [ ] Create machine-specific configs if needed
|
||||
- [ ] Set up Ollama to run on GPU for faster inference
|
||||
|
||||
## 📚 Documentation Files
|
||||
|
||||
- `docs/OLLAMA_SETUP.md` - Full setup guide with troubleshooting
|
||||
- `docs/OLLAMA_QUICK_SETUP.md` - Quick reference for other machines
|
||||
- `docs/ollama_env_example.sh` - Shell configuration example
|
||||
- `docs/INTEGRATION_SUMMARY.md` - Overview of changes
|
||||
|
||||
## 🧪 Testing
|
||||
|
||||
### Test 1: Local Ollama Access
|
||||
```bash
|
||||
# On your Ollama server machine
|
||||
curl http://localhost:11434/api/tags
|
||||
# Should return JSON with available models
|
||||
```
|
||||
|
||||
### Test 2: Network Access via Tailscale
|
||||
```bash
|
||||
# On another machine
|
||||
export OLLAMA_ENDPOINT="http://100.x.x.x:11434"
|
||||
curl $OLLAMA_ENDPOINT/api/tags
|
||||
# Should return JSON with available models
|
||||
```
|
||||
|
||||
### Test 3: Neovim Integration
|
||||
```vim
|
||||
# In Neovim on any machine
|
||||
:CodeCompanionChat ollama Toggle
|
||||
# Should open chat window
|
||||
# Type a message and press Enter
|
||||
# Should get response from Ollama
|
||||
```
|
||||
|
||||
## 🆘 Quick Troubleshooting
|
||||
|
||||
| Problem | Quick Fix |
|
||||
|---------|-----------|
|
||||
| "Connection refused" | Check Ollama is running: `curl http://localhost:11434/api/tags` |
|
||||
| "Model not found" | Pull the model: `ollama pull mistral` |
|
||||
| "Can't reach server" | Verify Tailscale: `tailscale status` |
|
||||
| "Slow responses" | Try smaller model or check server resources |
|
||||
|
||||
## 📞 Support Resources
|
||||
|
||||
- Ollama Issues: https://github.com/ollama/ollama/issues
|
||||
- Tailscale Help: https://tailscale.com/kb/
|
||||
- CodeCompanion: https://github.com/olimorris/codecompanion.nvim
|
||||
|
||||
---
|
||||
|
||||
**Status**: Ready to use! Follow the "To Do" section to complete setup.
|
||||
207
docs/IMPLEMENTATION_COMPLETE.md
Normal file
207
docs/IMPLEMENTATION_COMPLETE.md
Normal file
@@ -0,0 +1,207 @@
|
||||
# Implementation Complete ✅
|
||||
|
||||
## Summary of Changes
|
||||
|
||||
Your CodeCompanion configuration has been successfully updated to support Ollama with Tailscale network access.
|
||||
|
||||
## What Was Changed
|
||||
|
||||
### 1. Modified File
|
||||
**`lua/shelbybark/plugins/codecompanion.lua`**
|
||||
- Added Ollama adapter (lines 30-45)
|
||||
- Configured environment variable support
|
||||
- Added Ollama keymaps `<leader>cll` (lines 223-237)
|
||||
|
||||
### 2. Created Documentation (7 files)
|
||||
- `README_OLLAMA_INTEGRATION.md` - Main overview
|
||||
- `docs/OLLAMA_SETUP.md` - Comprehensive setup guide
|
||||
- `docs/OLLAMA_QUICK_SETUP.md` - Quick reference for other machines
|
||||
- `docs/ARCHITECTURE.md` - Network architecture diagrams
|
||||
- `docs/TROUBLESHOOTING.md` - Common issues and solutions
|
||||
- `docs/IMPLEMENTATION_CHECKLIST.md` - Step-by-step checklist
|
||||
- `docs/QUICK_REFERENCE.md` - Quick reference card
|
||||
- `docs/ollama_env_example.sh` - Shell configuration example
|
||||
|
||||
## How It Works
|
||||
|
||||
### Local Access (Main Machine)
|
||||
```bash
|
||||
nvim
|
||||
# Press <leader>cll
|
||||
# Connects to http://localhost:11434 automatically
|
||||
```
|
||||
|
||||
### Remote Access (Other Machines)
|
||||
```bash
|
||||
export OLLAMA_ENDPOINT="http://100.123.45.67:11434"
|
||||
nvim
|
||||
# Press <leader>cll
|
||||
# Connects via Tailscale to your Ollama server
|
||||
```
|
||||
|
||||
## Key Features
|
||||
|
||||
✅ **Environment-Based**: Reads `OLLAMA_ENDPOINT` environment variable
|
||||
✅ **Fallback Support**: Defaults to localhost if env var not set
|
||||
✅ **Easy Switching**: Use `<leader>cll` to chat with Ollama
|
||||
✅ **Network-Aware**: Works locally and remotely
|
||||
✅ **Secure**: All traffic encrypted via Tailscale
|
||||
✅ **No Code Changes**: Just set an environment variable on other machines
|
||||
|
||||
## Configuration Details
|
||||
|
||||
### Ollama Adapter
|
||||
- **Location**: `lua/shelbybark/plugins/codecompanion.lua` (lines 30-45)
|
||||
- **Default Model**: `mistral` (7B, fast and capable)
|
||||
- **Endpoint**: Reads from `OLLAMA_ENDPOINT` env var
|
||||
- **Fallback**: `http://localhost:11434`
|
||||
|
||||
### Keymaps
|
||||
- `<leader>cll` - Chat with Ollama (normal and visual modes)
|
||||
- `<leader>cc` - Chat with Claude Haiku (existing)
|
||||
- `<leader>cs` - Chat with Claude Sonnet (existing)
|
||||
- `<leader>co` - Chat with Claude Opus (existing)
|
||||
|
||||
## Next Steps
|
||||
|
||||
### 1. On Your Ollama Server Machine
|
||||
|
||||
```bash
|
||||
# Ensure Ollama listens on all interfaces
|
||||
sudo systemctl edit ollama
|
||||
# Add: Environment="OLLAMA_HOST=0.0.0.0:11434"
|
||||
# Save and exit
|
||||
sudo systemctl restart ollama
|
||||
|
||||
# Pull a model
|
||||
ollama pull mistral
|
||||
|
||||
# Find your Tailscale IP
|
||||
tailscale ip -4
|
||||
# Note this down (e.g., 100.123.45.67)
|
||||
|
||||
# Test it works
|
||||
curl http://localhost:11434/api/tags
|
||||
```
|
||||
|
||||
### 2. On Other Machines
|
||||
|
||||
```bash
|
||||
# Add to ~/.zshrc, ~/.bashrc, or ~/.config/fish/config.fish
|
||||
export OLLAMA_ENDPOINT="http://100.123.45.67:11434"
|
||||
|
||||
# Reload shell
|
||||
source ~/.zshrc # or ~/.bashrc
|
||||
|
||||
# Test connection
|
||||
curl $OLLAMA_ENDPOINT/api/tags
|
||||
|
||||
# Start Neovim and press <leader>cll
|
||||
nvim
|
||||
```
|
||||
|
||||
### 3. Test in Neovim
|
||||
|
||||
```vim
|
||||
" Press <leader>cll to open Ollama chat
|
||||
" Type a message and press Enter
|
||||
" You should get a response from Ollama
|
||||
```
|
||||
|
||||
## Documentation Guide
|
||||
|
||||
| Document | Purpose | Read When |
|
||||
|----------|---------|-----------|
|
||||
| `README_OLLAMA_INTEGRATION.md` | Overview | First, to understand the setup |
|
||||
| `docs/QUICK_REFERENCE.md` | Quick reference | Need quick answers |
|
||||
| `docs/OLLAMA_SETUP.md` | Full setup guide | Setting up for the first time |
|
||||
| `docs/OLLAMA_QUICK_SETUP.md` | Quick setup | Setting up other machines |
|
||||
| `docs/ARCHITECTURE.md` | Network diagrams | Understanding how it works |
|
||||
| `docs/TROUBLESHOOTING.md` | Problem solving | Something isn't working |
|
||||
| `docs/IMPLEMENTATION_CHECKLIST.md` | Step-by-step | Following setup steps |
|
||||
| `docs/ollama_env_example.sh` | Shell config | Setting up environment variables |
|
||||
|
||||
## Recommended Models
|
||||
|
||||
| Model | Size | Speed | Quality | Best For |
|
||||
|-------|------|-------|---------|----------|
|
||||
| mistral | 7B | ⚡⚡ | ⭐⭐⭐ | General coding (recommended) |
|
||||
| neural-chat | 7B | ⚡⚡ | ⭐⭐⭐ | Conversation |
|
||||
| orca-mini | 3B | ⚡⚡⚡ | ⭐⭐ | Quick answers |
|
||||
| llama2 | 7B/13B | ⚡⚡ | ⭐⭐⭐ | General purpose |
|
||||
| dolphin-mixtral | 8x7B | ⚡ | ⭐⭐⭐⭐ | Complex tasks |
|
||||
|
||||
## Troubleshooting Quick Links
|
||||
|
||||
- **Connection refused**: See `docs/TROUBLESHOOTING.md` → Issue #1
|
||||
- **Model not found**: See `docs/TROUBLESHOOTING.md` → Issue #2
|
||||
- **Tailscale issues**: See `docs/TROUBLESHOOTING.md` → Issue #3
|
||||
- **Slow responses**: See `docs/TROUBLESHOOTING.md` → Issue #4
|
||||
- **Environment variable not working**: See `docs/TROUBLESHOOTING.md` → Issue #5
|
||||
|
||||
## File Structure
|
||||
|
||||
```
|
||||
neovim_config/
|
||||
├── lua/shelbybark/plugins/
|
||||
│ └── codecompanion.lua (MODIFIED)
|
||||
├── docs/
|
||||
│ ├── OLLAMA_SETUP.md (NEW)
|
||||
│ ├── OLLAMA_QUICK_SETUP.md (NEW)
|
||||
│ ├── ARCHITECTURE.md (NEW)
|
||||
│ ├── TROUBLESHOOTING.md (NEW)
|
||||
│ ├── IMPLEMENTATION_CHECKLIST.md (NEW)
|
||||
│ ├── QUICK_REFERENCE.md (NEW)
|
||||
│ ├── ollama_env_example.sh (NEW)
|
||||
│ └── INTEGRATION_SUMMARY.md (NEW)
|
||||
├── README_OLLAMA_INTEGRATION.md (NEW)
|
||||
└── docs/IMPLEMENTATION_COMPLETE.md (THIS FILE)
|
||||
```
|
||||
|
||||
## Quick Start (TL;DR)
|
||||
|
||||
```bash
|
||||
# On Ollama server
|
||||
sudo systemctl edit ollama
|
||||
# Add: Environment="OLLAMA_HOST=0.0.0.0:11434"
|
||||
sudo systemctl restart ollama
|
||||
ollama pull mistral
|
||||
tailscale ip -4 # Note the IP
|
||||
|
||||
# On other machines
|
||||
echo 'export OLLAMA_ENDPOINT="http://100.x.x.x:11434"' >> ~/.zshrc
|
||||
source ~/.zshrc
|
||||
nvim
|
||||
# Press <leader>cll
|
||||
```
|
||||
|
||||
## Support
|
||||
|
||||
- **Full Setup Guide**: `docs/OLLAMA_SETUP.md`
|
||||
- **Quick Reference**: `docs/QUICK_REFERENCE.md`
|
||||
- **Troubleshooting**: `docs/TROUBLESHOOTING.md`
|
||||
- **Architecture**: `docs/ARCHITECTURE.md`
|
||||
|
||||
## What's Next?
|
||||
|
||||
1. ✅ Configuration is ready
|
||||
2. 📋 Follow the checklist in `docs/IMPLEMENTATION_CHECKLIST.md`
|
||||
3. 🚀 Set up Ollama on your server
|
||||
4. 💻 Configure other machines
|
||||
5. 🎉 Start using Ollama with CodeCompanion!
|
||||
|
||||
## Questions?
|
||||
|
||||
- Check `docs/TROUBLESHOOTING.md` for common issues
|
||||
- Review `docs/ARCHITECTURE.md` to understand how it works
|
||||
- See `docs/OLLAMA_SETUP.md` for detailed setup instructions
|
||||
|
||||
---
|
||||
|
||||
**Status**: ✅ Implementation Complete
|
||||
|
||||
**Date**: 2026-02-05
|
||||
|
||||
**Configuration Version**: 1.0
|
||||
|
||||
**Ready to Use**: Yes!
|
||||
142
docs/INTEGRATION_SUMMARY.md
Normal file
142
docs/INTEGRATION_SUMMARY.md
Normal file
@@ -0,0 +1,142 @@
|
||||
# CodeCompanion + Ollama Integration Summary
|
||||
|
||||
## What Was Done
|
||||
|
||||
Your CodeCompanion configuration has been updated to support Ollama models alongside your existing Claude adapters. Here's what changed:
|
||||
|
||||
### 1. **Added Ollama Adapter** (`lua/shelbybark/plugins/codecompanion.lua`)
|
||||
- Reads `OLLAMA_ENDPOINT` environment variable
|
||||
- Defaults to `http://localhost:11434` if not set
|
||||
- Uses `mistral` as the default model (configurable)
|
||||
|
||||
### 2. **Added Ollama Keymaps**
|
||||
- `<leader>cll` - Toggle Ollama chat (normal and visual modes)
|
||||
- Works alongside existing Claude keymaps
|
||||
|
||||
### 3. **Created Documentation**
|
||||
- `docs/OLLAMA_SETUP.md` - Comprehensive setup guide
|
||||
- `docs/OLLAMA_QUICK_SETUP.md` - Quick reference for other machines
|
||||
- `docs/ollama_env_example.sh` - Shell configuration example
|
||||
|
||||
## How It Works
|
||||
|
||||
### On Your Main Machine (with Ollama)
|
||||
```bash
|
||||
# Ollama runs locally, CodeCompanion uses http://localhost:11434 by default
|
||||
nvim
|
||||
# Press <leader>cll to chat with Ollama
|
||||
```
|
||||
|
||||
### On Other Machines (without Ollama)
|
||||
```bash
|
||||
# Set environment variable to your Ollama server's Tailscale IP
|
||||
export OLLAMA_ENDPOINT="http://100.123.45.67:11434"
|
||||
nvim
|
||||
# Press <leader>cll to chat with Ollama via Tailscale
|
||||
```
|
||||
|
||||
## Key Features
|
||||
|
||||
✅ **Network Access**: Access Ollama from any machine on your Tailscale network
|
||||
✅ **Fallback Support**: Keep Claude as primary, use Ollama as alternative
|
||||
✅ **Easy Switching**: Use keymaps to switch between models instantly
|
||||
✅ **Environment-Based**: Configuration adapts to each machine automatically
|
||||
✅ **No Code Changes**: Just set an environment variable on other machines
|
||||
|
||||
## Next Steps
|
||||
|
||||
### 1. **On Your Ollama Server Machine**
|
||||
|
||||
Ensure Ollama is exposed to the network:
|
||||
```bash
|
||||
# Check current Ollama binding
|
||||
ps aux | grep ollama
|
||||
|
||||
# If needed, set it to listen on all interfaces
|
||||
sudo systemctl edit ollama
|
||||
# Add: Environment="OLLAMA_HOST=0.0.0.0:11434"
|
||||
# Save and exit, then:
|
||||
sudo systemctl restart ollama
|
||||
|
||||
# Find your Tailscale IP
|
||||
tailscale ip -4
|
||||
```
|
||||
|
||||
### 2. **On Other Machines**
|
||||
|
||||
Add to your shell config (`~/.zshrc`, `~/.bashrc`, etc.):
|
||||
```bash
|
||||
export OLLAMA_ENDPOINT="http://YOUR_TAILSCALE_IP:11434"
|
||||
```
|
||||
|
||||
### 3. **Test It**
|
||||
|
||||
```bash
|
||||
# Verify connection
|
||||
curl http://YOUR_TAILSCALE_IP:11434/api/tags
|
||||
|
||||
# Start Neovim and press <leader>cll
|
||||
nvim
|
||||
```
|
||||
|
||||
## Configuration Details
|
||||
|
||||
### Ollama Adapter Settings
|
||||
- **Location**: `lua/shelbybark/plugins/codecompanion.lua` (lines 35-45)
|
||||
- **Default Model**: `mistral` (change to your preference)
|
||||
- **Endpoint**: Read from `OLLAMA_ENDPOINT` env var
|
||||
- **Fallback**: `http://localhost:11434`
|
||||
|
||||
### Available Models to Try
|
||||
- `mistral` - Fast, good quality (recommended)
|
||||
- `neural-chat` - Optimized for conversation
|
||||
- `dolphin-mixtral` - Larger, higher quality
|
||||
- `llama2` - General purpose
|
||||
- `orca-mini` - Very fast, lightweight
|
||||
|
||||
Pull models with: `ollama pull <model-name>`
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Connection Issues
|
||||
```bash
|
||||
# Test Ollama is running
|
||||
curl http://localhost:11434/api/tags
|
||||
|
||||
# Test Tailscale connectivity
|
||||
ping 100.x.x.x # Use your Tailscale IP
|
||||
|
||||
# Check Ollama is bound to network
|
||||
sudo netstat -tlnp | grep 11434
|
||||
```
|
||||
|
||||
### Model Issues
|
||||
```bash
|
||||
# List available models
|
||||
curl http://localhost:11434/api/tags | jq '.models[].name'
|
||||
|
||||
# Pull a model
|
||||
ollama pull mistral
|
||||
```
|
||||
|
||||
## Files Modified/Created
|
||||
|
||||
- ✏️ `lua/shelbybark/plugins/codecompanion.lua` - Added Ollama adapter and keymaps
|
||||
- ✨ `docs/OLLAMA_SETUP.md` - Comprehensive setup guide
|
||||
- ✨ `docs/OLLAMA_QUICK_SETUP.md` - Quick reference
|
||||
- ✨ `docs/ollama_env_example.sh` - Shell config example
|
||||
- 📄 `docs/INTEGRATION_SUMMARY.md` - This file
|
||||
|
||||
## Support
|
||||
|
||||
For issues or questions:
|
||||
1. Check the troubleshooting section in `docs/OLLAMA_SETUP.md`
|
||||
2. Verify Ollama is running: `curl http://localhost:11434/api/tags`
|
||||
3. Verify Tailscale connectivity: `tailscale status`
|
||||
4. Check CodeCompanion logs in Neovim: `:messages`
|
||||
|
||||
## References
|
||||
|
||||
- [Ollama GitHub](https://github.com/ollama/ollama)
|
||||
- [Tailscale Documentation](https://tailscale.com/kb/)
|
||||
- [CodeCompanion.nvim](https://github.com/olimorris/codecompanion.nvim)
|
||||
49
docs/OLLAMA_QUICK_SETUP.md
Normal file
49
docs/OLLAMA_QUICK_SETUP.md
Normal file
@@ -0,0 +1,49 @@
|
||||
# Quick Setup for Other Machines
|
||||
|
||||
## Step 1: Find Your Ollama Server's Tailscale IP
|
||||
|
||||
On your Ollama server machine, run:
|
||||
```bash
|
||||
tailscale ip -4
|
||||
```
|
||||
|
||||
Example output: `100.123.45.67`
|
||||
|
||||
## Step 2: Set Environment Variable on Other Machines
|
||||
|
||||
Add this to your shell config file (`~/.zshrc`, `~/.bashrc`, or `~/.config/fish/config.fish`):
|
||||
|
||||
```bash
|
||||
export OLLAMA_ENDPOINT="http://100.123.45.67:11434"
|
||||
```
|
||||
|
||||
Replace `100.123.45.67` with your actual Tailscale IP.
|
||||
|
||||
## Step 3: Verify Connection
|
||||
|
||||
Test the connection:
|
||||
```bash
|
||||
curl http://100.123.45.67:11434/api/tags
|
||||
```
|
||||
|
||||
You should see a JSON response with available models.
|
||||
|
||||
## Step 4: Use in Neovim
|
||||
|
||||
Start Neovim and press `<leader>cll` to chat with Ollama!
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
| Issue | Solution |
|
||||
|-------|----------|
|
||||
| Connection refused | Verify Ollama is running: `curl http://100.123.45.67:11434/api/tags` |
|
||||
| Tailscale not working | Run `tailscale status` on both machines |
|
||||
| Model not found | Pull the model on the server: `ollama pull mistral` |
|
||||
| Slow responses | Try a smaller model or check server resources |
|
||||
|
||||
## Available Keymaps
|
||||
|
||||
- `<leader>cll` - Chat with Ollama
|
||||
- `<leader>cc` - Chat with Claude Haiku
|
||||
- `<leader>cs` - Chat with Claude Sonnet
|
||||
- `<leader>co` - Chat with Claude Opus
|
||||
202
docs/OLLAMA_SETUP.md
Normal file
202
docs/OLLAMA_SETUP.md
Normal file
@@ -0,0 +1,202 @@
|
||||
# CodeCompanion + Ollama Setup Guide
|
||||
|
||||
This guide explains how to use Ollama with CodeCompanion across your network via Tailscale.
|
||||
|
||||
## Overview
|
||||
|
||||
Your CodeCompanion configuration now supports both Claude (via Anthropic API) and Ollama models. You can:
|
||||
- Use Ollama locally on your main machine
|
||||
- Access Ollama from other machines on your network via Tailscale
|
||||
- Switch between Claude and Ollama models seamlessly
|
||||
|
||||
## Prerequisites
|
||||
|
||||
### On Your Ollama Server Machine
|
||||
|
||||
1. **Install Ollama** (if not already done)
|
||||
```bash
|
||||
curl -fsSL https://ollama.ai/install.sh | sh
|
||||
```
|
||||
|
||||
2. **Start Ollama with network binding**
|
||||
|
||||
By default, Ollama only listens on `localhost:11434`. To access it from other machines, you need to expose it to your network:
|
||||
|
||||
```bash
|
||||
# Option 1: Run Ollama with network binding (temporary)
|
||||
OLLAMA_HOST=0.0.0.0:11434 ollama serve
|
||||
|
||||
# Option 2: Set it permanently in systemd (recommended)
|
||||
sudo systemctl edit ollama
|
||||
```
|
||||
|
||||
Add this to the systemd service file:
|
||||
```ini
|
||||
[Service]
|
||||
Environment="OLLAMA_HOST=0.0.0.0:11434"
|
||||
```
|
||||
|
||||
Then restart:
|
||||
```bash
|
||||
sudo systemctl restart ollama
|
||||
```
|
||||
|
||||
3. **Pull a model** (if not already done)
|
||||
```bash
|
||||
ollama pull mistral
|
||||
# Or try other models:
|
||||
# ollama pull neural-chat
|
||||
# ollama pull dolphin-mixtral
|
||||
# ollama pull llama2
|
||||
```
|
||||
|
||||
4. **Find your Tailscale IP**
|
||||
```bash
|
||||
tailscale ip -4
|
||||
# Output example: 100.123.45.67
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
### On Your Main Machine (with Ollama)
|
||||
|
||||
**Default behavior:** The config will use `http://localhost:11434` automatically.
|
||||
|
||||
To override, set the environment variable:
|
||||
```bash
|
||||
export OLLAMA_ENDPOINT="http://localhost:11434"
|
||||
```
|
||||
|
||||
### On Other Machines (without Ollama)
|
||||
|
||||
Set the `OLLAMA_ENDPOINT` environment variable to point to your Ollama server's Tailscale IP:
|
||||
|
||||
```bash
|
||||
export OLLAMA_ENDPOINT="http://100.123.45.67:11434"
|
||||
```
|
||||
|
||||
**Make it persistent** by adding to your shell config (`~/.zshrc`, `~/.bashrc`, etc.):
|
||||
```bash
|
||||
export OLLAMA_ENDPOINT="http://100.123.45.67:11434"
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
### Keymaps
|
||||
|
||||
- **`<leader>cll`** - Toggle chat with Ollama (normal and visual modes)
|
||||
- **`<leader>cc`** - Toggle chat with Claude Haiku (default)
|
||||
- **`<leader>cs`** - Toggle chat with Claude Sonnet
|
||||
- **`<leader>co`** - Toggle chat with Claude Opus
|
||||
- **`<leader>ca`** - Show CodeCompanion actions
|
||||
- **`<leader>cm`** - Show current model
|
||||
|
||||
### Switching Models
|
||||
|
||||
You can also use the `:CodeCompanionSwitchModel` command:
|
||||
```vim
|
||||
:CodeCompanionSwitchModel haiku
|
||||
:CodeCompanionSwitchModel sonnet
|
||||
:CodeCompanionSwitchModel opus
|
||||
```
|
||||
|
||||
To add Ollama to this command, you would need to extend the configuration.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### "Connection refused" error
|
||||
|
||||
**Problem:** You're getting connection errors when trying to use Ollama.
|
||||
|
||||
**Solutions:**
|
||||
1. Verify Ollama is running: `curl http://localhost:11434/api/tags`
|
||||
2. Check if it's bound to the network: `sudo netstat -tlnp | grep 11434`
|
||||
3. Verify Tailscale connectivity: `ping 100.x.x.x` (use the Tailscale IP)
|
||||
4. Check firewall: `sudo ufw status` (if using UFW)
|
||||
|
||||
### "Model not found" error
|
||||
|
||||
**Problem:** The model you specified doesn't exist on the Ollama server.
|
||||
|
||||
**Solution:**
|
||||
1. List available models: `curl http://localhost:11434/api/tags`
|
||||
2. Pull the model: `ollama pull mistral`
|
||||
3. Update the default model in `lua/shelbybark/plugins/codecompanion.lua` if needed
|
||||
|
||||
### Slow responses
|
||||
|
||||
**Problem:** Responses are very slow.
|
||||
|
||||
**Causes & Solutions:**
|
||||
1. **Network latency**: Tailscale adds minimal overhead, but check your network
|
||||
2. **Model size**: Larger models (7B+) are slower. Try smaller models like `mistral` or `neural-chat`
|
||||
3. **Server resources**: Check CPU/RAM on the Ollama server with `top` or `htop`
|
||||
|
||||
### Tailscale not connecting
|
||||
|
||||
**Problem:** Can't reach the Ollama server via Tailscale IP.
|
||||
|
||||
**Solutions:**
|
||||
1. Verify Tailscale is running: `tailscale status`
|
||||
2. Check both machines are on the same Tailscale network
|
||||
3. Verify the Tailscale IP is correct: `tailscale ip -4`
|
||||
4. Check firewall rules on the Ollama server
|
||||
|
||||
## Recommended Models for CodeCompanion
|
||||
|
||||
| Model | Size | Speed | Quality | Best For |
|
||||
|-------|------|-------|---------|----------|
|
||||
| mistral | 7B | Fast | Good | General coding |
|
||||
| neural-chat | 7B | Fast | Good | Chat/conversation |
|
||||
| dolphin-mixtral | 8x7B | Slower | Excellent | Complex tasks |
|
||||
| llama2 | 7B/13B | Medium | Good | General purpose |
|
||||
| orca-mini | 3B | Very Fast | Fair | Quick answers |
|
||||
|
||||
## Advanced Configuration
|
||||
|
||||
### Custom Model Selection
|
||||
|
||||
To change the default Ollama model, edit `lua/shelbybark/plugins/codecompanion.lua`:
|
||||
|
||||
```lua
|
||||
schema = {
|
||||
model = {
|
||||
default = "neural-chat", -- Change this to your preferred model
|
||||
},
|
||||
},
|
||||
```
|
||||
|
||||
### Multiple Ollama Servers
|
||||
|
||||
If you have multiple Ollama servers, you can create multiple adapters:
|
||||
|
||||
```lua
|
||||
ollama_main = function()
|
||||
return require("codecompanion.adapters").extend("ollama", {
|
||||
env = { url = "http://100.123.45.67:11434" },
|
||||
schema = { model = { default = "mistral" } },
|
||||
})
|
||||
end,
|
||||
ollama_backup = function()
|
||||
return require("codecompanion.adapters").extend("ollama", {
|
||||
env = { url = "http://100.123.45.68:11434" },
|
||||
schema = { model = { default = "neural-chat" } },
|
||||
})
|
||||
end,
|
||||
```
|
||||
|
||||
Then add keymaps for each.
|
||||
|
||||
## Performance Tips
|
||||
|
||||
1. **Use smaller models** for faster responses (mistral, neural-chat)
|
||||
2. **Run Ollama on a machine with good specs** (8GB+ RAM, modern CPU)
|
||||
3. **Keep Tailscale updated** for best network performance
|
||||
4. **Monitor network latency** with `ping` to your Ollama server
|
||||
5. **Consider running Ollama on GPU** if available for faster inference
|
||||
|
||||
## References
|
||||
|
||||
- [Ollama Documentation](https://github.com/ollama/ollama)
|
||||
- [Tailscale Documentation](https://tailscale.com/kb/)
|
||||
- [CodeCompanion Documentation](https://github.com/olimorris/codecompanion.nvim)
|
||||
227
docs/QUICK_REFERENCE.md
Normal file
227
docs/QUICK_REFERENCE.md
Normal file
@@ -0,0 +1,227 @@
|
||||
# Quick Reference Card
|
||||
|
||||
## 🎯 At a Glance
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ CodeCompanion + Ollama + Tailscale Integration │
|
||||
│ Quick Reference Card │
|
||||
└─────────────────────────────────────────────────────────────┘
|
||||
```
|
||||
|
||||
## ⌨️ Keymaps
|
||||
|
||||
| Keymap | Action | Mode |
|
||||
|--------|--------|------|
|
||||
| `<leader>cll` | Chat with Ollama | Normal, Visual |
|
||||
| `<leader>cc` | Chat with Claude Haiku | Normal, Visual |
|
||||
| `<leader>cs` | Chat with Claude Sonnet | Normal, Visual |
|
||||
| `<leader>co` | Chat with Claude Opus | Normal, Visual |
|
||||
| `<leader>ca` | Show CodeCompanion actions | Normal, Visual |
|
||||
| `<leader>cm` | Show current model | Normal |
|
||||
|
||||
## 🔧 Setup Checklist
|
||||
|
||||
### On Ollama Server
|
||||
- [ ] `sudo systemctl edit ollama` → Add `Environment="OLLAMA_HOST=0.0.0.0:11434"`
|
||||
- [ ] `sudo systemctl restart ollama`
|
||||
- [ ] `ollama pull mistral` (or your preferred model)
|
||||
- [ ] `tailscale ip -4` → Note the IP (e.g., 100.123.45.67)
|
||||
|
||||
### On Other Machines
|
||||
- [ ] Add to `~/.zshrc` (or `~/.bashrc`):
|
||||
```bash
|
||||
export OLLAMA_ENDPOINT="http://100.123.45.67:11434"
|
||||
```
|
||||
- [ ] `source ~/.zshrc` (reload shell)
|
||||
- [ ] `curl $OLLAMA_ENDPOINT/api/tags` (test connection)
|
||||
- [ ] Start Neovim and press `<leader>cll`
|
||||
|
||||
## 🧪 Quick Tests
|
||||
|
||||
```bash
|
||||
# Test Ollama is running
|
||||
curl http://localhost:11434/api/tags
|
||||
|
||||
# Test remote access
|
||||
curl http://100.x.x.x:11434/api/tags
|
||||
|
||||
# Test Tailscale
|
||||
tailscale status
|
||||
ping 100.x.x.x
|
||||
|
||||
# List models
|
||||
ollama list
|
||||
|
||||
# Pull a model
|
||||
ollama pull mistral
|
||||
```
|
||||
|
||||
## 📊 Model Comparison
|
||||
|
||||
```
|
||||
┌──────────────┬──────┬───────┬─────────┬──────────────┐
|
||||
│ Model │ Size │ Speed │ Quality │ Best For │
|
||||
├──────────────┼──────┼───────┼─────────┼──────────────┤
|
||||
│ orca-mini │ 3B │ ⚡⚡⚡ │ ⭐⭐ │ Quick answers│
|
||||
│ mistral │ 7B │ ⚡⚡ │ ⭐⭐⭐ │ Coding │
|
||||
│ neural-chat │ 7B │ ⚡⚡ │ ⭐⭐⭐ │ Chat │
|
||||
│ llama2 │ 7B │ ⚡⚡ │ ⭐⭐⭐ │ General │
|
||||
│ dolphin-mix │ 8x7B │ ⚡ │ ⭐⭐⭐⭐│ Complex │
|
||||
└──────────────┴──────┴───────┴─────────┴──────────────┘
|
||||
```
|
||||
|
||||
## 🔍 Troubleshooting Quick Fixes
|
||||
|
||||
| Problem | Quick Fix |
|
||||
|---------|-----------|
|
||||
| Connection refused | `ps aux \| grep ollama` (check if running) |
|
||||
| Model not found | `ollama pull mistral` |
|
||||
| Can't reach remote | `ping 100.x.x.x` (check Tailscale) |
|
||||
| Env var not working | `echo $OLLAMA_ENDPOINT` (verify it's set) |
|
||||
| Slow responses | Try smaller model: `ollama pull orca-mini` |
|
||||
|
||||
## 📁 Important Files
|
||||
|
||||
| File | Purpose |
|
||||
|------|---------|
|
||||
| `lua/shelbybark/plugins/codecompanion.lua` | Main config (modified) |
|
||||
| `docs/OLLAMA_SETUP.md` | Full setup guide |
|
||||
| `docs/TROUBLESHOOTING.md` | Detailed troubleshooting |
|
||||
| `docs/ARCHITECTURE.md` | Network diagrams |
|
||||
| `docs/IMPLEMENTATION_CHECKLIST.md` | Step-by-step checklist |
|
||||
|
||||
## 🌐 Network Setup
|
||||
|
||||
```
|
||||
Machine A (Ollama Server)
|
||||
├─ Ollama: http://localhost:11434
|
||||
├─ Tailscale IP: 100.123.45.67
|
||||
└─ OLLAMA_HOST=0.0.0.0:11434
|
||||
|
||||
Machine B (Client)
|
||||
├─ OLLAMA_ENDPOINT=http://100.123.45.67:11434
|
||||
└─ Connects via Tailscale VPN
|
||||
|
||||
Machine C (Client)
|
||||
├─ OLLAMA_ENDPOINT=http://100.123.45.67:11434
|
||||
└─ Connects via Tailscale VPN
|
||||
```
|
||||
|
||||
## 💾 Environment Variable
|
||||
|
||||
```bash
|
||||
# Add to ~/.zshrc, ~/.bashrc, or ~/.config/fish/config.fish
|
||||
export OLLAMA_ENDPOINT="http://100.123.45.67:11434"
|
||||
|
||||
# Then reload
|
||||
source ~/.zshrc # or ~/.bashrc
|
||||
```
|
||||
|
||||
## 🚀 Usage Flow
|
||||
|
||||
```
|
||||
1. Press <leader>cll
|
||||
↓
|
||||
2. CodeCompanion opens chat window
|
||||
↓
|
||||
3. Reads OLLAMA_ENDPOINT env var
|
||||
↓
|
||||
4. Connects to Ollama server
|
||||
↓
|
||||
5. Type message and press Enter
|
||||
↓
|
||||
6. Ollama generates response
|
||||
↓
|
||||
7. Response appears in Neovim
|
||||
```
|
||||
|
||||
## 📞 Help Commands
|
||||
|
||||
```bash
|
||||
# Check Ollama status
|
||||
sudo systemctl status ollama
|
||||
|
||||
# View Ollama logs
|
||||
journalctl -u ollama -f
|
||||
|
||||
# List available models
|
||||
ollama list
|
||||
|
||||
# Pull a model
|
||||
ollama pull <model-name>
|
||||
|
||||
# Check Tailscale
|
||||
tailscale status
|
||||
|
||||
# Find your Tailscale IP
|
||||
tailscale ip -4
|
||||
|
||||
# Test connection
|
||||
curl http://localhost:11434/api/tags
|
||||
curl http://100.x.x.x:11434/api/tags
|
||||
```
|
||||
|
||||
## ⚡ Performance Tips
|
||||
|
||||
1. **Use 7B models** for best balance (mistral, neural-chat)
|
||||
2. **Avoid 13B+ models** on slow networks
|
||||
3. **Monitor latency**: `ping 100.x.x.x` (should be < 50ms)
|
||||
4. **Run on GPU** if available for faster inference
|
||||
5. **Close other apps** to free up resources
|
||||
|
||||
## 🔐 Security Checklist
|
||||
|
||||
- ✅ Ollama only accessible via Tailscale
|
||||
- ✅ All traffic encrypted end-to-end
|
||||
- ✅ Uses private Tailscale IPs (100.x.x.x)
|
||||
- ✅ No exposure to public internet
|
||||
- ✅ Firewall rules can further restrict access
|
||||
|
||||
## 📋 Common Commands
|
||||
|
||||
```bash
|
||||
# Start Ollama
|
||||
ollama serve
|
||||
|
||||
# Or with systemd
|
||||
sudo systemctl start ollama
|
||||
|
||||
# Pull a model
|
||||
ollama pull mistral
|
||||
|
||||
# List models
|
||||
ollama list
|
||||
|
||||
# Remove a model
|
||||
ollama rm mistral
|
||||
|
||||
# Test connection
|
||||
curl http://localhost:11434/api/tags | jq '.models[].name'
|
||||
|
||||
# Check Tailscale
|
||||
tailscale status
|
||||
|
||||
# Restart Ollama
|
||||
sudo systemctl restart ollama
|
||||
```
|
||||
|
||||
## 🎓 Learning Resources
|
||||
|
||||
- Ollama: https://github.com/ollama/ollama
|
||||
- Tailscale: https://tailscale.com/kb/
|
||||
- CodeCompanion: https://github.com/olimorris/codecompanion.nvim
|
||||
- Neovim: https://neovim.io/
|
||||
|
||||
## 📝 Notes
|
||||
|
||||
- Default model: `mistral` (change in codecompanion.lua line 40)
|
||||
- Default endpoint: `http://localhost:11434` (override with env var)
|
||||
- Keymaps use `<leader>` (usually `\` or `,`)
|
||||
- All documentation in `docs/` folder
|
||||
|
||||
---
|
||||
|
||||
**Print this card and keep it handy!**
|
||||
|
||||
**Last Updated**: 2026-02-05
|
||||
460
docs/TROUBLESHOOTING.md
Normal file
460
docs/TROUBLESHOOTING.md
Normal file
@@ -0,0 +1,460 @@
|
||||
# Troubleshooting Guide
|
||||
|
||||
## Common Issues and Solutions
|
||||
|
||||
### 1. Connection Refused Error
|
||||
|
||||
**Error Message:**
|
||||
```
|
||||
Error: Connection refused
|
||||
Failed to connect to http://localhost:11434
|
||||
```
|
||||
|
||||
**Causes:**
|
||||
- Ollama service is not running
|
||||
- Ollama is not bound to the correct interface
|
||||
- Port 11434 is in use by another service
|
||||
|
||||
**Solutions:**
|
||||
|
||||
```bash
|
||||
# Check if Ollama is running
|
||||
ps aux | grep ollama
|
||||
|
||||
# If not running, start it
|
||||
ollama serve
|
||||
|
||||
# Or if using systemd
|
||||
sudo systemctl start ollama
|
||||
sudo systemctl status ollama
|
||||
|
||||
# Check if port is in use
|
||||
sudo netstat -tlnp | grep 11434
|
||||
lsof -i :11434
|
||||
|
||||
# If another service is using it, either:
|
||||
# 1. Stop the other service
|
||||
# 2. Change Ollama port (advanced)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### 2. Model Not Found Error
|
||||
|
||||
**Error Message:**
|
||||
```
|
||||
Error: Model 'mistral' not found
|
||||
```
|
||||
|
||||
**Causes:**
|
||||
- Model hasn't been pulled yet
|
||||
- Model name is incorrect
|
||||
- Ollama cache is corrupted
|
||||
|
||||
**Solutions:**
|
||||
|
||||
```bash
|
||||
# List available models
|
||||
curl http://localhost:11434/api/tags | jq '.models[].name'
|
||||
|
||||
# Pull the model
|
||||
ollama pull mistral
|
||||
|
||||
# Or pull a different model
|
||||
ollama pull neural-chat
|
||||
ollama pull llama2
|
||||
ollama pull dolphin-mixtral
|
||||
|
||||
# Verify it was pulled
|
||||
ollama list
|
||||
|
||||
# If issues persist, remove and re-pull
|
||||
ollama rm mistral
|
||||
ollama pull mistral
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### 3. Tailscale Connection Issues
|
||||
|
||||
**Error Message:**
|
||||
```
|
||||
Error: Connection refused to 100.x.x.x:11434
|
||||
```
|
||||
|
||||
**Causes:**
|
||||
- Tailscale is not running
|
||||
- Machines are not on the same Tailscale network
|
||||
- Firewall is blocking the connection
|
||||
- Tailscale IP is incorrect
|
||||
|
||||
**Solutions:**
|
||||
|
||||
```bash
|
||||
# Check Tailscale status
|
||||
tailscale status
|
||||
|
||||
# If not running, start it
|
||||
sudo systemctl start tailscaled
|
||||
tailscale up
|
||||
|
||||
# Verify you're logged in
|
||||
tailscale whoami
|
||||
|
||||
# Check your Tailscale IP
|
||||
tailscale ip -4
|
||||
|
||||
# Ping the remote machine
|
||||
ping 100.x.x.x
|
||||
|
||||
# Check if Ollama is accessible from remote
|
||||
curl http://100.x.x.x:11434/api/tags
|
||||
|
||||
# If firewall is blocking, check UFW
|
||||
sudo ufw status
|
||||
sudo ufw allow 11434/tcp
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### 4. Slow Responses
|
||||
|
||||
**Symptoms:**
|
||||
- Responses take 30+ seconds
|
||||
- Neovim appears frozen
|
||||
- High CPU usage on Ollama server
|
||||
|
||||
**Causes:**
|
||||
- Model is too large for available resources
|
||||
- Network latency is high
|
||||
- Server is running other heavy processes
|
||||
- Ollama is running on CPU instead of GPU
|
||||
|
||||
**Solutions:**
|
||||
|
||||
```bash
|
||||
# Check server resources
|
||||
top
|
||||
htop
|
||||
|
||||
# Check if Ollama is using GPU
|
||||
nvidia-smi # For NVIDIA GPUs
|
||||
rocm-smi # For AMD GPUs
|
||||
|
||||
# Try a smaller model
|
||||
ollama pull orca-mini # 3B model, very fast
|
||||
ollama pull neural-chat # 7B model, good balance
|
||||
|
||||
# Check network latency
|
||||
ping 100.x.x.x
|
||||
# Look for latency > 50ms (indicates network issue)
|
||||
|
||||
# Monitor Ollama performance
|
||||
curl http://localhost:11434/api/tags | jq '.models[] | {name, size}'
|
||||
|
||||
# Stop other processes on the server
|
||||
sudo systemctl stop other-service
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### 5. Environment Variable Not Working
|
||||
|
||||
**Symptoms:**
|
||||
- `OLLAMA_ENDPOINT` is set but not being used
|
||||
- Still trying to connect to localhost
|
||||
|
||||
**Causes:**
|
||||
- Environment variable not exported
|
||||
- Shell not reloaded after setting variable
|
||||
- Variable set in wrong shell config file
|
||||
|
||||
**Solutions:**
|
||||
|
||||
```bash
|
||||
# Verify the variable is set
|
||||
echo $OLLAMA_ENDPOINT
|
||||
|
||||
# If empty, add to shell config
|
||||
# For zsh (~/.zshrc):
|
||||
echo 'export OLLAMA_ENDPOINT="http://100.x.x.x:11434"' >> ~/.zshrc
|
||||
source ~/.zshrc
|
||||
|
||||
# For bash (~/.bashrc):
|
||||
echo 'export OLLAMA_ENDPOINT="http://100.x.x.x:11434"' >> ~/.bashrc
|
||||
source ~/.bashrc
|
||||
|
||||
# For fish (~/.config/fish/config.fish):
|
||||
echo 'set -gx OLLAMA_ENDPOINT "http://100.x.x.x:11434"' >> ~/.config/fish/config.fish
|
||||
source ~/.config/fish/config.fish
|
||||
|
||||
# Verify it's set
|
||||
echo $OLLAMA_ENDPOINT
|
||||
|
||||
# Restart Neovim to pick up the new variable
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### 6. Ollama Not Accessible from Network
|
||||
|
||||
**Symptoms:**
|
||||
- Works on localhost
|
||||
- Fails when connecting from another machine
|
||||
- `curl http://100.x.x.x:11434/api/tags` fails
|
||||
|
||||
**Causes:**
|
||||
- Ollama is only bound to localhost (127.0.0.1)
|
||||
- Firewall is blocking port 11434
|
||||
- Network connectivity issue
|
||||
|
||||
**Solutions:**
|
||||
|
||||
```bash
|
||||
# Check what Ollama is bound to
|
||||
sudo netstat -tlnp | grep ollama
|
||||
# Should show 0.0.0.0:11434 or your IP, not 127.0.0.1:11434
|
||||
|
||||
# If bound to localhost only, fix it:
|
||||
sudo systemctl edit ollama
|
||||
|
||||
# Add this line in the [Service] section:
|
||||
# Environment="OLLAMA_HOST=0.0.0.0:11434"
|
||||
|
||||
# Save and restart
|
||||
sudo systemctl restart ollama
|
||||
|
||||
# Verify it's now listening on all interfaces
|
||||
sudo netstat -tlnp | grep ollama
|
||||
|
||||
# Check firewall
|
||||
sudo ufw status
|
||||
sudo ufw allow 11434/tcp
|
||||
sudo ufw reload
|
||||
|
||||
# Test from another machine
|
||||
curl http://100.x.x.x:11434/api/tags
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### 7. Neovim CodeCompanion Not Recognizing Ollama
|
||||
|
||||
**Symptoms:**
|
||||
- Ollama adapter not available
|
||||
- `<leader>cll` doesn't work
|
||||
- Error about unknown adapter
|
||||
|
||||
**Causes:**
|
||||
- CodeCompanion plugin not loaded
|
||||
- Ollama adapter not properly configured
|
||||
- Neovim config not reloaded
|
||||
|
||||
**Solutions:**
|
||||
|
||||
```vim
|
||||
" In Neovim:
|
||||
|
||||
" Check if CodeCompanion is loaded
|
||||
:checkhealth codecompanion
|
||||
|
||||
" Reload config
|
||||
:source ~/.config/nvim/init.lua
|
||||
|
||||
" Or restart Neovim completely
|
||||
:qa!
|
||||
nvim
|
||||
|
||||
" Check available adapters
|
||||
:CodeCompanionChat <Tab>
|
||||
" Should show: ollama, anthropic, anthropic_opus, anthropic_haiku
|
||||
|
||||
" Test the adapter
|
||||
:CodeCompanionChat ollama Toggle
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### 8. Ollama Server Crashes
|
||||
|
||||
**Symptoms:**
|
||||
- Ollama process dies unexpectedly
|
||||
- Connection drops mid-conversation
|
||||
- Out of memory errors
|
||||
|
||||
**Causes:**
|
||||
- Insufficient RAM
|
||||
- Model is too large
|
||||
- System is under heavy load
|
||||
- Ollama bug
|
||||
|
||||
**Solutions:**
|
||||
|
||||
```bash
|
||||
# Check system resources
|
||||
free -h
|
||||
df -h
|
||||
|
||||
# Check Ollama logs
|
||||
journalctl -u ollama -n 50
|
||||
journalctl -u ollama -f # Follow logs
|
||||
|
||||
# Check if model is too large
|
||||
ollama list
|
||||
# Compare model size with available RAM
|
||||
|
||||
# Reduce model size
|
||||
ollama rm mistral
|
||||
ollama pull orca-mini # Smaller model
|
||||
|
||||
# Increase swap (temporary fix)
|
||||
sudo fallocate -l 4G /swapfile
|
||||
sudo chmod 600 /swapfile
|
||||
sudo mkswap /swapfile
|
||||
sudo swapon /swapfile
|
||||
|
||||
# Monitor while running
|
||||
watch -n 1 'free -h && echo "---" && ps aux | grep ollama'
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### 9. Timeout Errors
|
||||
|
||||
**Error Message:**
|
||||
```
|
||||
Error: Request timeout
|
||||
Connection timed out after 30 seconds
|
||||
```
|
||||
|
||||
**Causes:**
|
||||
- Model is taking too long to respond
|
||||
- Network latency is too high
|
||||
- Server is overloaded
|
||||
|
||||
**Solutions:**
|
||||
|
||||
```bash
|
||||
# Check network latency
|
||||
ping -c 5 100.x.x.x
|
||||
# Acceptable: < 50ms
|
||||
# Marginal: 50-100ms
|
||||
# Poor: > 100ms
|
||||
|
||||
# Try a faster model
|
||||
ollama pull orca-mini
|
||||
|
||||
# Check server load
|
||||
ssh user@100.x.x.x
|
||||
top
|
||||
# Look for high CPU or memory usage
|
||||
|
||||
# Reduce concurrent requests
|
||||
# Only run one CodeCompanion chat at a time
|
||||
|
||||
# Increase timeout in CodeCompanion (if supported)
|
||||
# Check CodeCompanion documentation
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### 10. Permission Denied Errors
|
||||
|
||||
**Error Message:**
|
||||
```
|
||||
Error: Permission denied
|
||||
Cannot access /var/lib/ollama
|
||||
```
|
||||
|
||||
**Causes:**
|
||||
- Ollama service running as different user
|
||||
- File permissions are incorrect
|
||||
- SELinux or AppArmor restrictions
|
||||
|
||||
**Solutions:**
|
||||
|
||||
```bash
|
||||
# Check Ollama service user
|
||||
sudo systemctl show -p User ollama
|
||||
|
||||
# Fix permissions
|
||||
sudo chown -R ollama:ollama /var/lib/ollama
|
||||
sudo chmod -R 755 /var/lib/ollama
|
||||
|
||||
# Restart service
|
||||
sudo systemctl restart ollama
|
||||
|
||||
# Check SELinux (if applicable)
|
||||
getenforce
|
||||
# If "Enforcing", may need to adjust policies
|
||||
|
||||
# Check AppArmor (if applicable)
|
||||
sudo aa-status | grep ollama
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Quick Diagnostic Script
|
||||
|
||||
```bash
|
||||
#!/bin/bash
|
||||
# Save as: ~/check_ollama.sh
|
||||
# Run with: bash ~/check_ollama.sh
|
||||
|
||||
echo "=== Ollama Diagnostic Check ==="
|
||||
echo
|
||||
|
||||
echo "1. Ollama Service Status:"
|
||||
sudo systemctl status ollama --no-pager | head -5
|
||||
echo
|
||||
|
||||
echo "2. Ollama Process:"
|
||||
ps aux | grep ollama | grep -v grep || echo "Not running"
|
||||
echo
|
||||
|
||||
echo "3. Port Binding:"
|
||||
sudo netstat -tlnp | grep 11434 || echo "Not listening on 11434"
|
||||
echo
|
||||
|
||||
echo "4. Available Models:"
|
||||
curl -s http://localhost:11434/api/tags | jq '.models[].name' 2>/dev/null || echo "Cannot connect to Ollama"
|
||||
echo
|
||||
|
||||
echo "5. Tailscale Status:"
|
||||
tailscale status --self 2>/dev/null || echo "Tailscale not running"
|
||||
echo
|
||||
|
||||
echo "6. System Resources:"
|
||||
echo "Memory: $(free -h | grep Mem | awk '{print $3 "/" $2}')"
|
||||
echo "Disk: $(df -h / | tail -1 | awk '{print $3 "/" $2}')"
|
||||
echo
|
||||
|
||||
echo "=== End Diagnostic ==="
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Getting Help
|
||||
|
||||
If you're still having issues:
|
||||
|
||||
1. **Check the logs:**
|
||||
```bash
|
||||
journalctl -u ollama -n 100
|
||||
```
|
||||
|
||||
2. **Test connectivity:**
|
||||
```bash
|
||||
curl -v http://localhost:11434/api/tags
|
||||
curl -v http://100.x.x.x:11434/api/tags
|
||||
```
|
||||
|
||||
3. **Check Neovim messages:**
|
||||
```vim
|
||||
:messages
|
||||
```
|
||||
|
||||
4. **Report issues:**
|
||||
- Ollama: https://github.com/ollama/ollama/issues
|
||||
- CodeCompanion: https://github.com/olimorris/codecompanion.nvim/issues
|
||||
- Tailscale: https://github.com/tailscale/tailscale/issues
|
||||
44
docs/ollama_env_example.sh
Normal file
44
docs/ollama_env_example.sh
Normal file
@@ -0,0 +1,44 @@
|
||||
#!/bin/bash
|
||||
# Example shell configuration for Ollama endpoint
|
||||
# Add this to your ~/.zshrc, ~/.bashrc, or ~/.config/fish/config.fish
|
||||
|
||||
# ============================================================================
|
||||
# OLLAMA CONFIGURATION
|
||||
# ============================================================================
|
||||
|
||||
# Set your Ollama server's Tailscale IP here
|
||||
# Find it by running: tailscale ip -4 on your Ollama server
|
||||
OLLAMA_SERVER_IP="100.123.45.67" # CHANGE THIS TO YOUR TAILSCALE IP
|
||||
|
||||
# Set the Ollama endpoint
|
||||
export OLLAMA_ENDPOINT="http://${OLLAMA_SERVER_IP}:11434"
|
||||
|
||||
# Optional: Add a function to quickly test the connection
|
||||
ollama_test() {
|
||||
echo "Testing Ollama connection to ${OLLAMA_ENDPOINT}..."
|
||||
if curl -s "${OLLAMA_ENDPOINT}/api/tags" > /dev/null; then
|
||||
echo "✓ Ollama is reachable"
|
||||
echo "Available models:"
|
||||
curl -s "${OLLAMA_ENDPOINT}/api/tags" | jq '.models[].name' 2>/dev/null || echo "(Could not parse models)"
|
||||
else
|
||||
echo "✗ Ollama is not reachable at ${OLLAMA_ENDPOINT}"
|
||||
echo "Troubleshooting:"
|
||||
echo "1. Verify Tailscale is running: tailscale status"
|
||||
echo "2. Verify Ollama is running on the server"
|
||||
echo "3. Check the Tailscale IP is correct"
|
||||
fi
|
||||
}
|
||||
|
||||
# Optional: Add a function to list available models
|
||||
ollama_models() {
|
||||
curl -s "${OLLAMA_ENDPOINT}/api/tags" | jq '.models[] | {name: .name, size: .size}' 2>/dev/null || echo "Could not fetch models"
|
||||
}
|
||||
|
||||
# ============================================================================
|
||||
# INSTRUCTIONS
|
||||
# ============================================================================
|
||||
# 1. Replace "100.123.45.67" with your actual Tailscale IP
|
||||
# 2. Add this to your shell config file
|
||||
# 3. Reload your shell: source ~/.zshrc (or ~/.bashrc, etc.)
|
||||
# 4. Test with: ollama_test
|
||||
# 5. Use in Neovim: press <leader>cll to chat with Ollama
|
||||
@@ -15,9 +15,22 @@ return {
|
||||
require("shelbybark.plugins.codecompanion.fidget-spinner"):init()
|
||||
end,
|
||||
config = function()
|
||||
-- Store config in a module-level variable for later access
|
||||
local codecompanion_config = {
|
||||
strategies = {
|
||||
chat = {
|
||||
adapter = "anthropic_haiku",
|
||||
},
|
||||
inline = {
|
||||
adapter = "anthropic_haiku",
|
||||
},
|
||||
},
|
||||
}
|
||||
_G.codecompanion_config = codecompanion_config
|
||||
|
||||
require("codecompanion").setup({
|
||||
ignore_warnings = true,
|
||||
strategies = {
|
||||
strategies = {
|
||||
chat = {
|
||||
adapter = "anthropic_haiku",
|
||||
},
|
||||
@@ -107,14 +120,7 @@ return {
|
||||
|
||||
-- Create commands to show and change current model
|
||||
vim.api.nvim_create_user_command("CodeCompanionModel", function()
|
||||
local ok, codecompanion = pcall(require, "codecompanion")
|
||||
if not ok then
|
||||
vim.notify("CodeCompanion not available", vim.log.levels.ERROR)
|
||||
return
|
||||
end
|
||||
|
||||
-- Get current adapter info
|
||||
local current_adapter = codecompanion.config.strategies.chat.adapter
|
||||
local current_adapter = _G.codecompanion_config.strategies.chat.adapter
|
||||
local model_info = "Unknown"
|
||||
|
||||
if current_adapter == "anthropic" then
|
||||
@@ -150,8 +156,8 @@ return {
|
||||
end
|
||||
|
||||
-- Update the config
|
||||
require("codecompanion").config.strategies.chat.adapter = adapter
|
||||
require("codecompanion").config.strategies.inline.adapter = adapter
|
||||
_G.codecompanion_config.strategies.chat.adapter = adapter
|
||||
_G.codecompanion_config.strategies.inline.adapter = adapter
|
||||
|
||||
vim.notify(string.format("Switched to %s model", model), vim.log.levels.INFO)
|
||||
|
||||
|
||||
@@ -1,80 +1,70 @@
|
||||
-- Treesitter configuration for syntax highlighting and text objects
|
||||
-- Treesitter configuration for syntax highlighting
|
||||
-- Note: The new version of nvim-treesitter (post June 2023) dropped the module system.
|
||||
-- Highlighting is now handled by Neovim's native treesitter API.
|
||||
-- Text objects are handled by Neovim's native treesitter text objects (0.10+)
|
||||
return {
|
||||
{
|
||||
"nvim-treesitter/nvim-treesitter",
|
||||
lazy = false,
|
||||
priority = 1000,
|
||||
build = ":TSUpdate",
|
||||
dependencies = {
|
||||
{
|
||||
"nvim-treesitter/nvim-treesitter-textobjects",
|
||||
lazy = false,
|
||||
},
|
||||
},
|
||||
config = function()
|
||||
require("nvim-treesitter.config").setup({
|
||||
ensure_installed = {
|
||||
"astro",
|
||||
"bash",
|
||||
"c",
|
||||
"css",
|
||||
"diff",
|
||||
"go",
|
||||
"gomod",
|
||||
"gowork",
|
||||
"gosum",
|
||||
"graphql",
|
||||
"html",
|
||||
"javascript",
|
||||
"jsdoc",
|
||||
"json",
|
||||
"jsonc",
|
||||
"json5",
|
||||
"lua",
|
||||
"luadoc",
|
||||
"luap",
|
||||
"markdown",
|
||||
"markdown_inline",
|
||||
"python",
|
||||
"query",
|
||||
"regex",
|
||||
"toml",
|
||||
"tsx",
|
||||
"typescript",
|
||||
"vim",
|
||||
"vimdoc",
|
||||
"yaml",
|
||||
"ruby",
|
||||
},
|
||||
sync_install = false,
|
||||
auto_install = true,
|
||||
highlight = {
|
||||
enable = true,
|
||||
additional_vim_regex_highlighting = false,
|
||||
},
|
||||
indent = { enable = true },
|
||||
incremental_selection = {
|
||||
enable = true,
|
||||
keymaps = {
|
||||
init_selection = "<C-space>",
|
||||
node_incremental = "<C-space>",
|
||||
scope_incremental = false,
|
||||
node_decremental = "<bs>",
|
||||
},
|
||||
},
|
||||
textobjects = {
|
||||
select = {
|
||||
enable = true,
|
||||
lookahead = true,
|
||||
keymaps = {
|
||||
["af"] = "@function.outer",
|
||||
["if"] = "@function.inner",
|
||||
["ac"] = "@class.outer",
|
||||
["ic"] = "@class.inner",
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
-- Install parsers
|
||||
require("nvim-treesitter").install({
|
||||
"astro",
|
||||
"bash",
|
||||
"c",
|
||||
"css",
|
||||
"diff",
|
||||
"go",
|
||||
"gomod",
|
||||
"gowork",
|
||||
"gosum",
|
||||
"graphql",
|
||||
"html",
|
||||
"javascript",
|
||||
"jsdoc",
|
||||
"json",
|
||||
"jsonc",
|
||||
"json5",
|
||||
"lua",
|
||||
"luadoc",
|
||||
"luap",
|
||||
"markdown",
|
||||
"markdown_inline",
|
||||
"python",
|
||||
"query",
|
||||
"regex",
|
||||
"toml",
|
||||
"tsx",
|
||||
"typescript",
|
||||
"vim",
|
||||
"vimdoc",
|
||||
"yaml",
|
||||
"ruby",
|
||||
})
|
||||
|
||||
-- Enable treesitter highlighting for supported filetypes
|
||||
vim.api.nvim_create_autocmd("FileType", {
|
||||
callback = function()
|
||||
local ok = pcall(vim.treesitter.start)
|
||||
if not ok then
|
||||
-- Parser not available for this filetype
|
||||
end
|
||||
end,
|
||||
})
|
||||
|
||||
-- Enable treesitter-based indentation for supported filetypes
|
||||
vim.api.nvim_create_autocmd("FileType", {
|
||||
callback = function()
|
||||
local ok = pcall(function()
|
||||
vim.opt_local.indentexpr = "v:lua.vim.treesitter.indentexpr()"
|
||||
end)
|
||||
if not ok then
|
||||
-- Parser not available for this filetype
|
||||
end
|
||||
end,
|
||||
})
|
||||
end,
|
||||
},
|
||||
{
|
||||
@@ -86,4 +76,3 @@ return {
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
Reference in New Issue
Block a user