Micah Jamison's Dev Blog.
# Barebones NeoVim AI code assistance
This is my barebones setup for code assistance. With AI models moving so fast, the hackable nature of Neovim allows for switching models with minimal effort. So far I've used this successfully with DeepSeek running under Ollama on a separate computer, with Claude, and with Gemini.
## Step 1, create a Neovim plugin that will call a Python script
```lua
MM = {}

function GetOpenBuffers()
  local buffers = vim.fn.getbufinfo({ buflisted = 1 })
  local filenames = {}
  for _, buf in ipairs(buffers) do
    -- Get the full file path
    local filename = buf.name
    -- Only add if there's actually a filename (not an empty buffer)
    if filename and filename ~= '' then
      table.insert(filenames, filename)
    end
  end
  -- Concatenate all filenames with a space separator
  return table.concat(filenames, ' ')
end

MM.AIChatAboutFile = function(options)
  local the_prompt = vim.fn.input "Prompt: "
  local args = ' --file_path \\"' .. vim.fn.expand "%" .. '\\" --prompt \\"' .. the_prompt .. '\\"'
  if options.lines == 1 then
    local start_line = vim.fn.getpos("'<")[2]
    local end_line = vim.fn.getpos("'>")[2]
    args = args .. " --start_line " .. start_line .. " --end_line " .. end_line
  end
  if options.respond == 1 then
    args = args .. " -respond"
  end
  -- start a new buffer for the response
  vim.cmd('new')
  if options.buffer_context == 1 then
    vim.cmd(
      'exec "%!/usr/bin/python3 ' .. vim.fn.expand "$HOME/<path>/continuous_chat.py " .. args .. ' --context ' .. GetOpenBuffers() .. '"'
    )
  else
    vim.cmd(
      'exec "%!/usr/bin/python3 ' .. vim.fn.expand "$HOME/<path>/continuous_chat.py " .. args .. '"'
    )
  end
end

-- visual mode, assume lines selected
vim.api.nvim_set_keymap('v', '<leader>A', ':lua MM.AIChatAboutFile{lines=1}<CR>', {})
vim.api.nvim_set_keymap('v', '<leader>B', ':lua MM.AIChatAboutFile{lines=1, buffer_context=1}<CR>', {})
vim.api.nvim_set_keymap('v', '<leader>R', ':lua MM.AIChatAboutFile{respond=1, lines=1}<CR>', {})

-- normal mode
vim.api.nvim_set_keymap('n', '<leader>A', ':lua MM.AIChatAboutFile{}<CR>', {})
vim.api.nvim_set_keymap('n', '<leader>B', ':lua MM.AIChatAboutFile{buffer_context=1}<CR>', {})
vim.api.nvim_set_keymap('n', '<leader>R', ':lua MM.AIChatAboutFile{respond=1}<CR>', {})
```
I saved this to ~/.config/nvim/lua/options.lua as I'm using NvChad.
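If you're not using NvChad, the same code works from any Lua module on Neovim's runtime path; here's a minimal sketch, where the module name `ai_assist` is only an example:

```lua
-- In ~/.config/nvim/init.lua of a plain Neovim config, assuming the code above
-- was saved as ~/.config/nvim/lua/ai_assist.lua (the file name is illustrative).
require("ai_assist")
```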
## Step 2, create the Python script
The following Python script constructs the API call to the various LLMs via litellm:
```python
import os
import argparse
import pathlib
import json

from litellm import completion

# os.environ["ANTHROPIC_API_KEY"] = (
#     "<anthropic_api_key>"
# )
os.environ["GEMINI_API_KEY"] = "<gemini_api_key>"

API_KEY_MODELS = {
    "ANTHROPIC_API_KEY": "claude-3-5-sonnet-20241022",
    # "GEMINI_API_KEY": "gemini/gemini-pro",
    "GEMINI_API_KEY": "gemini/gemini-2.5-pro-exp-03-25",  # pip install google-auth
}


def chat_with_litellm(
    file_path,
    start_line,
    end_line,
    prompt,
    context_files=None,
    respond=False,  # use existing history
    context_from_source=True,
    # edit=False,
):
    """
    Interacts with an AI model (via litellm) using context from files and managing chat history.

    This function prepares context based on the provided file (`file_path`),
    potentially specific line ranges (`start_line`, `end_line`), and optional
    additional context files (`context_files`). It sends this context along
    with the user's prompt to an AI model using the litellm library.
    It handles saving and loading conversation history to enable follow-up
    interactions, stored in a JSON file named after the primary `file_path`.

    Args:
        file_path (str): Path to the primary source file.
        start_line (int | None): Starting line number (1-based) of a specific
            code snippet from `file_path` to highlight for the AI. If None
            or `end_line` is None, no specific lines are highlighted in the
            initial system message preamble.
        end_line (int | None): Ending line number (1-based) of the specific
            code snippet from `file_path`.
        prompt (str): The user's query or instruction for the AI model.
        context_files (list[str] | None, optional): A list of paths to
            additional files to include as context in the chat history.
            Defaults to None.
        respond (bool, optional): Controls chat history usage.
            - If True: Loads existing conversation history (if any) from a
              `.json` file (derived from `file_path`) and continues the chat.
              The full content of `file_path` is *not* added again, though
              `start_line`/`end_line` snippets still are if provided.
            - If False: Starts a new chat session. Any existing history file
              is deleted. The full content of `file_path` *is* included
              as context if `context_from_source` is True.
            Defaults to False.
        context_from_source (bool, optional): Controls inclusion of the full
            primary file content.
            - If True and `respond` is False: Includes the *entire* content
              of `file_path` as context in the initial message.
            - If False or `respond` is True: The full content of `file_path`
              is not automatically included (though specific lines might be,
              based on `start_line`/`end_line`).
            Defaults to True.

    Returns:
        None: The function prints the AI response to the console and manages
        the history file directly. It may also print error messages.
    """
    api_key = None
    # default to lan-hosted ollama deepseek if not using claude or gemini
    model = "ollama/deepseek-r1:32b"
    api_base = "http://192.168.1.225:11434"
    # check for api_key env vars; use the matching model if a key exists
    # (assigning to a separate loop variable keeps the ollama default intact
    # when no key is set)
    for key, keyed_model in API_KEY_MODELS.items():
        api_key = os.getenv(key)
        if api_key:
            model = keyed_model
            api_base = None
            break

    # Read the main file
    try:
        with open(file_path, "r") as file:
            lines = file.readlines()
    except FileNotFoundError:
        print(f"Error: File {file_path} not found")
        return
    except Exception as e:
        print(f"Error reading file: {e}")
        return

    # add specified lines if set
    target_lines = ""
    if start_line is not None and end_line is not None:
        # Extract the specified lines
        if start_line < 1 or end_line > len(lines):
            print("Error: Line numbers out of range")
            return
        target_lines = "".join(lines[start_line - 1 : end_line])

    fpath = pathlib.Path(file_path)
    message_history_path = fpath.parent / ("." + fpath.stem + ".json")
    messages = []
    if message_history_path.exists():
        if not respond:
            os.unlink(message_history_path)
        else:
            with open(message_history_path, "rb") as history_fl:
                try:
                    messages = json.load(history_fl)
                except Exception as e:
                    print(
                        f"Warning: Could not read message history file {message_history_path}: {e}"
                    )

    # Build context from additional files
    context = ""
    relative_path = os.path.abspath(os.path.curdir)
    if context_files:
        for cf in context_files:
            try:
                with open(cf, "r") as file:
                    context += f"\nContext from {str(cf).replace(relative_path, '')}:\n```{file.read()}\n```"
            except Exception as e:
                print(f"Warning: Could not read context file {cf}: {e}")

    # Prepare the message for the AI model:
    # always include target_lines if they exist;
    # only include context from the source file if not responding;
    # if including context from the source, the wording depends on whether
    # target_lines are being passed or not
    system_message = ""
    if target_lines:
        system_message = f"Here are the lines to modify from {str(fpath).replace(relative_path, '')}:\n```{target_lines}```\n"
    if context_from_source and not respond:
        system_message += f"All lines from {str(fpath).replace(relative_path, '')}: \n```{''.join(lines)}```\n"
    system_message += f"{context}\n\nUser request: {prompt}\n\n"
    # if edit:
    #     system_message += (
    #         "\n\nPlease provide only the modified lines without any explanation."
    #     )
    messages.append({"role": "user", "content": system_message})

    try:
        response = completion(model=model, messages=messages, api_base=api_base)
        response_content = response.choices[0].message.content.strip()
        ai_response = {"role": "assistant", "content": response_content}
        messages.append(ai_response)
        with open(message_history_path, "w") as history_fl:
            json.dump(messages, history_fl)
        # if edit:
        #     # Update the file
        #     lines[start_line - 1 : end_line] = [response_content + "\n"]
        #
        #     # Write back to file
        #     with open(file_path, "w") as file:
        #         file.writelines(lines)
        print(
            f"""
## You asked:
{prompt}
{f"Target Lines: {target_lines}" if start_line is not None else ""}
## AI Responded:
{response_content}
"""
        )
    except Exception as e:
        print(f"Error calling model: {e}")


def main():
    parser = argparse.ArgumentParser(description="Modify file content using AI model")
    parser.add_argument("--file_path", help="Path to the file to modify")
    parser.add_argument("--prompt", help="Prompt for AI model")
    parser.add_argument("--start_line", type=int, help="Starting line number")
    parser.add_argument("--end_line", type=int, help="Ending line number")
    parser.add_argument("--context", nargs="+", help="Additional context file paths")
    parser.add_argument(
        "-respond",
        action="store_true",
        help="Respond to previous conversation",
        default=False,
    )
    parser.add_argument(
        "-context_from_source",
        action="store_true",
        help="Include all lines from source file",
        default=True,
    )
    # parser.add_argument("-edit", action="store_true", help="Edit the file inline")
    args = parser.parse_args()
    print(f"File path: {args.file_path}")
    chat_with_litellm(
        args.file_path,
        args.start_line,
        args.end_line,
        args.prompt,
        args.context,
        args.respond,
        args.context_from_source,
    )


if __name__ == "__main__":
    main()
```
You'll notice some commented-out code. I experimented with having the script edit the files directly, but opted instead for the new buffer with suggested changes so I can review them first. Most of this should be easy to follow by reading the code, as writing out all the options here requires more typing than I'm willing to do EOD Friday. But I've found myself mostly using `<leader>A` in visual mode with lines selected, which starts a new chat. Then I use `<leader>R` in normal mode to respond to the previous conversation about the file with any changes.
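The script can also be exercised outside Neovim, which is handy for debugging the model or prompt setup. A rough sketch of a direct call follows; the file paths, line range, and prompt are placeholders, and it assumes you run it from the directory containing `continuous_chat.py` so the module is importable:

```python
# Hypothetical direct call for testing outside Neovim; paths and prompt are
# made up, and `continuous_chat` must be importable from the current directory.
from continuous_chat import chat_with_litellm

chat_with_litellm(
    file_path="src/app.py",          # primary file the chat is about
    start_line=10,                   # 1-based start of the selected snippet
    end_line=25,                     # 1-based end of the selected snippet
    prompt="Refactor this loop to avoid the nested if statements",
    context_files=["src/utils.py"],  # extra files, like the --context flag
    respond=False,                   # False starts a fresh chat/history file
)
```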