51 lines
1.4 KiB
Python
51 lines
1.4 KiB
Python
import ollama
|
||
import subprocess
|
||
|
||
|
||
def shell_command(command: str) -> str:
|
||
"""
|
||
Run a shell command.
|
||
|
||
Args:
|
||
command: the command to run
|
||
|
||
Returns:
|
||
stdout: the stdout
|
||
stderr: the stderr
|
||
"""
|
||
exec = subprocess.run(command, encoding="utf-8", shell=True, capture_output=True)
|
||
return f"stdout:\n{exec.stdout}\n\nstderr:\n{exec.stderr}"
|
||
|
||
|
||
tools = [shell_command]  # ollama-python turns this into JSON

# Seed the conversation with a single user turn.
messages = [{"role": "user", "content": input("prompt > ")}]

while True:
    stream = ollama.chat(model="gpt-oss", messages=messages,
                         tools=tools, stream=True)

    ran_tool = False
    assistant_parts = []   # streamed assistant text, accumulated for history
    tool_calls = []        # tool calls requested during this turn

    for chunk in stream:
        msg = chunk.message

        # 1️⃣ the model is asking us to run something -- collect the calls
        # and execute them after the stream has fully drained.
        if msg.tool_calls:
            tool_calls.extend(msg.tool_calls)

        # 2️⃣ normal user-visible text
        elif msg.content:
            print(msg.content, end="", flush=True)
            assistant_parts.append(msg.content)

    # BUG FIX: the original never recorded the assistant turn, so the model
    # lost its own replies from history and tool results were appended with
    # no matching assistant tool-call message before them.
    messages.append({
        "role": "assistant",
        "content": "".join(assistant_parts),
        "tool_calls": tool_calls,
    })

    for call in tool_calls:
        # ToolCall is a pydantic model; attribute access is the documented
        # form in ollama-python (the original's dict-style subscripting
        # relies on the same model).
        if call.function.name == "shell_command":
            cmd = call.function.arguments["command"]
            output = shell_command(cmd)
            messages.append({
                "role": "tool",
                # NOTE(review): newer ollama versions document "tool_name"
                # for this field -- kept "name" as in the original; confirm
                # against the installed ollama-python version.
                "name": "shell_command",
                "content": output,
            })
            ran_tool = True

    # loop again only if we just satisfied a tool call
    if not ran_tool:
        print()  # terminate the streamed line before exiting
        break
|