{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"This notebook replicates running Open Interpreter locally and uses Llama3 via llamafile"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"## Download LLama3\n",
"\n",
"# Download the Meta-Llama-3-8B-Instruct.llamafile\n",
"!curl -L -o Meta-Llama-3-8B-Instruct.Q5_K_M.llamafile 'https://huggingface.co/Mozilla/Meta-Llama-3-8B-Instruct-llamafile/resolve/main/Meta-Llama-3-8B-Instruct.Q5_K_M.llamafile?download=true'\n",
"\n",
"# Make the downloaded file executable\n",
"!chmod +x Meta-Llama-3-8B-Instruct.Q5_K_M.llamafile"
]
},
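{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick, optional sanity check: the next cell verifies that the llamafile downloaded completely and is executable before anything tries to run it. The size printout is informational only; the ~5.7 GB figure is an assumption based on the Q5_K_M quantization, so adjust it if you chose a different file."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"## Verify the download (optional)\n",
"\n",
"import os\n",
"\n",
"path = 'Meta-Llama-3-8B-Instruct.Q5_K_M.llamafile'\n",
"\n",
"assert os.path.exists(path), 'llamafile not found; re-run the download cell'\n",
"assert os.access(path, os.X_OK), 'not executable; re-run the chmod +x step'\n",
"\n",
"# A complete Q5_K_M download should be roughly 5.7 GB\n",
"print(f'{path}: {os.path.getsize(path) / 1e9:.1f} GB')"
]
},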
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"## Install OI\n",
"\n",
"!pip install open-interpreter --quiet\n",
"!pip install opencv-python --quiet"
]
},
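{
"cell_type": "markdown",
"metadata": {},
"source": [
"Optionally confirm that both packages installed cleanly. This check uses only the standard-library `importlib.metadata`, so it assumes nothing about either package's internals."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"## Confirm the installs (optional)\n",
"\n",
"from importlib.metadata import version\n",
"\n",
"for pkg in ('open-interpreter', 'opencv-python'):\n",
"    print(pkg, version(pkg))"
]
},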
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"## Configure OI\n",
"\n",
"import cv2\n",
"import subprocess\n",
"from interpreter import interpreter\n",
"\n",
"interpreter.offline = True\n",
"\n",
"interpreter.llm.model = \"openai/local\" # Tells OI to use an OpenAI-compatible server\n",
"interpreter.llm.api_key = \"dummy_key\"\n",
"interpreter.llm.api_base = \"http://localhost:8081/v1\"\n",
"interpreter.llm.context_window = 7000\n",
"interpreter.llm.max_tokens = 1000\n",
"interpreter.llm.supports_functions = False"
]
},
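{
"cell_type": "markdown",
"metadata": {},
"source": [
"The next cell starts the server and immediately opens a chat, so it helps to know when the server is actually ready. The helper below is a minimal sketch: it polls the API base URL over plain HTTP and treats any HTTP response as 'up'. The `/v1/models` path is an assumption about llamafile's OpenAI-compatible endpoints; if your build answers differently, any URL the server responds to will do."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"## Helper: wait for the server to come up\n",
"\n",
"import time\n",
"import urllib.error\n",
"import urllib.request\n",
"\n",
"def wait_for_server(url='http://localhost:8080/v1/models', timeout=120):\n",
"    \"\"\"Poll until the server answers HTTP or the timeout expires.\"\"\"\n",
"    deadline = time.time() + timeout\n",
"    while time.time() < deadline:\n",
"        try:\n",
"            with urllib.request.urlopen(url, timeout=5):\n",
"                return True  # got a response; the server is serving\n",
"        except urllib.error.HTTPError:\n",
"            return True  # an HTTP error still means the socket is up\n",
"        except (urllib.error.URLError, OSError):\n",
"            time.sleep(2)  # not listening yet; retry\n",
"    raise TimeoutError(f'server at {url} did not come up within {timeout}s')"
]
},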
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"## Start server, run OI\n",
"\n",
"import subprocess\n",
"import threading\n",
"import os\n",
"\n",
"def read_output(process):\n",
" while True:\n",
" output = process.stdout.readline()\n",
" if output == b'' and process.poll() is not None:\n",
" break\n",
" if output:\n",
" print(output.decode().strip())\n",
"\n",
"# Check if the file exists and has execute permissions\n",
"file_path = os.path.abspath('Meta-Llama-3-8B-Instruct.Q5_K_M.llamafile')\n",
"\n",
"# Why are the arguments not being used??\n",
"command = [file_path, \"--nobrowser\", \"-ngl\", \"9999\"]\n",
"print(command)\n",
"\n",
"# Setting up the Popen call with stderr redirected to stdout\n",
"process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)\n",
"\n",
"# Thread to handle the output asynchronously\n",
"thread = threading.Thread(target=read_output, args=(process,))\n",
"thread.start()\n",
"\n",
"# Here you can do other tasks concurrently\n",
"# For example:\n",
"interpreter.chat()\n",
"\n",
"# Wait for the thread to finish if the process completes\n",
"thread.join()\n",
"\n",
"# Ensure the process has completed\n",
"process.wait()"
]
}
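,
{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a variation on the interactive session above, `interpreter.chat()` also accepts a message string for non-interactive use. The cell below is a sketch of that pattern; note that the previous cell terminates the server when the chat ends, so relaunch it before running this."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"## Non-interactive usage (optional)\n",
"\n",
"# Requires a running llamafile server (re-run the launch steps above first)\n",
"interpreter.messages = []  # start from a clean conversation\n",
"interpreter.chat('In one sentence, what operating system is this machine running?')"
]
}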
],
"metadata": {
"kernelspec": {
"display_name": "oi",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.9"
}
},
"nbformat": 4,
"nbformat_minor": 2
}