Skip to content

Commit 2cacac0

Browse files
committed
Adjust how messages are built to match new vscode requests
- Use new VSCode format for embedding files - Use new VSCode format for active selection - Use new VSCode explain and generic prompts - Remove workspace prompt; we don't have workspace support, so for now there is really no need for it, and it can easily become outdated too. Signed-off-by: Tomas Slusny <slusnucky@gmail.com>
1 parent a1d97c7 commit 2cacac0

File tree

4 files changed

+110
-185
lines changed

4 files changed

+110
-185
lines changed

README.md

Lines changed: 4 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -114,11 +114,11 @@ Verify "[Copilot chat in the IDE](https://github.com/settings/copilot)" is enabl
114114

115115
#### Commands coming from default prompts
116116

117-
- `:CopilotChatExplain` - Write an explanation for the active selection and diagnostics as paragraphs of text
117+
- `:CopilotChatExplain` - Write an explanation for the selected code and diagnostics as paragraphs of text
118118
- `:CopilotChatReview` - Review the selected code
119119
- `:CopilotChatFix` - There is a problem in this code. Rewrite the code to show it with the bug fixed
120120
- `:CopilotChatOptimize` - Optimize the selected code to improve performance and readability
121-
- `:CopilotChatDocs` - Please add documentation comment for the selection
121+
- `:CopilotChatDocs` - Please add documentation comments to the selected code
122122
- `:CopilotChatTests` - Please generate tests for my code
123123
- `:CopilotChatCommit` - Write commit message for the change with commitizen convention
124124

@@ -230,7 +230,7 @@ Also see [here](/lua/CopilotChat/config.lua):
230230
-- default prompts
231231
prompts = {
232232
Explain = {
233-
prompt = '/COPILOT_EXPLAIN Write an explanation for the active selection and diagnostics as paragraphs of text.',
233+
prompt = '/COPILOT_EXPLAIN Write an explanation for the selected code and diagnostics as paragraphs of text.',
234234
},
235235
Review = {
236236
prompt = '/COPILOT_REVIEW Review the selected code.',
@@ -245,7 +245,7 @@ Also see [here](/lua/CopilotChat/config.lua):
245245
prompt = '/COPILOT_GENERATE Optimize the selected code to improve performance and readability.',
246246
},
247247
Docs = {
248-
prompt = '/COPILOT_GENERATE Please add documentation comment for the selection.',
248+
prompt = '/COPILOT_GENERATE Please add documentation comments to the selected code.',
249249
},
250250
Tests = {
251251
prompt = '/COPILOT_GENERATE Please generate tests for my code.',

lua/CopilotChat/config.lua

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -122,7 +122,7 @@ return {
122122
-- default prompts
123123
prompts = {
124124
Explain = {
125-
prompt = '/COPILOT_EXPLAIN Write an explanation for the active selection and diagnostics as paragraphs of text.',
125+
prompt = '/COPILOT_EXPLAIN Write an explanation for the selected code and diagnostics as paragraphs of text.',
126126
},
127127
Review = {
128128
prompt = '/COPILOT_REVIEW Review the selected code.',
@@ -173,7 +173,7 @@ return {
173173
prompt = '/COPILOT_GENERATE Optimize the selected code to improve performance and readability.',
174174
},
175175
Docs = {
176-
prompt = '/COPILOT_GENERATE Please add documentation comment for the selection.',
176+
prompt = '/COPILOT_GENERATE Please add documentation comments to the selected code.',
177177
},
178178
Tests = {
179179
prompt = '/COPILOT_GENERATE Please generate tests for my code.',

lua/CopilotChat/copilot.lua

Lines changed: 81 additions & 66 deletions
Original file line number | Diff line number | Diff line change
@@ -154,26 +154,42 @@ local function get_cached_token()
154154
return nil
155155
end
156156

157-
local function generate_selection_message(filename, filetype, selection)
157+
local function generate_line_numbers(content, start_row)
158+
local lines = vim.split(content, '\n')
159+
local total_lines = #lines
160+
local max_length = #tostring(total_lines)
161+
for i, line in ipairs(lines) do
162+
local formatted_line_number = string.format('%' .. max_length .. 'd', i - 1 + (start_row or 1))
163+
lines[i] = formatted_line_number .. ': ' .. line
164+
end
165+
content = table.concat(lines, '\n')
166+
return content
167+
end
168+
169+
local function generate_selection_messages(filename, filetype, selection)
158170
local content = selection.lines
159171

160172
if not content or content == '' then
161-
return ''
173+
return {}
162174
end
163175

176+
local out = string.format('# FILE:%s CONTEXT\n', filename:upper())
177+
out = out .. "User's active selection:\n"
164178
if selection.start_row and selection.start_row > 0 then
165-
local lines = vim.split(content, '\n')
166-
local total_lines = #lines
167-
local max_length = #tostring(total_lines)
168-
for i, line in ipairs(lines) do
169-
local formatted_line_number =
170-
string.format('%' .. max_length .. 'd', i - 1 + selection.start_row)
171-
lines[i] = formatted_line_number .. ': ' .. line
172-
end
173-
content = table.concat(lines, '\n')
179+
out = out
180+
.. string.format(
181+
'Excerpt from %s, lines %s to %s:\n',
182+
filename,
183+
selection.start_row,
184+
selection.end_row
185+
)
174186
end
175-
176-
local out = string.format('Active selection: `%s`\n```%s\n%s\n```', filename, filetype, content)
187+
out = out
188+
.. string.format(
189+
'```%s\n%s\n```',
190+
filetype,
191+
generate_line_numbers(content, selection.start_row)
192+
)
177193

178194
if selection.diagnostics then
179195
local diagnostics = {}
@@ -199,14 +215,19 @@ local function generate_selection_message(filename, filetype, selection)
199215
end
200216
end
201217

202-
out =
203-
string.format('%s\nDiagnostics: `%s`\n%s\n', out, filename, table.concat(diagnostics, '\n'))
218+
out = out
219+
.. string.format('\n# FILE:%s DIAGNOSTICS:\n%s', filename, table.concat(diagnostics, '\n'))
204220
end
205221

206-
return out
222+
return {
223+
{
224+
content = out,
225+
role = 'user',
226+
},
227+
}
207228
end
208229

209-
local function generate_embeddings_message(embeddings)
230+
local function generate_embeddings_messages(embeddings)
210231
local files = {}
211232
for _, embedding in ipairs(embeddings) do
212233
local filename = embedding.filename
@@ -216,36 +237,33 @@ local function generate_embeddings_message(embeddings)
216237
table.insert(files[filename], embedding)
217238
end
218239

219-
local out = {
220-
header = 'Open files:\n',
221-
files = {},
222-
}
240+
local out = {}
223241

224242
for filename, group in pairs(files) do
225-
table.insert(
226-
out.files,
227-
string.format(
228-
'File: `%s`\n```%s\n%s\n```\n',
229-
filename,
243+
table.insert(out, {
244+
content = string.format(
245+
'# FILE:%s CONTEXT\n```%s\n%s\n```',
246+
filename:upper(),
230247
group[1].filetype,
231-
table.concat(
248+
generate_line_numbers(table.concat(
232249
vim.tbl_map(function(e)
233250
return vim.trim(e.content)
234251
end, group),
235252
'\n'
236-
)
237-
)
238-
)
253+
))
254+
),
255+
role = 'user',
256+
})
239257
end
258+
240259
return out
241260
end
242261

243262
local function generate_ask_request(
244263
history,
245264
prompt,
246-
embeddings,
247-
selection,
248265
system_prompt,
266+
generated_messages,
249267
model,
250268
temperature,
251269
max_output_tokens,
@@ -261,22 +279,12 @@ local function generate_ask_request(
261279
})
262280
end
263281

264-
for _, message in ipairs(history) do
282+
for _, message in ipairs(generated_messages) do
265283
table.insert(messages, message)
266284
end
267285

268-
if embeddings and #embeddings.files > 0 then
269-
table.insert(messages, {
270-
content = embeddings.header .. table.concat(embeddings.files, ''),
271-
role = system_role,
272-
})
273-
end
274-
275-
if selection ~= '' then
276-
table.insert(messages, {
277-
content = selection,
278-
role = system_role,
279-
})
286+
for _, message in ipairs(history) do
287+
table.insert(messages, message)
280288
end
281289

282290
table.insert(messages, {
@@ -533,21 +541,26 @@ function Copilot:ask(prompt, opts)
533541
log.debug('Tokenizer: ' .. tokenizer)
534542
tiktoken_load(tokenizer)
535543

536-
local selection_message = generate_selection_message(filename, filetype, selection)
537-
local embeddings_message = generate_embeddings_message(embeddings)
544+
local generated_messages = {}
545+
local selection_messages = generate_selection_messages(filename, filetype, selection)
546+
local embeddings_messages = generate_embeddings_messages(embeddings)
547+
local generated_tokens = 0
548+
for _, message in ipairs(selection_messages) do
549+
generated_tokens = generated_tokens + tiktoken.count(message.content)
550+
table.insert(generated_messages, message)
551+
end
538552

539553
-- Count required tokens that we cannot reduce
540554
local prompt_tokens = tiktoken.count(prompt)
541555
local system_tokens = tiktoken.count(system_prompt)
542-
local selection_tokens = tiktoken.count(selection_message)
543-
local required_tokens = prompt_tokens + system_tokens + selection_tokens
556+
local required_tokens = prompt_tokens + system_tokens + generated_tokens
544557

545558
-- Reserve space for first embedding if its smaller than half of max tokens
546559
local reserved_tokens = 0
547-
if #embeddings_message.files > 0 then
548-
local file_tokens = tiktoken.count(embeddings_message.files[1])
560+
if #embeddings_messages > 0 then
561+
local file_tokens = tiktoken.count(embeddings_messages[1].content)
549562
if file_tokens < max_tokens / 2 then
550-
reserved_tokens = tiktoken.count(embeddings_message.header) + file_tokens
563+
reserved_tokens = file_tokens
551564
end
552565
end
553566

@@ -563,21 +576,24 @@ function Copilot:ask(prompt, opts)
563576

564577
-- Now add as many files as possible with remaining token budget
565578
local remaining_tokens = max_tokens - required_tokens - history_tokens
566-
if #embeddings_message.files > 0 then
567-
remaining_tokens = remaining_tokens - tiktoken.count(embeddings_message.header)
568-
local filtered_files = {}
569-
for _, file in ipairs(embeddings_message.files) do
570-
local file_tokens = tiktoken.count(file)
571-
if remaining_tokens - file_tokens >= 0 then
572-
remaining_tokens = remaining_tokens - file_tokens
573-
table.insert(filtered_files, file)
574-
else
575-
break
576-
end
579+
for _, message in ipairs(embeddings_messages) do
580+
local tokens = tiktoken.count(message.content)
581+
if remaining_tokens - tokens >= 0 then
582+
remaining_tokens = remaining_tokens - tokens
583+
table.insert(generated_messages, message)
584+
else
585+
break
577586
end
578-
embeddings_message.files = filtered_files
579587
end
580588

589+
-- Prepend links to embeddings to the prompt
590+
local embeddings_prompt = ''
591+
for _, embedding in ipairs(embeddings) do
592+
embeddings_prompt = embeddings_prompt
593+
.. string.format('[#file:%s](#file:%s-context)\n', embedding.filename, embedding.filename)
594+
end
595+
prompt = embeddings_prompt .. prompt
596+
581597
local last_message = nil
582598
local errored = false
583599
local finished = false
@@ -654,9 +670,8 @@ function Copilot:ask(prompt, opts)
654670
generate_ask_request(
655671
self.history,
656672
prompt,
657-
embeddings_message,
658-
selection_message,
659673
system_prompt,
674+
generated_messages,
660675
model,
661676
temperature,
662677
max_output_tokens,

0 commit comments

Comments
 (0)