Add copilot instructions

Lars Baunwall 2025-09-29 17:59:38 +02:00
parent 6b20e60f5b
commit e4a24785bb
No known key found for this signature in database
4 changed files with 324 additions and 40 deletions

View file

@@ -2,11 +2,51 @@ import * as vscode from 'vscode';
import type { IncomingMessage, ServerResponse } from 'http';
import { state } from '../../state';
import { getBridgeConfig } from '../../config';
-import { isChatCompletionRequest, normalizeMessagesLM } from '../../messages';
import { isChatCompletionRequest, normalizeMessagesLM, convertOpenAIToolsToLM, convertFunctionsToTools } from '../../messages';
import { getModel, hasLMApi } from '../../models';
import { readJson, writeErrorResponse, writeJson } from '../utils';
import { verbose } from '../../log';
// OpenAI response interfaces for better typing
interface OpenAIToolCall {
id: string;
type: 'function';
function: {
name: string;
arguments: string;
};
}
interface OpenAIMessage {
role: 'assistant';
content: string | null;
tool_calls?: OpenAIToolCall[];
function_call?: {
name: string;
arguments: string;
};
}
interface OpenAIChoice {
index: number;
message?: OpenAIMessage;
delta?: Partial<OpenAIMessage>;
finish_reason: 'stop' | 'length' | 'tool_calls' | 'content_filter' | 'function_call' | null;
}
interface OpenAIResponse {
id: string;
object: 'chat.completion' | 'chat.completion.chunk';
created: number;
model: string;
choices: OpenAIChoice[];
usage?: {
prompt_tokens: number;
completion_tokens: number;
total_tokens: number;
};
}
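// Illustrative sketch (not from the diff itself; all ids and values invented): a
// non-streaming completion carrying a single tool call, typed against the
// interfaces above. Note content is null when tool_calls are present.
const exampleToolCallResponse: OpenAIResponse = {
  id: 'chatcmpl-abc123',
  object: 'chat.completion',
  created: 1727625578,
  model: 'copilot',
  choices: [{
    index: 0,
    message: {
      role: 'assistant',
      content: null,
      tool_calls: [{
        id: 'call_1',
        type: 'function',
        function: { name: 'get_weather', arguments: '{"city":"Oslo"}' },
      }],
    },
    finish_reason: 'tool_calls',
  }],
};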
export const handleChatCompletion = async (req: IncomingMessage, res: ServerResponse): Promise<void> => {
const config = getBridgeConfig();
state.activeRequests++;
@@ -20,6 +60,14 @@ export const handleChatCompletion = async (req: IncomingMessage, res: ServerResponse): Promise<void> => {
const requestedModel = body.model;
const stream = body.stream !== false; // default true
// Handle tools and deprecated functions
let tools = body.tools || [];
if (body.functions) {
// Convert deprecated functions to tools format
tools = [...tools, ...convertFunctionsToTools(body.functions)];
}
const model = await getModel(false, requestedModel);
if (!model) {
@@ -33,11 +81,19 @@ export const handleChatCompletion = async (req: IncomingMessage, res: ServerResponse): Promise<void> => {
}
const lmMessages = normalizeMessagesLM(body.messages, config.historyWindow) as vscode.LanguageModelChatMessage[];
-verbose(`LM request via API model=${model.family || model.id || model.name || 'unknown'}`);
const lmTools = convertOpenAIToolsToLM(tools);
// Prepare request options for Language Model API
const requestOptions: any = {};
if (lmTools.length > 0) {
requestOptions.tools = lmTools;
}
verbose(`LM request via API model=${model.family || model.id || model.name || 'unknown'} tools=${lmTools.length}`);
const cts = new vscode.CancellationTokenSource();
-const response = await model.sendRequest(lmMessages, {}, cts.token);
-await sendResponse(res, response, stream);
const response = await model.sendRequest(lmMessages, requestOptions, cts.token);
await sendResponse(res, response, stream, body, tools);
} catch (e) {
const msg = e instanceof Error ? e.message : String(e);
writeErrorResponse(res, 500, msg || 'internal_error', 'server_error', 'internal_error');
@@ -47,40 +103,157 @@ export const handleChatCompletion = async (req: IncomingMessage, res: ServerResponse): Promise<void> => {
}
};
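// A quick smoke test of the handler above (illustrative; the port and the tool
// definition are assumptions, not part of this code). stream defaults to true,
// so stream: false is passed explicitly to get a single JSON body back.
const probeChatCompletion = async (): Promise<void> => {
  const r = await fetch('http://localhost:3000/v1/chat/completions', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      model: 'copilot',
      stream: false,
      messages: [{ role: 'user', content: 'What is the weather in Oslo?' }],
      tools: [{
        type: 'function',
        function: {
          name: 'get_weather',
          description: 'Look up current weather',
          parameters: { type: 'object', properties: { city: { type: 'string' } } },
        },
      }],
    }),
  });
  console.log(await r.json()); // an OpenAIResponse, possibly with tool_calls
};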
-const sendResponse = async (res: ServerResponse, response: vscode.LanguageModelChatResponse, stream: boolean): Promise<void> => {
const sendResponse = async (
res: ServerResponse,
response: vscode.LanguageModelChatResponse,
stream: boolean,
requestBody?: any,
tools?: any[]
): Promise<void> => {
const modelName = requestBody?.model || 'copilot';
const responseId = `chatcmpl-${Math.random().toString(36).slice(2)}`;
const created = Math.floor(Date.now() / 1000);
if (stream) {
res.writeHead(200, {
'Content-Type': 'text/event-stream',
'Cache-Control': 'no-cache',
'Connection': 'keep-alive',
});
-const id = `cmp_${Math.random().toString(36).slice(2)}`;
-verbose(`SSE start id=${id}`);
-for await (const fragment of response.text) {
-res.write(`data: ${JSON.stringify({
-id,
-object: 'chat.completion.chunk',
-choices: [{ index: 0, delta: { content: fragment } }],
-})}\n\n`);
verbose(`SSE start id=${responseId}`);
let toolCalls: OpenAIToolCall[] = [];
for await (const part of response.stream) {
// Check if this part is a LanguageModelToolCallPart
if (part && typeof part === 'object' && 'callId' in part && 'name' in part && 'input' in part) {
const toolCallPart = part as vscode.LanguageModelToolCallPart;
const toolCall: OpenAIToolCall = {
id: toolCallPart.callId,
type: 'function',
function: {
name: toolCallPart.name,
arguments: JSON.stringify(toolCallPart.input)
}
};
toolCalls.push(toolCall);
// Send tool call in streaming format
const chunkResponse: OpenAIResponse = {
id: responseId,
object: 'chat.completion.chunk',
created,
model: modelName,
choices: [{
index: 0,
delta: {
tool_calls: [toolCall]
},
finish_reason: null
}]
};
res.write(`data: ${JSON.stringify(chunkResponse)}\n\n`);
} else if (typeof part === 'string' || (part && typeof part === 'object' && 'value' in part)) {
// Handle text content
const content = typeof part === 'string' ? part : (part as any).value || '';
if (content) {
const chunkResponse: OpenAIResponse = {
id: responseId,
object: 'chat.completion.chunk',
created,
model: modelName,
choices: [{
index: 0,
delta: { content },
finish_reason: null
}]
};
res.write(`data: ${JSON.stringify(chunkResponse)}\n\n`);
}
}
}
-verbose(`SSE end id=${id}`);
// Send final chunk
const finishReason: OpenAIChoice['finish_reason'] = toolCalls.length > 0 ? 'tool_calls' : 'stop';
const finalChunkResponse: OpenAIResponse = {
id: responseId,
object: 'chat.completion.chunk',
created,
model: modelName,
choices: [{
index: 0,
delta: {},
finish_reason: finishReason
}]
};
res.write(`data: ${JSON.stringify(finalChunkResponse)}\n\n`);
verbose(`SSE end id=${responseId}`);
res.write('data: [DONE]\n\n');
res.end();
return;
}
// Non-streaming response
let content = '';
-for await (const fragment of response.text) content += fragment;
-verbose(`Non-stream complete len=${content.length}`);
-writeJson(res, 200, {
-id: `cmpl_${Math.random().toString(36).slice(2)}`,
let toolCalls: OpenAIToolCall[] = [];
for await (const part of response.stream) {
if (part && typeof part === 'object' && 'callId' in part && 'name' in part && 'input' in part) {
// Handle VS Code LanguageModelToolCallPart
const toolCallPart = part as vscode.LanguageModelToolCallPart;
const toolCall: OpenAIToolCall = {
id: toolCallPart.callId,
type: 'function',
function: {
name: toolCallPart.name,
arguments: JSON.stringify(toolCallPart.input)
}
};
toolCalls.push(toolCall);
} else if (typeof part === 'string' || (part && typeof part === 'object' && 'value' in part)) {
// Handle text content
content += typeof part === 'string' ? part : (part as any).value || '';
}
}
verbose(`Non-stream complete len=${content.length} tool_calls=${toolCalls.length}`);
const message: OpenAIMessage = {
role: 'assistant',
content: toolCalls.length > 0 ? null : content,
};
// Add tool_calls if present
if (toolCalls.length > 0) {
message.tool_calls = toolCalls;
// For backward compatibility, also add function_call if there's exactly one tool call
if (toolCalls.length === 1 && requestBody?.function_call !== undefined) {
message.function_call = {
name: toolCalls[0].function.name,
arguments: toolCalls[0].function.arguments
};
}
}
const responseObj: OpenAIResponse = {
id: responseId,
object: 'chat.completion',
-choices: [
-{
-index: 0,
-message: { role: 'assistant', content },
-finish_reason: 'stop',
-},
-],
-});
created,
model: modelName,
choices: [{
index: 0,
message,
finish_reason: toolCalls.length > 0 ? 'tool_calls' : 'stop',
}],
usage: {
prompt_tokens: 0, // VS Code API doesn't provide token counts
completion_tokens: 0,
total_tokens: 0
}
};
writeJson(res, 200, responseObj);
};
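// Minimal client-side sketch of consuming the SSE stream written by sendResponse
// (host and port are assumptions). Events are separated by "\n\n", each body is
// JSON until the "[DONE]" sentinel, matching the writes above.
const consumeStream = async (): Promise<void> => {
  const res = await fetch('http://localhost:3000/v1/chat/completions', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ model: 'copilot', stream: true, messages: [{ role: 'user', content: 'hi' }] }),
  });
  const reader = res.body!.getReader();
  const decoder = new TextDecoder();
  let buffer = '';
  for (;;) {
    const { done, value } = await reader.read();
    if (done) break;
    buffer += decoder.decode(value, { stream: true });
    const events = buffer.split('\n\n');
    buffer = events.pop() ?? ''; // keep any partial event for the next read
    for (const event of events) {
      if (!event.startsWith('data: ')) continue;
      const payload = event.slice('data: '.length);
      if (payload === '[DONE]') return;
      console.log(JSON.parse(payload)); // a chat.completion.chunk
    }
  }
};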

View file

@@ -14,10 +14,21 @@ export const handleHealthCheck = async (res: ServerResponse, v: boolean): Promise<void> => {
const unavailableReason = state.modelCache
? undefined
: (!hasLM ? 'missing_language_model_api' : (state.lastReason || 'copilot_model_unavailable'));
writeJson(res, 200, {
ok: true,
status: 'operational',
copilot: state.modelCache ? 'ok' : 'unavailable',
reason: unavailableReason,
version: vscode.version,
features: {
chat_completions: true,
streaming: true,
tool_calling: true,
function_calling: true, // deprecated but supported
models_list: true
},
active_requests: state.activeRequests,
model_attempted: state.modelAttempted
});
};
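// Illustrative probe of the endpoint above (host, port, and route path are
// assumptions for the example):
fetch('http://localhost:3000/health')
  .then(r => r.json())
  .then(h => console.log(h.status, h.features)); // 'operational' plus the feature flags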

View file

@@ -4,17 +4,29 @@ import type { ServerResponse } from 'http';
export const handleModelsRequest = async (res: ServerResponse): Promise<void> => {
try {
-const models = await listCopilotModels();
const modelIds = await listCopilotModels();
const models = modelIds.map((id: string) => ({
id,
object: 'model',
created: Math.floor(Date.now() / 1000),
owned_by: 'copilot',
permission: [],
root: id,
parent: null,
}));
writeJson(res, 200, {
-data: models.map((id: string) => ({
-id,
-object: 'model',
-owned_by: 'vscode-bridge',
-})),
object: 'list',
data: models,
});
-} catch {
-writeJson(res, 200, {
-data: [],
} catch (e) {
const msg = e instanceof Error ? e.message : String(e);
writeJson(res, 500, {
error: {
message: msg || 'Failed to list models',
type: 'server_error',
code: 'internal_error'
}
});
}
};
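// Illustrative payload produced by the handler above (the model id is invented;
// real ids depend on which Copilot models VS Code exposes):
const exampleModelList = {
  object: 'list',
  data: [{
    id: 'gpt-4o',
    object: 'model',
    created: 1727625578,
    owned_by: 'copilot',
    permission: [],
    root: 'gpt-4o',
    parent: null,
  }],
};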

View file

@@ -1,8 +1,12 @@
import * as vscode from 'vscode';
export interface ChatMessage {
-readonly role: 'system' | 'user' | 'assistant';
-readonly content: string | MessageContent[];
readonly role: 'system' | 'user' | 'assistant' | 'tool';
readonly content?: string | MessageContent[] | null;
readonly name?: string;
readonly tool_calls?: ToolCall[];
readonly tool_call_id?: string;
readonly function_call?: FunctionCall;
}
export interface MessageContent {
@@ -11,22 +15,87 @@ export interface MessageContent {
readonly [key: string]: unknown;
}
export interface ToolCall {
readonly id: string;
readonly type: 'function';
readonly function: FunctionCall;
}
export interface FunctionCall {
readonly name: string;
readonly arguments: string;
}
export interface Tool {
readonly type: 'function';
readonly function: ToolFunction;
}
export interface ToolFunction {
readonly name: string;
readonly description?: string;
readonly parameters?: object;
}
export interface ChatCompletionRequest {
readonly model?: string;
readonly messages: ChatMessage[];
readonly stream?: boolean;
readonly tools?: Tool[];
readonly tool_choice?: 'none' | 'auto' | 'required' | { type: 'function'; function: { name: string } };
readonly parallel_tool_calls?: boolean;
readonly functions?: ToolFunction[]; // Deprecated, use tools instead
readonly function_call?: 'none' | 'auto' | { name: string }; // Deprecated, use tool_choice instead
readonly temperature?: number;
readonly top_p?: number;
readonly n?: number;
readonly stop?: string | string[];
readonly max_tokens?: number;
readonly max_completion_tokens?: number;
readonly presence_penalty?: number;
readonly frequency_penalty?: number;
readonly logit_bias?: Record<string, number>;
readonly logprobs?: boolean;
readonly top_logprobs?: number;
readonly user?: string;
readonly seed?: number;
readonly response_format?: {
readonly type: 'text' | 'json_object' | 'json_schema';
readonly json_schema?: {
readonly name: string;
readonly schema: object;
readonly strict?: boolean;
};
};
readonly [key: string]: unknown;
}
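// Illustrative request that satisfies this shape (the tool definition is invented).
// The deprecated functions/function_call fields remain assignable alongside tools:
const exampleRequest: ChatCompletionRequest = {
  model: 'copilot',
  stream: true,
  messages: [{ role: 'user', content: 'What is the weather in Oslo?' }],
  tools: [{
    type: 'function',
    function: {
      name: 'get_weather',
      description: 'Look up current weather',
      parameters: { type: 'object', properties: { city: { type: 'string' } } },
    },
  }],
  tool_choice: 'auto',
};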
-const VALID_ROLES = ['system', 'user', 'assistant'] as const;
const VALID_ROLES = ['system', 'user', 'assistant', 'tool'] as const;
type Role = typeof VALID_ROLES[number];
const isValidRole = (role: unknown): role is Role => typeof role === 'string' && VALID_ROLES.includes(role as Role);
export const isChatMessage = (msg: unknown): msg is ChatMessage => {
if (typeof msg !== 'object' || msg === null) return false;
const candidate = msg as Record<string, unknown>;
-if (!('role' in candidate) || !('content' in candidate)) return false;
-return isValidRole(candidate.role) && candidate.content !== undefined && candidate.content !== null;
if (!('role' in candidate)) return false;
if (!isValidRole(candidate.role)) return false;
// Tool messages require tool_call_id and content
if (candidate.role === 'tool') {
return typeof candidate.tool_call_id === 'string' &&
(typeof candidate.content === 'string' || candidate.content === null);
}
// Assistant messages can have content and/or tool_calls/function_call
if (candidate.role === 'assistant') {
const hasContent = candidate.content !== undefined;
const hasToolCalls = Array.isArray(candidate.tool_calls);
const hasFunctionCall = typeof candidate.function_call === 'object' && candidate.function_call !== null;
return hasContent || hasToolCalls || hasFunctionCall;
}
// System and user messages must have content
return candidate.content !== undefined && candidate.content !== null;
};
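// Spot checks for the guard above (illustrative; each result follows directly
// from the role-specific branches):
console.assert(isChatMessage({ role: 'user', content: 'hi' }) === true);
console.assert(isChatMessage({ role: 'tool', tool_call_id: 'call_1', content: 'ok' }) === true);
console.assert(isChatMessage({ role: 'assistant', tool_calls: [] }) === true); // an empty array still counts
console.assert(isChatMessage({ role: 'tool', content: 'missing tool_call_id' }) === false);
console.assert(isChatMessage({ role: 'user' }) === false); // user/system require content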
export const isChatCompletionRequest = (body: unknown): body is ChatCompletionRequest => {
@@ -37,6 +106,25 @@ export const isChatCompletionRequest = (body: unknown): body is ChatCompletionRequest => {
return Array.isArray(messages) && messages.length > 0 && messages.every(isChatMessage);
};
// Convert OpenAI tools to VS Code Language Model tools
export const convertOpenAIToolsToLM = (tools?: Tool[]): vscode.LanguageModelChatTool[] => {
if (!tools) return [];
return tools.map(tool => ({
name: tool.function.name,
description: tool.function.description || '',
inputSchema: tool.function.parameters
}));
};
// Convert deprecated functions to tools format
export const convertFunctionsToTools = (functions?: ToolFunction[]): Tool[] => {
if (!functions) return [];
return functions.map(func => ({
type: 'function' as const,
function: func
}));
};
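// End-to-end sketch of the two converters above (function definition invented):
// a deprecated `functions` array is first lifted into `tools`, then mapped to
// VS Code LanguageModelChatTool objects for the LM request options.
const legacyFunctions: ToolFunction[] = [{
  name: 'get_weather',
  description: 'Look up current weather',
  parameters: { type: 'object', properties: { city: { type: 'string' } } },
}];
const lmTools = convertOpenAIToolsToLM(convertFunctionsToTools(legacyFunctions));
// lmTools[0] -> { name: 'get_weather', description: 'Look up current weather', inputSchema: {...} }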
const toText = (content: unknown): string => {
if (typeof content === 'string') return content;
if (Array.isArray(content)) return content.map(toText).join('\n');