From d5928a3ec89ca67577f1cda4d31ada61d638d9e6 Mon Sep 17 00:00:00 2001
From: Lars Baunwall
Date: Sun, 5 Oct 2025 14:19:45 +0200
Subject: [PATCH] Security and compliance hardening

---
 README.md                 | 56 +++++++++++++++++++++++++++++++++++++++++++++++++++++++-
 SECURITY.md               |  8 +++++++
 package.json              |  2 +-
 src/http/routes/health.ts |  6 ++++-
 4 files changed, 69 insertions(+), 3 deletions(-)
 create mode 100644 SECURITY.md

diff --git a/README.md b/README.md
index 8a61f1d..5adb46e 100644
--- a/README.md
+++ b/README.md
@@ -9,6 +9,7 @@
 Copilot Bridge lets you access your personal Copilot session locally through an OpenAI-compatible interface — **without calling any private GitHub endpoints**.
 It’s designed for developers experimenting with AI agents, CLI tools, and custom integrations inside their own editor environment.
 
+> **API Surface:** Uses only the public VS Code **Language Model API** (`vscode.lm`) for model discovery and chat. No private Copilot endpoints, tokens, or protocol emulation.
 ---
 
 ## ✨ Key Features
@@ -136,8 +137,29 @@ The extension uses VS Code’s built-in Language Model API to select available C
 
 Requests are normalized and sent through VS Code itself, never directly to GitHub Copilot servers.
 Responses stream back via SSE with concurrency controls for editor stability.
+
+### How it calls models (simplified)
+
+```ts
+import * as vscode from "vscode";
+
+// Prefer Copilot models; fall back to any chat model registered with vscode.lm.
+const models = await vscode.lm.selectChatModels({ vendor: "copilot" });
+const [model] = models.length > 0 ? models : await vscode.lm.selectChatModels();
+if (!model) throw new Error("No language models available (vscode.lm)");
+
+// modelOptions are vendor-specific; temperature is passed through when supported.
+const messages = [vscode.LanguageModelChatMessage.User("hello")];
+const response = await model.sendRequest(messages, { modelOptions: { temperature: 0.2 } });
+
+for await (const chunk of response.text) {
+  // Stream each chunk → SSE to the localhost client; no private Copilot protocol used.
+}
+```
+
 ---
 
+
 ## 🔧 Configuration
 
 | Setting | Default | Description |
@@ -170,7 +192,7 @@ Responses stream back via SSE with concurrency controls for editor stability.
 
 - Loopback-only binding (non-configurable)
 - Mandatory bearer token gating (requests rejected without the correct header)
-- No persistent storage or telemetry
+- **Telemetry:** none collected or transmitted.
 
 ---
 
@@ -199,3 +221,35 @@ Independent project — not affiliated with GitHub or Microsoft.
 For compliance or takedown inquiries, please open a GitHub issue.
 
 ---
+
+### ❓ FAQ
+
+#### Can I run this on a server?
+No. Copilot Bridge is designed for **localhost-only**, single-user, interactive use.
+Running it on a shared host or exposing it over a network would fall outside its intended scope and could breach the GitHub Copilot terms of service.
+The server binds to `127.0.0.1` (non-configurable).
+
+#### Does it send any data to the author?
+No. The bridge never transmits telemetry, prompts, or responses to any external service.
+All traffic stays on your machine and flows through VS Code’s built-in model interface.
+
+#### What happens if Copilot is unavailable?
+The `/health` endpoint reports a diagnostic reason such as `copilot_unavailable` or `missing_language_model_api`.
+This means VS Code currently has no accessible models via `vscode.lm`. Once Copilot becomes available again, the bridge resumes automatically (see the sketch below).
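+
+A quick way to confirm this yourself is to query `/health` locally. A minimal sketch (the port `3000` and the `BRIDGE_TOKEN` variable are illustrative; substitute your configured port and `bridge.token` value):
+
+```ts
+const res = await fetch("http://127.0.0.1:3000/health", {
+  headers: { Authorization: `Bearer ${process.env.BRIDGE_TOKEN}` },
+});
+// Fields mirror HealthResponse: { ok, api, notes, status, copilot, reason? }
+console.log(await res.json());
+```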
+
+#### Can I use non-Copilot models?
+Yes, if other providers register with `vscode.lm`. The bridge detects any available chat-capable models and uses the first suitable one it finds.
+
+#### How is this different from reverse-engineered Copilot proxies?
+Reverse-engineered proxies call private endpoints directly or reuse extracted tokens.
+Copilot Bridge does neither: it communicates only through VS Code’s sanctioned **Language Model API**, keeping usage transparent and compliant.
\ No newline at end of file
diff --git a/SECURITY.md b/SECURITY.md
new file mode 100644
index 0000000..26df456
--- /dev/null
+++ b/SECURITY.md
@@ -0,0 +1,8 @@
+# Security & Compliance
+
+- Uses only the public **VS Code Language Model API** (`vscode.lm`).
+- Does **not** call, impersonate, or reverse-engineer private GitHub Copilot endpoints.
+- The HTTP server binds to **localhost** only (non-configurable).
+- Mandatory bearer-token auth via `bridge.token`.
+- Rate and concurrency limits are available to keep the editor responsive during interactive use.
+- No telemetry or prompt/response data is collected or transmitted to the author.
\ No newline at end of file
diff --git a/package.json b/package.json
index 43c3729..f259df1 100644
--- a/package.json
+++ b/package.json
@@ -12,7 +12,7 @@
   },
   "author": "larsbaunwall",
   "engines": {
-    "vscode": "^1.90.0"
+    "vscode": "^1.93.0"
   },
   "license": "Apache License 2.0",
   "extensionKind": [
diff --git a/src/http/routes/health.ts b/src/http/routes/health.ts
index 2da6ba8..84ee2eb 100644
--- a/src/http/routes/health.ts
+++ b/src/http/routes/health.ts
@@ -7,6 +7,8 @@ import { verbose } from '../../log';
 
 interface HealthResponse {
   readonly ok: boolean;
+  readonly api: string;
+  readonly notes: string;
   readonly status: string;
   readonly copilot: string;
   readonly reason?: string;
@@ -27,7 +29,7 @@ export const handleHealthCheck = async (res: ServerResponse, v: boolean): Promis
 
   // Attempt model resolution if cache is empty and verbose logging is enabled
   if (!state.modelCache && v) {
-    verbose(`Healthz: model=${state.modelCache ? 'present' : 'missing'} lmApi=${hasLM ? 'ok' : 'missing'}`);
+    verbose(`Health: model=${state.modelCache ? 'present' : 'missing'} lmApi=${hasLM ? 'ok' : 'missing'}`);
     try {
       await getModel();
     } catch (e) {
@@ -42,6 +44,8 @@
 
   const response: HealthResponse = {
     ok: true,
+    api: hasLM ? 'vscode.lm' : 'missing_language_model_api',
+    notes: 'No direct Copilot endpoints; no token extraction',
     status: 'operational',
     copilot: state.modelCache ? 'ok' : 'unavailable',
     reason: unavailableReason,