[PROTOBUS] Move requestVsCodeLmModels to protobus (#3344)

* Task Favorites

* getOllamaModels protobus migration

* VsCodeLmModels protobus migration

* cleanup
This commit is contained in:
canvrno 2025-05-08 13:50:23 -07:00 committed by GitHub
parent 489a05117c
commit 445e25221a
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
10 changed files with 302 additions and 34 deletions

View File

@ -0,0 +1,5 @@
---
"claude-dev": patch
---
requestVsCodeLmModels protobus migration

View File

@ -10,7 +10,21 @@ import "common.proto";
// Service exposing model-listing RPCs to the webview over protobus.
service ModelsService {
  // Fetches available models from Ollama
  rpc getOllamaModels(StringRequest) returns (StringArray);
  // Fetches available models from LM Studio
  rpc getLmStudioModels(StringRequest) returns (StringArray);
  // Fetches available models from VS Code LM API
  rpc getVsCodeLmModels(EmptyRequest) returns (VsCodeLmModelsArray);
}

// List of VS Code LM models
message VsCodeLmModelsArray {
  repeated VsCodeLmModel models = 1;
}

// Structure representing a VS Code LM model.
// Mirrors the fields copied from the native VS Code model by the
// vscode-lm-models-conversion helper; unset fields are empty strings.
message VsCodeLmModel {
  string vendor = 1;
  string family = 2;
  string version = 3;
  string id = 4;
}

View File

@ -27,4 +27,3 @@ message NewTaskRequest {
string text = 2;
repeated string images = 3;
}

View File

@ -19,7 +19,6 @@ import WorkspaceTracker from "@integrations/workspace/WorkspaceTracker"
import { ClineAccountService } from "@services/account/ClineAccountService"
import { BrowserSession } from "@services/browser/BrowserSession"
import { McpHub } from "@services/mcp/McpHub"
import { searchWorkspaceFiles } from "@services/search/file-search"
import { telemetryService } from "@/services/posthog/telemetry/TelemetryService"
import { ApiProvider, ModelInfo } from "@shared/api"
import { ChatContent } from "@shared/ChatContent"
@ -28,11 +27,10 @@ import { ExtensionMessage, ExtensionState, Invoke, Platform } from "@shared/Exte
import { HistoryItem } from "@shared/HistoryItem"
import { McpDownloadResponse, McpMarketplaceCatalog, McpServer } from "@shared/mcp"
import { TelemetrySetting } from "@shared/TelemetrySetting"
import { ClineCheckpointRestore, WebviewMessage } from "@shared/WebviewMessage"
import { WebviewMessage } from "@shared/WebviewMessage"
import { fileExistsAtPath } from "@utils/fs"
import { searchCommits, getWorkingState } from "@utils/git"
import { getWorkingState } from "@utils/git"
import { extractCommitMessage } from "@integrations/git/commit-message-generator"
import { getWorkspacePath } from "@utils/path"
import { getTotalTasksSize } from "@utils/storage"
import { openMention } from "../mentions"
import { ensureMcpServersDirectoryExists, ensureSettingsDirectoryExists, GlobalFileNames } from "../storage/disk"
@ -332,10 +330,6 @@ export class Controller {
case "resetState":
await this.resetState()
break
case "requestVsCodeLmModels":
const vsCodeLmModels = await this.getVsCodeLmModels()
this.postMessageToWebview({ type: "vsCodeLmModels", vsCodeLmModels })
break
case "refreshOpenRouterModels":
await this.refreshOpenRouterModels()
break
@ -884,18 +878,6 @@ export class Controller {
}
}
// VSCode LM API
private async getVsCodeLmModels() {
try {
const models = await vscode.lm.selectChatModels({})
return models || []
} catch (error) {
console.error("Error fetching VS Code LM models:", error)
return []
}
}
// Account
async fetchUserCreditsData() {

View File

@ -0,0 +1,24 @@
import { Controller } from ".."
import { EmptyRequest } from "../../../shared/proto/common"
import { VsCodeLmModelsArray } from "../../../shared/proto/models"
import * as vscode from "vscode"
import { convertVsCodeNativeModelsToProtoModels } from "../../../shared/proto-conversions/models/vscode-lm-models-conversion"
/**
 * Fetches the chat models currently exposed by the VS Code LM API.
 *
 * Never rejects: any failure is logged and reported to the caller as an
 * empty model list.
 *
 * @param controller The controller instance (unused; required by the protobus handler signature)
 * @param request Empty request
 * @returns Array of VS Code LM models, wrapped in a VsCodeLmModelsArray message
 */
export async function getVsCodeLmModels(controller: Controller, request: EmptyRequest): Promise<VsCodeLmModelsArray> {
	let nativeModels: Awaited<ReturnType<typeof vscode.lm.selectChatModels>> = []
	try {
		nativeModels = (await vscode.lm.selectChatModels({})) || []
	} catch (error) {
		// Best-effort: log and fall through to the empty list.
		console.error("Error fetching VS Code LM models:", error)
	}
	return VsCodeLmModelsArray.create({ models: convertVsCodeNativeModelsToProtoModels(nativeModels) })
}

View File

@ -5,10 +5,12 @@
import { registerMethod } from "./index"
import { getLmStudioModels } from "./getLmStudioModels"
import { getOllamaModels } from "./getOllamaModels"
import { getVsCodeLmModels } from "./getVsCodeLmModels"
// Register all models service methods
export function registerAllMethods(): void {
// Register each method with the registry
registerMethod("getLmStudioModels", getLmStudioModels)
registerMethod("getOllamaModels", getOllamaModels)
registerMethod("getVsCodeLmModels", getVsCodeLmModels)
}

View File

@ -28,8 +28,6 @@ export interface ExtensionMessage {
| "requestyModels"
| "mcpServers"
| "relinquishControl"
| "vsCodeLmModels"
| "requestVsCodeLmModels"
| "authCallback"
| "mcpMarketplaceCatalog"
| "mcpDownloadDetails"

View File

@ -0,0 +1,23 @@
import { VsCodeLmModel } from "../../proto/models"
/**
 * Represents a VS Code language model in the native VS Code format
 */
export interface VsCodeNativeModel {
	vendor?: string
	family?: string
	version?: string
	id?: string
}

/**
 * Converts VS Code native model format to protobuf format.
 * Missing optional fields are replaced with empty strings, and a
 * null/undefined input list is treated as empty.
 */
export function convertVsCodeNativeModelsToProtoModels(models: VsCodeNativeModel[]): VsCodeLmModel[] {
	const converted: VsCodeLmModel[] = []
	for (const model of models ?? []) {
		converted.push({
			vendor: model.vendor ?? "",
			family: model.family ?? "",
			version: model.version ?? "",
			id: model.id ?? "",
		})
	}
	return converted
}

View File

@ -5,10 +5,192 @@
// source: models.proto
/* eslint-disable */
import { StringArray, StringRequest } from "./common"
import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"
import { EmptyRequest, StringArray, StringRequest } from "./common"
export const protobufPackage = "cline"
/** List of VS Code LM models */
export interface VsCodeLmModelsArray {
	models: VsCodeLmModel[]
}

/** Structure representing a VS Code LM model */
export interface VsCodeLmModel {
	vendor: string
	family: string
	version: string
	id: string
}

// Returns a fresh message with every field at its proto3 default.
function createBaseVsCodeLmModelsArray(): VsCodeLmModelsArray {
	return { models: [] }
}

// NOTE(review): ts-proto generated runtime (source: models.proto). Prefer
// regenerating from the .proto over hand-editing this object.
export const VsCodeLmModelsArray: MessageFns<VsCodeLmModelsArray> = {
	// Serializes each model as a length-delimited sub-message on field 1
	// (tag 10 = field number 1, wire type 2).
	encode(message: VsCodeLmModelsArray, writer: BinaryWriter = new BinaryWriter()): BinaryWriter {
		for (const v of message.models) {
			VsCodeLmModel.encode(v!, writer.uint32(10).fork()).join()
		}
		return writer
	},
	// Decodes until `length` bytes (or the whole buffer) are consumed;
	// unknown fields are skipped rather than treated as errors.
	decode(input: BinaryReader | Uint8Array, length?: number): VsCodeLmModelsArray {
		const reader = input instanceof BinaryReader ? input : new BinaryReader(input)
		let end = length === undefined ? reader.len : reader.pos + length
		const message = createBaseVsCodeLmModelsArray()
		while (reader.pos < end) {
			const tag = reader.uint32()
			switch (tag >>> 3) {
				case 1: {
					if (tag !== 10) {
						break
					}
					message.models.push(VsCodeLmModel.decode(reader, reader.uint32()))
					continue
				}
			}
			// End-group tag (wire type 4) or a zero tag terminates the message.
			if ((tag & 7) === 4 || tag === 0) {
				break
			}
			reader.skip(tag & 7)
		}
		return message
	},
	// Builds a message from untyped JSON, tolerating a missing/non-array `models`.
	fromJSON(object: any): VsCodeLmModelsArray {
		return {
			models: globalThis.Array.isArray(object?.models) ? object.models.map((e: any) => VsCodeLmModel.fromJSON(e)) : [],
		}
	},
	// Emits JSON, omitting the `models` key entirely when the list is empty.
	toJSON(message: VsCodeLmModelsArray): unknown {
		const obj: any = {}
		if (message.models?.length) {
			obj.models = message.models.map((e) => VsCodeLmModel.toJSON(e))
		}
		return obj
	},
	create<I extends Exact<DeepPartial<VsCodeLmModelsArray>, I>>(base?: I): VsCodeLmModelsArray {
		return VsCodeLmModelsArray.fromPartial(base ?? ({} as any))
	},
	// Deep-copies the partial input; missing `models` becomes an empty list.
	fromPartial<I extends Exact<DeepPartial<VsCodeLmModelsArray>, I>>(object: I): VsCodeLmModelsArray {
		const message = createBaseVsCodeLmModelsArray()
		message.models = object.models?.map((e) => VsCodeLmModel.fromPartial(e)) || []
		return message
	},
}
// Returns a fresh message with every string field at its proto3 default ("").
function createBaseVsCodeLmModel(): VsCodeLmModel {
	return { vendor: "", family: "", version: "", id: "" }
}

// NOTE(review): ts-proto generated runtime (source: models.proto). Prefer
// regenerating from the .proto over hand-editing this object.
export const VsCodeLmModel: MessageFns<VsCodeLmModel> = {
	// Proto3 semantics: default-valued ("") fields are not written to the wire.
	// Tags 10/18/26/34 = field numbers 1-4, each wire type 2 (length-delimited).
	encode(message: VsCodeLmModel, writer: BinaryWriter = new BinaryWriter()): BinaryWriter {
		if (message.vendor !== "") {
			writer.uint32(10).string(message.vendor)
		}
		if (message.family !== "") {
			writer.uint32(18).string(message.family)
		}
		if (message.version !== "") {
			writer.uint32(26).string(message.version)
		}
		if (message.id !== "") {
			writer.uint32(34).string(message.id)
		}
		return writer
	},
	// Decodes until `length` bytes (or the whole buffer) are consumed;
	// unknown fields are skipped rather than treated as errors.
	decode(input: BinaryReader | Uint8Array, length?: number): VsCodeLmModel {
		const reader = input instanceof BinaryReader ? input : new BinaryReader(input)
		let end = length === undefined ? reader.len : reader.pos + length
		const message = createBaseVsCodeLmModel()
		while (reader.pos < end) {
			const tag = reader.uint32()
			switch (tag >>> 3) {
				case 1: {
					if (tag !== 10) {
						break
					}
					message.vendor = reader.string()
					continue
				}
				case 2: {
					if (tag !== 18) {
						break
					}
					message.family = reader.string()
					continue
				}
				case 3: {
					if (tag !== 26) {
						break
					}
					message.version = reader.string()
					continue
				}
				case 4: {
					if (tag !== 34) {
						break
					}
					message.id = reader.string()
					continue
				}
			}
			// End-group tag (wire type 4) or a zero tag terminates the message.
			if ((tag & 7) === 4 || tag === 0) {
				break
			}
			reader.skip(tag & 7)
		}
		return message
	},
	// Builds a message from untyped JSON; absent fields fall back to "".
	fromJSON(object: any): VsCodeLmModel {
		return {
			vendor: isSet(object.vendor) ? globalThis.String(object.vendor) : "",
			family: isSet(object.family) ? globalThis.String(object.family) : "",
			version: isSet(object.version) ? globalThis.String(object.version) : "",
			id: isSet(object.id) ? globalThis.String(object.id) : "",
		}
	},
	// Emits JSON, omitting keys whose value is the proto3 default ("").
	toJSON(message: VsCodeLmModel): unknown {
		const obj: any = {}
		if (message.vendor !== "") {
			obj.vendor = message.vendor
		}
		if (message.family !== "") {
			obj.family = message.family
		}
		if (message.version !== "") {
			obj.version = message.version
		}
		if (message.id !== "") {
			obj.id = message.id
		}
		return obj
	},
	create<I extends Exact<DeepPartial<VsCodeLmModel>, I>>(base?: I): VsCodeLmModel {
		return VsCodeLmModel.fromPartial(base ?? ({} as any))
	},
	// Copies the partial input; any missing field becomes "".
	fromPartial<I extends Exact<DeepPartial<VsCodeLmModel>, I>>(object: I): VsCodeLmModel {
		const message = createBaseVsCodeLmModel()
		message.vendor = object.vendor ?? ""
		message.family = object.family ?? ""
		message.version = object.version ?? ""
		message.id = object.id ?? ""
		return message
	},
}
/** Service for model-related operations */
export type ModelsServiceDefinition = typeof ModelsServiceDefinition
export const ModelsServiceDefinition = {
@ -33,5 +215,44 @@ export const ModelsServiceDefinition = {
responseStream: false,
options: {},
},
/** Fetches available models from VS Code LM API */
getVsCodeLmModels: {
name: "getVsCodeLmModels",
requestType: EmptyRequest,
requestStream: false,
responseType: VsCodeLmModelsArray,
responseStream: false,
options: {},
},
},
} as const
// ---- ts-proto generated helper types shared by all generated messages ----

type Builtin = Date | Function | Uint8Array | string | number | boolean | undefined

// Recursive partial: builtins pass through unchanged, arrays map element-wise,
// plain objects become deeply optional.
export type DeepPartial<T> = T extends Builtin
	? T
	: T extends globalThis.Array<infer U>
		? globalThis.Array<DeepPartial<U>>
		: T extends ReadonlyArray<infer U>
			? ReadonlyArray<DeepPartial<U>>
			: T extends {}
				? { [K in keyof T]?: DeepPartial<T[K]> }
				: Partial<T>

type KeysOfUnion<T> = T extends T ? keyof T : never

// Rejects excess properties when constructing messages via create()/fromPartial():
// keys of I not present in P are typed `never`.
export type Exact<P, I extends P> = P extends Builtin
	? P
	: P & { [K in keyof P]: Exact<P[K], I[K]> } & { [K in Exclude<keyof I, KeysOfUnion<P>>]: never }

// True when a JSON value is present (both null and undefined count as unset).
function isSet(value: any): boolean {
	return value !== null && value !== undefined
}

// Uniform runtime surface implemented by every generated message object.
export interface MessageFns<T> {
	encode(message: T, writer?: BinaryWriter): BinaryWriter
	decode(input: BinaryReader | Uint8Array, length?: number): T
	fromJSON(object: any): T
	toJSON(message: T): unknown
	create<I extends Exact<DeepPartial<T>, I>>(base?: I): T
	fromPartial<I extends Exact<DeepPartial<T>, I>>(object: I): T
}

View File

@ -209,7 +209,15 @@ const ApiOptions = ({
setLmStudioModels([])
}
} else if (selectedProvider === "vscode-lm") {
vscode.postMessage({ type: "requestVsCodeLmModels" })
try {
const response = await ModelsServiceClient.getVsCodeLmModels({})
if (response && response.models) {
setVsCodeLmModels(response.models)
}
} catch (error) {
console.error("Failed to fetch VS Code LM models:", error)
setVsCodeLmModels([])
}
}
}, [selectedProvider, apiConfiguration?.ollamaBaseUrl, apiConfiguration?.lmStudioBaseUrl])
useEffect(() => {
@ -222,14 +230,6 @@ const ApiOptions = ({
selectedProvider === "ollama" || selectedProvider === "lmstudio" || selectedProvider === "vscode-lm" ? 2000 : null,
)
const handleMessage = useCallback((event: MessageEvent) => {
const message: ExtensionMessage = event.data
if (message.type === "vsCodeLmModels" && message.vsCodeLmModels) {
setVsCodeLmModels(message.vsCodeLmModels)
}
}, [])
useEvent("message", handleMessage)
/*
VSCodeDropdown has an open bug where dynamically rendered options don't auto select the provided value prop. You can see this for yourself by comparing it with normal select/option elements, which work as expected.
https://github.com/microsoft/vscode-webview-ui-toolkit/issues/433