@@ -17,7 +17,7 @@ export class RemoteInferencer implements ICompletions {
   model_op = RemoteBackendOPModel.CODELLAMA // default model operation change this to llama if necessary
   event: EventEmitter
   test_env=false
-  test_url="http://solcodertest.org/"
+  test_url="http://solcodertest.org"
 
   constructor(apiUrl?:string, completionUrl?:string) {
     this.api_url = apiUrl!==undefined ? apiUrl: this.test_env? this.test_url : "https://solcoder.remixproject.org"
@@ -42,6 +42,7 @@ export class RemoteInferencer implements ICompletions {
         }
       case AIRequestType.GENERAL:
         if (result.statusText === "OK") {
+          if (result.data?.error) return result.data?.error
           const resultText = result.data.generatedText
           ChatHistory.pushHistory(payload.prompt, resultText)
           return resultText
@@ -75,11 +76,9 @@ export class RemoteInferencer implements ICompletions {
       if (payload.return_stream_response) {
         return response
       }
-
       const reader = response.body!.getReader();
       const decoder = new TextDecoder();
       const parser = new JsonStreamParser();
-
       while (true) {
         const { done, value } = await reader.read();
         if (done) break;
@@ -87,7 +86,6 @@ export class RemoteInferencer implements ICompletions {
         try {
           console.log("value" + decoder.decode(value))
           const chunk = parser.safeJsonParse<{ generatedText: string; isGenerating: boolean }>(decoder.decode(value, { stream: true }));
-
           for (const parsedData of chunk) {
             if (parsedData.isGenerating) {
               this.event.emit('onStreamResult', parsedData.generatedText);
@@ -123,14 +121,14 @@ export class RemoteInferencer implements ICompletions {
 
   async code_insertion(msg_pfx, msg_sfx, options:IParams=InsertionParams): Promise<any> {
     // const payload = { "data":[msg_pfx, "code_insertion", msg_sfx, 1024, 0.5, 0.92, 50]}
-    const payload = {"endpoint":"code_insertion", msg_pfx, msg_sfx, ...options, prompt: '' }
+    const payload = { "endpoint":"code_insertion", msg_pfx, msg_sfx, ...options, prompt: '' }
     return this._makeRequest(payload, AIRequestType.COMPLETION)
   }
 
   async code_generation(prompt, options:IParams=GenerationParams): Promise<any> {
     // const payload = { "data":[prompt, "code_completion", "", false,1000,0.9,0.92,50]}
     const payload = { prompt, "endpoint":"code_completion", ...options }
-    if (options.stream_result) return this._streamInferenceRequest(payload.endpoint, payload, AIRequestType.COMPLETION)
+    if (options.stream_result) return this._streamInferenceRequest(payload.endpoint, payload, AIRequestType.COMPLETION)
     else return this._makeRequest(payload, AIRequestType.COMPLETION)
   }
 
@@ -138,20 +136,20 @@ export class RemoteInferencer implements ICompletions {
     const main_prompt = buildSolgptPromt(prompt, this.model_op)
     // const payload = { "data":[main_prompt, "solidity_answer", false,2000,0.9,0.8,50]}
     const payload = { 'prompt': main_prompt, "endpoint":"solidity_answer", ...options }
-    if (options.stream_result) return this._streamInferenceRequest(payload.endpoint, payload, AIRequestType.GENERAL)
+    if (options.stream_result) return this._streamInferenceRequest(payload.endpoint, payload, AIRequestType.GENERAL)
     else return this._makeRequest(payload, AIRequestType.GENERAL)
   }
 
   async code_explaining(prompt, context:string="", options:IParams=GenerationParams): Promise<any> {
     // const payload = { "data":[prompt, "code_explaining", false,2000,0.9,0.8,50, context]}
     const payload = { prompt, "endpoint":"code_explaining", context, ...options }
-    if (options.stream_result) return this._streamInferenceRequest(payload.endpoint, payload, AIRequestType.GENERAL)
+    if (options.stream_result) return this._streamInferenceRequest(payload.endpoint, payload, AIRequestType.GENERAL)
     else return this._makeRequest(payload, AIRequestType.GENERAL)
   }
 
   async error_explaining(prompt, options:IParams=GenerationParams): Promise<any> {
     const payload = { prompt, "endpoint":"error_explaining", ...options }
-    if (options.stream_result) return this._streamInferenceRequest(payload.endpoint, payload , AIRequestType.GENERAL)
+    if (options.stream_result) return this._streamInferenceRequest(payload.endpoint, payload , AIRequestType.GENERAL)
     else return this._makeRequest(payload, AIRequestType.GENERAL)
   }
 }