@@ -1,8 +1,10 @@
 import { ICompletions, IParams, AIRequestType, RemoteBackendOPModel } from "../../types/types";
+import { GenerationParams, CompletionParams, InsertionParams } from "../../types/models";
 import { buildSolgptPromt } from "../../prompts/promptBuilder";
-import axios from "axios";
 import EventEmitter from "events";
 import { ChatHistory } from "../../prompts/chat";
+import axios, { AxiosResponse } from 'axios';
+import { Readable } from 'stream';
 
 const defaultErrorMessage = `Unable to get a response from AI server`
 
@@ -12,38 +14,34 @@ export class RemoteInferencer implements ICompletions {
   max_history = 7
   model_op = RemoteBackendOPModel.CODELLAMA // default model operation change this to llama if necessary
   event: EventEmitter
+  test_env=true
 
   constructor(apiUrl?:string, completionUrl?:string) {
-    this.api_url = apiUrl!==undefined ? apiUrl: "https://solcoder.remixproject.org"
-    this.completion_url = completionUrl!==undefined ? completionUrl : "https://completion.remixproject.org"
+    this.api_url = apiUrl!==undefined ? apiUrl: this.test_env? "http://127.0.0.1:7861/" : "https://solcoder.remixproject.org"
+    this.completion_url = completionUrl!==undefined ? completionUrl : this.test_env? "http://127.0.0.1:7861/" : "https://completion.remixproject.org"
     this.event = new EventEmitter()
   }
 
-  private async _makeRequest(data, rType:AIRequestType){
+  private async _makeRequest(endpoint, payload, rType:AIRequestType){
     this.event.emit("onInference")
-    const requesURL = rType === AIRequestType.COMPLETION ? this.completion_url : this.api_url
-    const userPrompt = data.data[0]
+    let requesURL = rType === AIRequestType.COMPLETION ? this.completion_url : this.api_url
+    const userPrompt = payload.prompt
+    console.log(requesURL)
 
     try {
-      const result = await axios(requesURL, {
-        method: 'POST',
-        headers: {
-          Accept: 'application/json',
-          'Content-Type': 'application/json',
-        },
-        data: JSON.stringify(data),
-      })
+      const options = { headers: { 'Content-Type': 'application/json', } }
+      const result = await axios.post(`${requesURL}/${endpoint}`, payload, options)
 
       switch (rType) {
         case AIRequestType.COMPLETION:
           if (result.statusText === "OK")
-            return result.data.data[0]
+            return result.data.generatedText
           else {
             return defaultErrorMessage
           }
         case AIRequestType.GENERAL:
           if (result.statusText === "OK") {
-            const resultText = result.data.data[0]
+            const resultText = result.data.generatedText
             ChatHistory.pushHistory(userPrompt, resultText)
             return resultText
           } else {
@@ -54,33 +52,35 @@ export class RemoteInferencer implements ICompletions {
     } catch (e) {
       ChatHistory.clearHistory()
       console.error('Error making request to Inference server:', e.message)
-      return e
     }
     finally {
       this.event.emit("onInferenceDone")
     }
   }
 
-  private async _streamInferenceRequest(data, rType:AIRequestType){
+  private async _streamInferenceRequest(endpoint, payload, rType:AIRequestType){
+    let resultText = ""
     try {
       this.event.emit('onInference')
-      const requesURL = rType === AIRequestType.COMPLETION ? this.completion_url : this.api_url
-      const userPrompt = data.data[0]
-      const response = await axios({
+      const requestURL = rType === AIRequestType.COMPLETION ? this.completion_url : this.api_url
+      const userPrompt = payload.prompt
+      const response:AxiosResponse<Readable> = await axios({
         method: 'post',
-        url: requesURL,
-        data: data,
-        headers: { 'Content-Type': 'application/json', "Accept": "text/event-stream" },
-        responseType: 'stream'
-      });
+        url: `${requestURL}/${endpoint}`,
+        data: payload,
+        headers: {
+          "Content-Type": "application/json",
+          "Accept": "text/event-stream",
+        }
+        , responseType: 'blob' });
 
-      let resultText = ""
       response.data.on('data', (chunk: Buffer) => {
         try {
           const parsedData = JSON.parse(chunk.toString());
           if (parsedData.isGenerating) {
             this.event.emit('onStreamResult', parsedData.generatedText);
             resultText = resultText + parsedData.generatedText
+            console.log("resultText" + resultText)
           } else {
             // stream generation is complete
             resultText = resultText + parsedData.generatedText
@@ -99,43 +99,49 @@ export class RemoteInferencer implements ICompletions {
       console.error('Error making stream request to Inference server:', error.message);
     }
     finally {
+      console.log("end streamin" + resultText)
       this.event.emit('onInferenceDone')
     }
   }
 
-  async code_completion(prompt, options:IParams=null): Promise<any> {
-    const payload = !options?
-      { "data": [prompt, "code_completion", "", false, 30, 0.9, 0.90, 50]} :
-      { "data": [prompt, "code_completion", "", options.stream_result,
-        options.max_new_tokens, options.temperature, options.top_p, options.top_k]
-      }
-
-    return this._makeRequest(payload, AIRequestType.COMPLETION)
+  async code_completion(prompt, options:IParams=CompletionParams): Promise<any> {
+    const payload = { prompt, "endpoint":"code_completion", ...options }
+    if (options.stream_result) return this._streamInferenceRequest(payload.endpoint, payload, AIRequestType.COMPLETION)
+    else return this._makeRequest(payload.endpoint, payload, AIRequestType.COMPLETION)
   }
 
-  async code_insertion(msg_pfx, msg_sfx): Promise<any> {
-    const payload = { "data":[msg_pfx, "code_insertion", msg_sfx, 1024, 0.5, 0.92, 50]}
-    return this._makeRequest(payload, AIRequestType.COMPLETION)
+  async code_insertion(msg_pfx, msg_sfx, options:IParams=InsertionParams): Promise<any> {
+    // const payload = { "data":[msg_pfx, "code_insertion", msg_sfx, 1024, 0.5, 0.92, 50]}
+    const payload = { "endpoint":"code_insertion", msg_pfx, msg_sfx, ...options }
+    if (options.stream_result) return this._streamInferenceRequest(payload.endpoint, payload, AIRequestType.COMPLETION)
+    else return this._makeRequest(payload.endpoint, payload, AIRequestType.COMPLETION)
   }
 
-  async code_generation(prompt): Promise<any> {
-    const payload = { "data":[prompt, "code_completion", "", false,1000,0.9,0.92,50]}
-    return this._makeRequest(payload, AIRequestType.COMPLETION)
+  async code_generation(prompt, options:IParams=GenerationParams): Promise<any> {
+    // const payload = { "data":[prompt, "code_completion", "", false,1000,0.9,0.92,50]}
+    const payload = { prompt, "endpoint":"code_completion", ...options }
+    if (options.stream_result) return this._streamInferenceRequest(payload.endpoint, payload, AIRequestType.COMPLETION)
+    else return this._makeRequest(payload.endpoint, payload, AIRequestType.COMPLETION)
   }
 
-  async solidity_answer(prompt): Promise<any> {
+  async solidity_answer(prompt, options:IParams=GenerationParams): Promise<any> {
     const main_prompt = buildSolgptPromt(prompt, this.model_op)
-    const payload = { "data":[main_prompt, "solidity_answer", false,2000,0.9,0.8,50]}
-    return this._makeRequest(payload, AIRequestType.GENERAL)
+    // const payload = { "data":[main_prompt, "solidity_answer", false,2000,0.9,0.8,50]}
+    const payload = { prompt: main_prompt, "endpoint":"solidity_answer", ...options }
+    if (options.stream_result) return this._streamInferenceRequest(payload.endpoint, payload, AIRequestType.GENERAL)
+    else return this._makeRequest(payload.endpoint, payload, AIRequestType.GENERAL)
   }
 
-  async code_explaining(prompt, context:string=""): Promise<any> {
-    const payload = { "data":[prompt, "code_explaining", false,2000,0.9,0.8,50, context]}
-    return this._makeRequest(payload, AIRequestType.GENERAL)
+  async code_explaining(prompt, context:string="", options:IParams=GenerationParams): Promise<any> {
+    // const payload = { "data":[prompt, "code_explaining", false,2000,0.9,0.8,50, context]}
+    const payload = { prompt, "endpoint":"code_explaining", context, ...options }
+    if (options.stream_result) return this._streamInferenceRequest(payload.endpoint, payload, AIRequestType.GENERAL)
+    else return this._makeRequest(payload.endpoint, payload, AIRequestType.GENERAL)
   }
 
-  async error_explaining(prompt): Promise<any> {
-    const payload = { "data":[prompt, "error_explaining", false,2000,0.9,0.8,50]}
-    return this._makeRequest(payload, AIRequestType.GENERAL)
+  async error_explaining(prompt, options:IParams=GenerationParams): Promise<any> {
+    const payload = { prompt, "endpoint":"error_explaining", ...options }
+    if (options.stream_result) return this._streamInferenceRequest(payload.endpoint, payload, AIRequestType.GENERAL)
+    else return this._makeRequest(payload.endpoint, payload, AIRequestType.GENERAL)
   }
 }
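
A hedged usage sketch (TypeScript, not part of the diff above) showing how a caller might exercise the new streaming path. The import path is hypothetical; the `stream_result`, `max_new_tokens`, `temperature`, `top_p`, and `top_k` option fields are taken from the parameter arrays the old code built, and the event names come from the emits in the change itself. On the streaming path the text surfaces through `onStreamResult` events rather than the return value, so the consumer listens on `inferencer.event`.

import { RemoteInferencer } from "./remoteInference" // hypothetical import path

async function demo() {
  // No-arg construction falls back to the default endpoints chosen in the constructor.
  const inferencer = new RemoteInferencer()

  // Partial results arrive through the EventEmitter exposed on the instance.
  inferencer.event.on("onStreamResult", (text: string) => process.stdout.write(text))
  inferencer.event.on("onInferenceDone", () => console.log("\n[stream complete]"))

  // stream_result: true routes the call through _streamInferenceRequest;
  // the sampling fields mirror the values the old "data" arrays carried.
  await inferencer.solidity_answer("What is a reentrancy guard?", {
    stream_result: true,
    max_new_tokens: 2000,
    temperature: 0.9,
    top_p: 0.8,
    top_k: 50,
  })

  // With stream_result false (or left to the GenerationParams default), the same
  // call goes through _makeRequest and resolves to the full generated text.
}

demo().catch(console.error)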