From 1e68ae058a6cab8fcd6adc85c66531d8fc1a768f Mon Sep 17 00:00:00 2001
From: STetsing <41009393+STetsing@users.noreply.github.com>
Date: Mon, 30 Sep 2024 18:56:34 +0200
Subject: [PATCH] initial

---
 .../src/app/plugins/remixAIPlugin.tsx         |  12 +-
 .../src/lib/InferenceServerManager.ts         |   4 +-
 .../src/inferencers/remote/remoteInference.ts | 106 +++++++++---------
 libs/remix-ai-core/src/types/models.ts        |   1 +
 libs/remix-ai-core/src/types/types.ts         |   2 +-
 .../remix-ai/src/lib/components/personas.tsx  |   8 ++
 .../remix-ai/src/lib/components/send.ts       |  47 ++++++++
 7 files changed, 123 insertions(+), 57 deletions(-)
 create mode 100644 libs/remix-ui/remix-ai/src/lib/components/personas.tsx
 create mode 100644 libs/remix-ui/remix-ai/src/lib/components/send.ts

diff --git a/apps/remix-ide/src/app/plugins/remixAIPlugin.tsx b/apps/remix-ide/src/app/plugins/remixAIPlugin.tsx
index 52f236f159..667bbd8f76 100644
--- a/apps/remix-ide/src/app/plugins/remixAIPlugin.tsx
+++ b/apps/remix-ide/src/app/plugins/remixAIPlugin.tsx
@@ -3,7 +3,7 @@ import { ViewPlugin } from '@remixproject/engine-web'
 import { Plugin } from '@remixproject/engine';
 import { RemixAITab } from '@remix-ui/remix-ai'
 import React from 'react';
-import { ICompletions, IModel, RemoteInferencer, IRemoteModel } from '@remix/remix-ai-core';
+import { ICompletions, IModel, RemoteInferencer, IRemoteModel, IParams, GenerationParams } from '@remix/remix-ai-core';
 
 const profile = {
   name: 'remixAI',
@@ -127,7 +127,8 @@ export class RemixAIPlugin extends ViewPlugin {
       this.call('terminal', 'log', { type: 'aitypewriterwarning', value: "RemixAI is already busy!" })
       return
     }
-
+    const params: IParams = GenerationParams
+    params.stream_result = true
     this.call('terminal', 'log', { type: 'aitypewriterwarning', value: `\n\nWaiting for RemixAI answer...` })
 
     let result
@@ -135,7 +136,7 @@ export class RemixAIPlugin extends ViewPlugin {
       result = await this.call(this.remixDesktopPluginName, 'code_explaining', prompt)
 
     } else {
-      result = await this.remoteInferencer.code_explaining(prompt)
+      result = await this.remoteInferencer.code_explaining(prompt, "", params)
     }
     if (result) this.call('terminal', 'log', { type: 'aitypewriterwarning', value: result })
     // this.call('terminal', 'log', { type: 'aitypewriterwarning', value: "RemixAI Done" })
@@ -148,13 +149,16 @@ export class RemixAIPlugin extends ViewPlugin {
       return
     }
 
+    const params: IParams = GenerationParams
+    params.stream_result = true
+
     this.call('terminal', 'log', { type: 'aitypewriterwarning', value: `\n\nWaiting for RemixAI answer...` })
 
     let result
     if (this.isOnDesktop) {
       result = await this.call(this.remixDesktopPluginName, 'error_explaining', prompt)
     } else {
-      result = await this.remoteInferencer.error_explaining(prompt)
+      result = await this.remoteInferencer.error_explaining(prompt, params)
     }
     if (result) this.call('terminal', 'log', { type: 'aitypewriterwarning', value: result })
     // this.call('terminal', 'log', { type: 'aitypewriterwarning', value: "RemixAI Done" })
diff --git a/apps/remixdesktop/src/lib/InferenceServerManager.ts b/apps/remixdesktop/src/lib/InferenceServerManager.ts
index 65ea23696e..87d1ae7781 100644
--- a/apps/remixdesktop/src/lib/InferenceServerManager.ts
+++ b/apps/remixdesktop/src/lib/InferenceServerManager.ts
@@ -484,9 +484,9 @@ export class InferenceManager implements ICompletions {
       return
     }
     if (params.stream_result) {
-      return this._streamInferenceRequest('code_explaining', { code, context, ...params })
+      return this._streamInferenceRequest('code_explaining', { prompt: code, context, ...params })
     } else {
-      return this._makeInferenceRequest('code_explaining', { code, context, ...params }, AIRequestType.GENERAL)
+      return this._makeInferenceRequest('code_explaining', { prompt: code, context, ...params }, AIRequestType.GENERAL)
     }
   }
diff --git a/libs/remix-ai-core/src/inferencers/remote/remoteInference.ts b/libs/remix-ai-core/src/inferencers/remote/remoteInference.ts
index 0c37cf751a..09c2b1927d 100644
--- a/libs/remix-ai-core/src/inferencers/remote/remoteInference.ts
+++ b/libs/remix-ai-core/src/inferencers/remote/remoteInference.ts
@@ -1,8 +1,10 @@
 import { ICompletions, IParams, AIRequestType, RemoteBackendOPModel } from "../../types/types";
+import { GenerationParams, CompletionParams, InsertionParams } from "../../types/models";
 import { buildSolgptPromt } from "../../prompts/promptBuilder";
-import axios from "axios";
 import EventEmitter from "events";
 import { ChatHistory } from "../../prompts/chat";
+import axios, { AxiosResponse } from 'axios';
+import { Readable } from 'stream';
 
 const defaultErrorMessage = `Unable to get a response from AI server`
 
@@ -12,38 +14,34 @@ export class RemoteInferencer implements ICompletions {
   max_history = 7
   model_op = RemoteBackendOPModel.CODELLAMA // default model operation; change this to llama if necessary
   event: EventEmitter
+  test_env = true
 
   constructor(apiUrl?: string, completionUrl?: string) {
-    this.api_url = apiUrl !== undefined ? apiUrl : "https://solcoder.remixproject.org"
-    this.completion_url = completionUrl !== undefined ? completionUrl : "https://completion.remixproject.org"
+    this.api_url = apiUrl !== undefined ? apiUrl : this.test_env ? "http://127.0.0.1:7861/" : "https://solcoder.remixproject.org"
+    this.completion_url = completionUrl !== undefined ? completionUrl : this.test_env ? "http://127.0.0.1:7861/" : "https://completion.remixproject.org"
     this.event = new EventEmitter()
   }
 
-  private async _makeRequest(data, rType: AIRequestType) {
+  private async _makeRequest(endpoint, payload, rType: AIRequestType) {
     this.event.emit("onInference")
-    const requesURL = rType === AIRequestType.COMPLETION ? this.completion_url : this.api_url
-    const userPrompt = data.data[0]
+    const requestURL = rType === AIRequestType.COMPLETION ? this.completion_url : this.api_url
+    const userPrompt = payload.prompt
+    console.log(requestURL)
     try {
-      const result = await axios(requesURL, {
-        method: 'POST',
-        headers: {
-          Accept: 'application/json',
-          'Content-Type': 'application/json',
-        },
-        data: JSON.stringify(data),
-      })
+      const options = { headers: { 'Content-Type': 'application/json' } }
+      const result = await axios.post(`${requestURL}/${endpoint}`, payload, options)
 
       switch (rType) {
         case AIRequestType.COMPLETION:
           if (result.statusText === "OK")
-            return result.data.data[0]
+            return result.data.generatedText
           else {
             return defaultErrorMessage
           }
         case AIRequestType.GENERAL:
           if (result.statusText === "OK") {
-            const resultText = result.data.data[0]
+            const resultText = result.data.generatedText
             ChatHistory.pushHistory(userPrompt, resultText)
             return resultText
           } else {
@@ -54,33 +52,35 @@ export class RemoteInferencer implements ICompletions {
     } catch (e) {
       ChatHistory.clearHistory()
       console.error('Error making request to Inference server:', e.message)
-      return e
     } finally {
       this.event.emit("onInferenceDone")
     }
   }
 
-  private async _streamInferenceRequest(data, rType: AIRequestType) {
+  private async _streamInferenceRequest(endpoint, payload, rType: AIRequestType) {
+    let resultText = ""
     try {
       this.event.emit('onInference')
-      const requesURL = rType === AIRequestType.COMPLETION ? this.completion_url : this.api_url
-      const userPrompt = data.data[0]
-      const response = await axios({
+      const requestURL = rType === AIRequestType.COMPLETION ? this.completion_url : this.api_url
+      const userPrompt = payload.prompt
+      const response: AxiosResponse = await axios({
         method: 'post',
-        url: requesURL,
-        data: data,
-        headers: { 'Content-Type': 'application/json', "Accept": "text/event-stream" },
-        responseType: 'stream'
-      });
+        url: `${requestURL}/${endpoint}`,
+        data: payload,
+        headers: {
+          "Content-Type": "application/json",
+          "Accept": "text/event-stream",
+        },
+        responseType: 'blob'
+      });
 
-      let resultText = ""
       response.data.on('data', (chunk: Buffer) => {
         try {
           const parsedData = JSON.parse(chunk.toString());
           if (parsedData.isGenerating) {
             this.event.emit('onStreamResult', parsedData.generatedText);
             resultText = resultText + parsedData.generatedText
+            console.log("resultText " + resultText)
           } else {
             // stream generation is complete
             resultText = resultText + parsedData.generatedText
@@ -99,43 +99,49 @@ export class RemoteInferencer implements ICompletions {
       console.error('Error making stream request to Inference server:', error.message);
     } finally {
+      console.log("end streaming " + resultText)
       this.event.emit('onInferenceDone')
     }
   }
 
-  async code_completion(prompt, options: IParams = null): Promise<any> {
-    const payload = !options ?
-      { "data": [prompt, "code_completion", "", false, 30, 0.9, 0.90, 50] } :
-      { "data": [prompt, "code_completion", "", options.stream_result,
-        options.max_new_tokens, options.temperature, options.top_p, options.top_k]
-      }
-
-    return this._makeRequest(payload, AIRequestType.COMPLETION)
+  async code_completion(prompt, options: IParams = CompletionParams): Promise<any> {
+    const payload = { prompt, "endpoint": "code_completion", ...options }
+    if (options.stream_result) return this._streamInferenceRequest(payload.endpoint, payload, AIRequestType.COMPLETION)
+    else return this._makeRequest(payload.endpoint, payload, AIRequestType.COMPLETION)
   }
 
-  async code_insertion(msg_pfx, msg_sfx): Promise<any> {
-    const payload = { "data": [msg_pfx, "code_insertion", msg_sfx, 1024, 0.5, 0.92, 50] }
-    return this._makeRequest(payload, AIRequestType.COMPLETION)
+  async code_insertion(msg_pfx, msg_sfx, options: IParams = InsertionParams): Promise<any> {
+    // const payload = { "data": [msg_pfx, "code_insertion", msg_sfx, 1024, 0.5, 0.92, 50] }
+    const payload = { "endpoint": "code_insertion", msg_pfx, msg_sfx, ...options }
+    if (options.stream_result) return this._streamInferenceRequest(payload.endpoint, payload, AIRequestType.COMPLETION)
+    else return this._makeRequest(payload.endpoint, payload, AIRequestType.COMPLETION)
   }
 
-  async code_generation(prompt): Promise<any> {
-    const payload = { "data": [prompt, "code_completion", "", false, 1000, 0.9, 0.92, 50] }
-    return this._makeRequest(payload, AIRequestType.COMPLETION)
+  async code_generation(prompt, options: IParams = GenerationParams): Promise<any> {
+    // const payload = { "data": [prompt, "code_completion", "", false, 1000, 0.9, 0.92, 50] }
+    const payload = { prompt, "endpoint": "code_completion", ...options }
+    if (options.stream_result) return this._streamInferenceRequest(payload.endpoint, payload, AIRequestType.COMPLETION)
+    else return this._makeRequest(payload.endpoint, payload, AIRequestType.COMPLETION)
   }
 
-  async solidity_answer(prompt): Promise<any> {
+  async solidity_answer(prompt, options: IParams = GenerationParams): Promise<any> {
     const main_prompt = buildSolgptPromt(prompt, this.model_op)
-    const payload = { "data": [main_prompt, "solidity_answer", false, 2000, 0.9, 0.8, 50] }
-    return this._makeRequest(payload, AIRequestType.GENERAL)
+    // const payload = { "data": [main_prompt, "solidity_answer", false, 2000, 0.9, 0.8, 50] }
+    const payload = { prompt: main_prompt, "endpoint": "solidity_answer", ...options }
+    if (options.stream_result) return this._streamInferenceRequest(payload.endpoint, payload, AIRequestType.GENERAL)
+    else return this._makeRequest(payload.endpoint, payload, AIRequestType.GENERAL)
   }
 
-  async code_explaining(prompt, context: string = ""): Promise<any> {
-    const payload = { "data": [prompt, "code_explaining", false, 2000, 0.9, 0.8, 50, context] }
-    return this._makeRequest(payload, AIRequestType.GENERAL)
+  async code_explaining(prompt, context: string = "", options: IParams = GenerationParams): Promise<any> {
+    // const payload = { "data": [prompt, "code_explaining", false, 2000, 0.9, 0.8, 50, context] }
+    const payload = { prompt, "endpoint": "code_explaining", context, ...options }
+    if (options.stream_result) return this._streamInferenceRequest(payload.endpoint, payload, AIRequestType.GENERAL)
+    else return this._makeRequest(payload.endpoint, payload, AIRequestType.GENERAL)
  }
 
-  async error_explaining(prompt): Promise<any> {
-    const payload = { "data": [prompt, "error_explaining", false, 2000, 0.9, 0.8, 50] }
-    return this._makeRequest(payload, AIRequestType.GENERAL)
+  async error_explaining(prompt, options: IParams = GenerationParams): Promise<any> {
+    const payload = { prompt, "endpoint": "error_explaining", ...options }
+    if (options.stream_result) return this._streamInferenceRequest(payload.endpoint, payload, AIRequestType.GENERAL)
+    else return this._makeRequest(payload.endpoint, payload, AIRequestType.GENERAL)
   }
 }
diff --git a/libs/remix-ai-core/src/types/models.ts b/libs/remix-ai-core/src/types/models.ts
index e3ed62fe74..6ddfd4fbbd 100644
--- a/libs/remix-ai-core/src/types/models.ts
+++ b/libs/remix-ai-core/src/types/models.ts
@@ -76,6 +76,7 @@ const GenerationParams: IParams = {
   topP: 0.92,
   max_new_tokens: 2000,
   stream_result: false,
+  repeat_penalty: 1.2,
 }
 
 export { DefaultModels, CompletionParams, InsertionParams, GenerationParams }
\ No newline at end of file
diff --git a/libs/remix-ai-core/src/types/types.ts b/libs/remix-ai-core/src/types/types.ts
index fe7ac5469d..aefb72d622 100644
--- a/libs/remix-ai-core/src/types/types.ts
+++ b/libs/remix-ai-core/src/types/types.ts
@@ -58,7 +58,7 @@ export interface IParams {
   temperature?: number;
   max_new_tokens?: number;
   repetition_penalty?: number;
-  repeatPenalty?: any
+  repeat_penalty?: any
   no_repeat_ngram_size?: number;
   num_beams?: number;
   num_return_sequences?: number;
diff --git a/libs/remix-ui/remix-ai/src/lib/components/personas.tsx b/libs/remix-ui/remix-ai/src/lib/components/personas.tsx
new file mode 100644
index 0000000000..850fee3ac7
--- /dev/null
+++ b/libs/remix-ui/remix-ai/src/lib/components/personas.tsx
@@ -0,0 +1,8 @@
+import { PersonaOptions, UserPersona } from '@nlux/react';
+
+export const user: UserPersona = {
+  name: 'Pipper',
+  avatar: 'assets/img/remix-logo-blue.png'
+};
+
+export const assistantAvatar = 'assets/img/remi-prof.webp';
diff --git a/libs/remix-ui/remix-ai/src/lib/components/send.ts b/libs/remix-ui/remix-ai/src/lib/components/send.ts
new file mode 100644
index 0000000000..cd40b2c40f
--- /dev/null
+++ b/libs/remix-ui/remix-ai/src/lib/components/send.ts
@@ -0,0 +1,47 @@
+
+// const demoProxyServerUrl = 'https://solcoder.remixproject.org';
+
+// export const send: StreamSend = async (
+//   prompt: string,
+//   observer: StreamingAdapterObserver,
+//   plugin: any,
+// ) => {
+//   const body = { "data": [prompt, 'solidity_answer', false, 2000, 0.9, 0.8, 50] };
+//   const response = await axios(demoProxyServerUrl, {
+//     method: 'POST',
+//     headers: { 'Content-Type': 'application/json' },
+//     data: JSON.stringify(body),
+//   });
+
+//   console.log(plugin);
+//   const result = await plugin.call('remixAI', 'solidity_answer', prompt);
+
+//   if (response.status !== 200) {
+//     observer.error(new Error('Failed to connect to the server'));
+//     return;
+//   }
+
+//   if (response.statusText !== "OK") {
+//     return;
+//   }
+
+//   // Read a stream of server-sent events
+//   // and feed them to the observer as they are being generated
+//   // const reader = response.body.getReader();
+//   // const textDecoder = new TextDecoder();
+
+//   // while (true) {
+//   //   const { value, done } = await reader.read();
+//   //   if (done) {
+//   //     break;
+//   //   }
+
+//   //   const content = textDecoder.decode(value);
+//   //   if (content) {
+//   //     observer.next(content);
+//   //   }
+//   // }
+
+//   observer.next(response.data.data[0]);
+//   observer.complete();
+// };