pull/5241/head
STetsing 2 months ago
parent edfb3bda09
commit 1e68ae058a
Changed files:

1. apps/remix-ide/src/app/plugins/remixAIPlugin.tsx (12 changed lines)
2. apps/remixdesktop/src/lib/InferenceServerManager.ts (4 changed lines)
3. libs/remix-ai-core/src/inferencers/remote/remoteInference.ts (106 changed lines)
4. libs/remix-ai-core/src/types/models.ts (1 changed line)
5. libs/remix-ai-core/src/types/types.ts (2 changed lines)
6. libs/remix-ui/remix-ai/src/lib/components/personas.tsx (8 changed lines)
7. libs/remix-ui/remix-ai/src/lib/components/send.ts (47 changed lines)

apps/remix-ide/src/app/plugins/remixAIPlugin.tsx

@@ -3,7 +3,7 @@ import { ViewPlugin } from '@remixproject/engine-web'
 import { Plugin } from '@remixproject/engine';
 import { RemixAITab } from '@remix-ui/remix-ai'
 import React from 'react';
-import { ICompletions, IModel, RemoteInferencer, IRemoteModel } from '@remix/remix-ai-core';
+import { ICompletions, IModel, RemoteInferencer, IRemoteModel, IParams, GenerationParams } from '@remix/remix-ai-core';

 const profile = {
   name: 'remixAI',
@@ -127,7 +127,8 @@ export class RemixAIPlugin extends ViewPlugin {
       this.call('terminal', 'log', { type: 'aitypewriterwarning', value: "RemixAI is already busy!" })
       return
     }
+    const params:IParams = GenerationParams
+    params.stream_result = true
     this.call('terminal', 'log', { type: 'aitypewriterwarning', value: `\n\nWaiting for RemixAI answer...` })
     let result
@@ -135,7 +136,7 @@ export class RemixAIPlugin extends ViewPlugin {
       result = await this.call(this.remixDesktopPluginName, 'code_explaining', prompt)
     } else {
-      result = await this.remoteInferencer.code_explaining(prompt)
+      result = await this.remoteInferencer.code_explaining(prompt, "", params)
     }
     if (result) this.call('terminal', 'log', { type: 'aitypewriterwarning', value: result })
     // this.call('terminal', 'log', { type: 'aitypewriterwarning', value: "RemixAI Done" })
@@ -148,13 +149,16 @@ export class RemixAIPlugin extends ViewPlugin {
       return
     }
+    const params:IParams = GenerationParams
+    params.stream_result = true
     this.call('terminal', 'log', { type: 'aitypewriterwarning', value: `\n\nWaiting for RemixAI answer...` })
     let result
     if (this.isOnDesktop) {
       result = await this.call(this.remixDesktopPluginName, 'error_explaining', prompt)
     } else {
-      result = await this.remoteInferencer.error_explaining(prompt)
+      result = await this.remoteInferencer.error_explaining(prompt, params)
     }
     if (result) this.call('terminal', 'log', { type: 'aitypewriterwarning', value: result })
     // this.call('terminal', 'log', { type: 'aitypewriterwarning', value: "RemixAI Done" })
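Review note: `const params:IParams = GenerationParams` binds a reference to the shared `GenerationParams` object, so `params.stream_result = true` flips the flag on the exported defaults for every later caller as well. A minimal copy-based alternative, sketched with the same imports (the spread clone is a suggestion, not what this commit does):

```ts
// Hypothetical alternative: clone the shared defaults instead of mutating them in place.
const params: IParams = { ...GenerationParams, stream_result: true }
```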

apps/remixdesktop/src/lib/InferenceServerManager.ts

@@ -484,9 +484,9 @@ export class InferenceManager implements ICompletions {
       return
     }
     if (params.stream_result) {
-      return this._streamInferenceRequest('code_explaining', { code, context, ...params })
+      return this._streamInferenceRequest('code_explaining', { prompt: code, context, ...params })
     } else {
-      return this._makeInferenceRequest('code_explaining', { code, context, ...params }, AIRequestType.GENERAL)
+      return this._makeInferenceRequest('code_explaining', { prompt: code, context, ...params }, AIRequestType.GENERAL)
     }
   }
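The `code` to `prompt: code` rename lines the desktop payload up with the `payload.prompt` field the reworked `RemoteInferencer` reads below. A rough sketch of the payload shape this commit implies (inferred from the diff, not from a published schema):

```ts
import { IParams } from '@remix/remix-ai-core'

// Inferred request payload shape; field meanings are assumptions from this diff.
interface InferencePayload extends IParams {
  prompt: string      // the code or question to run inference on
  context?: string    // optional surrounding code, used by code_explaining
  endpoint?: string   // route name appended to the base URL, e.g. "code_explaining"
}
```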

libs/remix-ai-core/src/inferencers/remote/remoteInference.ts

@@ -1,8 +1,10 @@
 import { ICompletions, IParams, AIRequestType, RemoteBackendOPModel } from "../../types/types";
+import { GenerationParams, CompletionParams, InsertionParams } from "../../types/models";
 import { buildSolgptPromt } from "../../prompts/promptBuilder";
-import axios from "axios";
 import EventEmitter from "events";
 import { ChatHistory } from "../../prompts/chat";
+import axios, { AxiosResponse } from 'axios';
+import { Readable } from 'stream';

 const defaultErrorMessage = `Unable to get a response from AI server`
@@ -12,38 +14,34 @@ export class RemoteInferencer implements ICompletions {
   max_history = 7
   model_op = RemoteBackendOPModel.CODELLAMA // default model operation change this to llama if necessary
   event: EventEmitter
+  test_env=true

   constructor(apiUrl?:string, completionUrl?:string) {
-    this.api_url = apiUrl!==undefined ? apiUrl: "https://solcoder.remixproject.org"
-    this.completion_url = completionUrl!==undefined ? completionUrl : "https://completion.remixproject.org"
+    this.api_url = apiUrl!==undefined ? apiUrl: this.test_env? "http://127.0.0.1:7861/" : "https://solcoder.remixproject.org"
+    this.completion_url = completionUrl!==undefined ? completionUrl : this.test_env? "http://127.0.0.1:7861/" : "https://completion.remixproject.org"
     this.event = new EventEmitter()
   }

-  private async _makeRequest(data, rType:AIRequestType){
+  private async _makeRequest(endpoint, payload, rType:AIRequestType){
     this.event.emit("onInference")
-    const requesURL = rType === AIRequestType.COMPLETION ? this.completion_url : this.api_url
-    const userPrompt = data.data[0]
+    let requesURL = rType === AIRequestType.COMPLETION ? this.completion_url : this.api_url
+    const userPrompt = payload.prompt
+    console.log(requesURL)

     try {
-      const result = await axios(requesURL, {
-        method: 'POST',
-        headers: {
-          Accept: 'application/json',
-          'Content-Type': 'application/json',
-        },
-        data: JSON.stringify(data),
-      })
+      const options = { headers: { 'Content-Type': 'application/json', } }
+      const result = await axios.post(`${requesURL}/${endpoint}`, payload, options)

       switch (rType) {
         case AIRequestType.COMPLETION:
           if (result.statusText === "OK")
-            return result.data.data[0]
+            return result.data.generatedText
           else {
             return defaultErrorMessage
           }
         case AIRequestType.GENERAL:
           if (result.statusText === "OK") {
-            const resultText = result.data.data[0]
+            const resultText = result.data.generatedText
             ChatHistory.pushHistory(userPrompt, resultText)
             return resultText
           } else {
@@ -54,33 +52,35 @@ export class RemoteInferencer implements ICompletions {
     } catch (e) {
       ChatHistory.clearHistory()
       console.error('Error making request to Inference server:', e.message)
+      return e
     }
     finally {
       this.event.emit("onInferenceDone")
     }
   }

-  private async _streamInferenceRequest(data, rType:AIRequestType){
+  private async _streamInferenceRequest(endpoint, payload, rType:AIRequestType){
+    let resultText = ""
     try {
       this.event.emit('onInference')
-      const requesURL = rType === AIRequestType.COMPLETION ? this.completion_url : this.api_url
-      const userPrompt = data.data[0]
-      const response = await axios({
+      const requestURL = rType === AIRequestType.COMPLETION ? this.completion_url : this.api_url
+      const userPrompt = payload.prompt
+      const response:AxiosResponse<Readable> = await axios({
         method: 'post',
-        url: requesURL,
-        data: data,
-        headers: { 'Content-Type': 'application/json', "Accept": "text/event-stream" },
-        responseType: 'stream'
-      });
+        url: `${requestURL}/${endpoint}`,
+        data: payload,
+        headers: {
+          "Content-Type": "application/json",
+          "Accept": "text/event-stream",
+        }
+        , responseType: 'blob' });

-      let resultText = ""
       response.data.on('data', (chunk: Buffer) => {
         try {
           const parsedData = JSON.parse(chunk.toString());
           if (parsedData.isGenerating) {
             this.event.emit('onStreamResult', parsedData.generatedText);
             resultText = resultText + parsedData.generatedText
+            console.log("resultText" + resultText)
           } else {
             // stream generation is complete
             resultText = resultText + parsedData.generatedText
@@ -99,43 +99,49 @@ export class RemoteInferencer implements ICompletions {
       console.error('Error making stream request to Inference server:', error.message);
     }
     finally {
+      console.log("end streamin" + resultText)
       this.event.emit('onInferenceDone')
     }
   }

-  async code_completion(prompt, options:IParams=null): Promise<any> {
-    const payload = !options?
-      { "data": [prompt, "code_completion", "", false, 30, 0.9, 0.90, 50]} :
-      { "data": [prompt, "code_completion", "", options.stream_result,
-        options.max_new_tokens, options.temperature, options.top_p, options.top_k]
-      }
-    return this._makeRequest(payload, AIRequestType.COMPLETION)
+  async code_completion(prompt, options:IParams=CompletionParams): Promise<any> {
+    const payload = { prompt, "endpoint":"code_completion", ...options }
+    if (options.stream_result) return this._streamInferenceRequest(payload.endpoint, payload, AIRequestType.COMPLETION)
+    else return this._makeRequest(payload.endpoint, payload, AIRequestType.COMPLETION)
   }

-  async code_insertion(msg_pfx, msg_sfx): Promise<any> {
-    const payload = { "data":[msg_pfx, "code_insertion", msg_sfx, 1024, 0.5, 0.92, 50]}
-    return this._makeRequest(payload, AIRequestType.COMPLETION)
+  async code_insertion(msg_pfx, msg_sfx, options:IParams=InsertionParams): Promise<any> {
+    // const payload = { "data":[msg_pfx, "code_insertion", msg_sfx, 1024, 0.5, 0.92, 50]}
+    const payload = { prompt, "endpoint":"code_insertion", msg_pfx, msg_sfx, ...options }
+    if (options.stream_result) return this._streamInferenceRequest(payload.endpoint, payload, AIRequestType.COMPLETION)
+    else return this._makeRequest(payload.endpoint, payload, AIRequestType.COMPLETION)
   }

-  async code_generation(prompt): Promise<any> {
-    const payload = { "data":[prompt, "code_completion", "", false,1000,0.9,0.92,50]}
-    return this._makeRequest(payload, AIRequestType.COMPLETION)
+  async code_generation(prompt, options:IParams=GenerationParams): Promise<any> {
+    // const payload = { "data":[prompt, "code_completion", "", false,1000,0.9,0.92,50]}
+    const payload = { prompt, "endpoint":"code_completion", ...options }
+    if (options.stream_result) return this._streamInferenceRequest(payload.endpoint, payload, AIRequestType.COMPLETION)
+    else return this._makeRequest(payload.endpoint, payload, AIRequestType.COMPLETION)
  }

-  async solidity_answer(prompt): Promise<any> {
+  async solidity_answer(prompt, options:IParams=GenerationParams): Promise<any> {
     const main_prompt = buildSolgptPromt(prompt, this.model_op)
-    const payload = { "data":[main_prompt, "solidity_answer", false,2000,0.9,0.8,50]}
-    return this._makeRequest(payload, AIRequestType.GENERAL)
+    // const payload = { "data":[main_prompt, "solidity_answer", false,2000,0.9,0.8,50]}
+    const payload = { prompt, "endpoint":"solidity_answer", ...options }
+    if (options.stream_result) return this._streamInferenceRequest(payload.endpoint, payload, AIRequestType.GENERAL)
+    else return this._makeRequest(payload.endpoint, payload, AIRequestType.GENERAL)
   }

-  async code_explaining(prompt, context:string=""): Promise<any> {
-    const payload = { "data":[prompt, "code_explaining", false,2000,0.9,0.8,50, context]}
-    return this._makeRequest(payload, AIRequestType.GENERAL)
+  async code_explaining(prompt, context:string="", options:IParams=GenerationParams): Promise<any> {
+    // const payload = { "data":[prompt, "code_explaining", false,2000,0.9,0.8,50, context]}
+    const payload = { prompt, "endpoint":"code_explaining", context, ...options }
+    if (options.stream_result) return this._streamInferenceRequest(payload.endpoint, payload, AIRequestType.GENERAL)
+    else return this._makeRequest(payload.endpoint, payload, AIRequestType.GENERAL)
   }

-  async error_explaining(prompt): Promise<any> {
-    const payload = { "data":[prompt, "error_explaining", false,2000,0.9,0.8,50]}
-    return this._makeRequest(payload, AIRequestType.GENERAL)
+  async error_explaining(prompt, options:IParams=GenerationParams): Promise<any> {
+    const payload = { prompt, "endpoint":"error_explaining", ...options }
+    if (options.stream_result) return this._streamInferenceRequest(payload.endpoint, payload , AIRequestType.GENERAL)
+    else return this._makeRequest(payload.endpoint, payload, AIRequestType.GENERAL)
   }
 }
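Taken together, the reworked class can be driven like this; a minimal sketch assuming the re-exports used elsewhere in this commit (note that with `test_env=true` hardcoded, both base URLs resolve to `http://127.0.0.1:7861/`):

```ts
import { RemoteInferencer, GenerationParams, IParams } from '@remix/remix-ai-core'

async function demo() {
  const inferencer = new RemoteInferencer() // test_env=true points this at http://127.0.0.1:7861/
  // partial chunks are emitted as 'onStreamResult' events while the request is in flight
  inferencer.event.on('onStreamResult', (text: string) => process.stdout.write(text))

  const params: IParams = { ...GenerationParams, stream_result: true }
  // the full answer accumulates from the streamed events above
  await inferencer.solidity_answer('What is a reentrancy guard?', params)
}
```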

libs/remix-ai-core/src/types/models.ts

@@ -76,6 +76,7 @@ const GenerationParams:IParams = {
   topP: 0.92,
   max_new_tokens: 2000,
   stream_result: false,
+  repeat_penalty: 1.2,
 }

 export { DefaultModels, CompletionParams, InsertionParams, GenerationParams }
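Because every high-level method above now defaults `options` to one of these objects and spreads it into the request body, the new `repeat_penalty` rides along on every default request. Roughly, for `solidity_answer` (only fields visible in this hunk are listed; the rest of `GenerationParams` is omitted):

```ts
// Approximate default request body after this commit; fields not shown in the hunk are omitted.
const body = {
  prompt: '...',
  endpoint: 'solidity_answer',
  topP: 0.92,
  max_new_tokens: 2000,
  stream_result: false,
  repeat_penalty: 1.2,
}
```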

libs/remix-ai-core/src/types/types.ts

@@ -58,7 +58,7 @@ export interface IParams {
   temperature?: number;
   max_new_tokens?: number;
   repetition_penalty?: number;
-  repeatPenalty?:any
+  repeat_penalty?:any
   no_repeat_ngram_size?: number;
   num_beams?: number;
   num_return_sequences?: number;

libs/remix-ui/remix-ai/src/lib/components/personas.tsx

@@ -0,0 +1,8 @@
+import { PersonaOptions, UserPersona } from '@nlux/react';
+
+export const user: UserPersona = {
+  name: 'Pipper',
+  avatar: 'assets/img/remix-logo-blue.png'
+};
+
+export const assistantAvatar = 'assets/img/remi-prof.webp';
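These exports feed `@nlux/react`'s persona support; a sketch of how they could be consumed (the `AiChat` wiring and the assistant name are assumptions, only the imports above are in the commit):

```tsx
import { AiChat, PersonaOptions } from '@nlux/react';
import { user, assistantAvatar } from './personas';

// Hypothetical wiring; 'RemixAI' is a placeholder assistant name.
const personaOptions: PersonaOptions = {
  assistant: { name: 'RemixAI', avatar: assistantAvatar },
  user,
};

// <AiChat adapter={...} personaOptions={personaOptions} />
```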

libs/remix-ui/remix-ai/src/lib/components/send.ts

@@ -0,0 +1,47 @@
+// const demoProxyServerUrl = 'https://solcoder.remixproject.org';
+
+// export const send: StreamSend = async (
+//   prompt: string,
+//   observer: StreamingAdapterObserver,
+//   plugin: any,
+// ) => {
+//   const body = {"data": [prompt, 'solidity_answer', false,2000,0.9,0.8,50]};
+//   const response = await axios(demoProxyServerUrl, {
+//     method: 'POST',
+//     headers: {'Content-Type': 'application/json'},
+//     data: JSON.stringify(body),
+//   });
+//   console.log(plugin);
+//   const result = await plugin.call('remixAI', 'solidity_answer', prompt);
+
+//   if (response.status !== 200) {
+//     observer.error(new Error('Failed to connect to the server'));
+//     return;
+//   }
+
+//   if (response.statusText !== "OK") {
+//     return;
+//   }
+
+//   // Read a stream of server-sent events
+//   // and feed them to the observer as they are being generated
+//   // const reader = response.body.getReader();
+//   // const textDecoder = new TextDecoder();
+
+//   // while (true) {
+//   //   const {value, done} = await reader.read();
+//   //   if (done) {
+//   //     break;
+//   //   }
+
+//   //   const content = textDecoder.decode(value);
+//   //   if (content) {
+//   //     observer.next(content);
+//   //   }
+//   // }
+
+//   observer.next(response.data.data[0]);
+//   observer.complete();
+// };
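The whole file lands commented out: it drafts an `@nlux/react` `StreamSend` adapter two ways, a direct POST to the proxy server and a call through the `remixAI` plugin. A minimal working sketch of the plugin-routed variant the draft points toward (names taken from the draft itself; the answer is pushed as a single chunk rather than streamed):

```ts
import { StreamSend, StreamingAdapterObserver } from '@nlux/react';

// Sketch of the plugin-routed variant drafted above; not part of the commit.
export const send: StreamSend = async (
  prompt: string,
  observer: StreamingAdapterObserver,
  plugin: any,
) => {
  try {
    // Delegates to the remixAI plugin rather than hitting the proxy server directly.
    const result = await plugin.call('remixAI', 'solidity_answer', prompt);
    observer.next(result);   // single chunk; true streaming would push partial text
    observer.complete();
  } catch (e) {
    observer.error(new Error('Failed to connect to the server'));
  }
};
```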