streaming inference result on desktop

pull/5098/head
Stéphane Tetsing 4 months ago
parent 09fbfdfa71
commit 52f594e04d
  1. apps/circuit-compiler/src/app/components/container.tsx (6 changed lines)
  2. apps/remix-ide/src/app/plugins/remixAIPlugin.tsx (46 changed lines)
  3. apps/remix-ide/src/remixEngine.js (2 changed lines)
  4. apps/remixdesktop/src/lib/InferenceServerManager.ts (257 changed lines)
  5. apps/remixdesktop/src/plugins/remixAIDektop.ts (49 changed lines)
  6. apps/vyper/src/app/utils/remix-client.tsx (2 changed lines)
  7. libs/remix-ai-core/src/index.ts (11 changed lines)
  8. libs/remix-ai-core/src/inferencers/remote/remoteInference.ts (62 changed lines)
  9. libs/remix-ai-core/src/types/models.ts (38 changed lines)
  10. libs/remix-ai-core/src/types/types.ts (10 changed lines)
  11. libs/remix-ui/editor/src/lib/remix-ui-editor.tsx (12 changed lines)
  12. libs/remix-ui/remix-ai/src/lib/components/Default.tsx (26 changed lines)
  13. libs/remix-ui/renderer/src/lib/renderer.tsx (4 changed lines)
  14. libs/remix-ui/settings/src/lib/remix-ui-settings.tsx (2 changed lines)
  15. libs/remix-ui/tabs/src/lib/remix-ui-tabs.tsx (6 changed lines)
  16. libs/remix-ui/terminal/src/lib/remix-ui-terminal.tsx (8 changed lines)

@ -73,7 +73,7 @@ export function Container () {
explain why the error occurred and how to fix it.
`
// @ts-ignore
await circuitApp.plugin.call('solcoder', 'error_explaining', message)
await circuitApp.plugin.call('remixAI', 'error_explaining', message)
} else {
const message = `
error message: ${error}
@ -81,7 +81,7 @@ export function Container () {
explain why the error occurred and how to fix it.
`
// @ts-ignore
await circuitApp.plugin.call('solcoder', 'error_explaining', message)
await circuitApp.plugin.call('remixAI', 'error_explaining', message)
}
} else {
const error = report.message
@ -91,7 +91,7 @@ export function Container () {
explain why the error occurred and how to fix it.
`
// @ts-ignore
await circuitApp.plugin.call('solcoder', 'error_explaining', message)
await circuitApp.plugin.call('remixAI', 'error_explaining', message)
}
}

@ -3,7 +3,8 @@ import { ViewPlugin } from '@remixproject/engine-web'
import { Plugin } from '@remixproject/engine';
import { RemixAITab } from '@remix-ui/remix-ai'
import React from 'react';
import { ICompletions, IModel, RemoteInferencer } from '@remix/remix-ai-core';
import { ICompletions, IModel, RemoteInferencer, IRemoteModel } from '@remix/remix-ai-core';
import { resourceUsage } from 'process';
const profile = {
name: 'remixAI',
@ -11,7 +12,7 @@ const profile = {
methods: ['code_generation', 'code_completion',
"solidity_answer", "code_explaining",
"code_insertion", "error_explaining",
"initializeRemixAI"],
"initialize"],
events: [],
icon: 'assets/img/remix-logo-blue.png',
description: 'RemixAI provides AI services to Remix IDE.',
@ -25,7 +26,6 @@ const profile = {
export class RemixAIPlugin extends ViewPlugin {
isOnDesktop:boolean = false
aiIsActivated:boolean = false
selectedModel:IModel = null
readonly remixDesktopPluginName = 'remixAID'
remoteInferencer:RemoteInferencer = null
@ -34,7 +34,7 @@ export class RemixAIPlugin extends ViewPlugin {
super(profile)
this.isOnDesktop = inDesktop
// not user machine ressource for remote inferencing
// the user machine does not use local resources for remote inferencing
}
onActivation(): void {
@ -45,15 +45,18 @@ export class RemixAIPlugin extends ViewPlugin {
}
}
async initializeRemixAI(model: IModel) {
this.selectedModel = model
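// Initializes the AI backend: on desktop, hands the model(s) to the remixAID plugin and relays streamed results to the terminal; in the browser, creates a RemoteInferencer for the given remote endpoints.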
async initialize(model1?:IModel, model2?:IModel, remoteModel?:IRemoteModel){
if (this.isOnDesktop) {
this.call(this.remixDesktopPluginName, 'initializeModelBackend', this.selectedModel)
this.call(this.remixDesktopPluginName, 'initializeModelBackend', false, model1, model2)
this.on(this.remixDesktopPluginName, 'onStreamResult', (value) => {
console.log('onStreamResult remixai plugin', value)
this.call('terminal', 'log', { type: 'log', value: value })
})
} else {
// on browser
console.log('Initializing RemixAIPlugin on browser')
this.remoteInferencer = new RemoteInferencer(this)
this.remoteInferencer = new RemoteInferencer(remoteModel?.apiUrl, remoteModel?.completionUrl)
}
this.aiIsActivated = true
@ -61,6 +64,7 @@ export class RemixAIPlugin extends ViewPlugin {
}
async code_generation(prompt: string): Promise<any> {
console.log('code_generation')
if (this.isOnDesktop) {
return this.call(this.remixDesktopPluginName, 'code_generation', prompt)
} else {
@ -77,27 +81,41 @@ export class RemixAIPlugin extends ViewPlugin {
}
async solidity_answer(prompt: string): Promise<any> {
this.call('terminal', 'log', { type: 'aitypewriterwarning', value: `\n\nWaiting for RemixAI answer...` })
let result
if (this.isOnDesktop) {
return this.call(this.remixDesktopPluginName, 'solidity_answer', prompt)
result = await this.call(this.remixDesktopPluginName, 'solidity_answer', prompt)
} else {
return this.remoteInferencer.solidity_answer(prompt)
result = await this.remoteInferencer.solidity_answer(prompt)
}
this.call('terminal', 'log', { type: 'aitypewriterwarning', value: result })
}
async code_explaining(prompt: string): Promise<any> {
this.call('terminal', 'log', { type: 'aitypewriterwarning', value: `\n\nWaiting for RemixAI answer...` })
let result
if (this.isOnDesktop) {
return this.call(this.remixDesktopPluginName, 'code_explaining', prompt)
result = await this.call(this.remixDesktopPluginName, 'code_explaining', prompt)
} else {
return this.remoteInferencer.code_explaining(prompt)
result = await this.remoteInferencer.code_explaining(prompt)
}
if (result) this.call('terminal', 'log', { type: 'aitypewriterwarning', value: result })
}
async error_explaining(prompt: string): Promise<any> {
this.call('terminal', 'log', { type: 'aitypewriterwarning', value: `\n\nWaiting for RemixAI answer...` })
let result
if (this.isOnDesktop) {
return this.call(this.remixDesktopPluginName, 'error_explaining', prompt)
result = await this.call(this.remixDesktopPluginName, 'error_explaining', prompt)
} else {
return this.remoteInferencer.error_explaining(prompt)
result = await this.remoteInferencer.error_explaining(prompt)
}
this.call('terminal', 'log', { type: 'aitypewriterwarning', value: result })
}
async code_insertion(msg_pfx: string, msg_sfx: string): Promise<any> {

@ -26,8 +26,8 @@ export class RemixEngine extends Engine {
if (name === 'compilerloader') return { queueTimeout: 60000 * 4 }
if (name === 'filePanel') return { queueTimeout: 60000 * 20 }
if (name === 'fileManager') return { queueTimeout: 60000 * 20 }
if (name === 'solcoder') return { queueTimeout: 60000 * 2 }
if (name === 'remixAID') return { queueTimeout: 60000 * 20 }
if (name === 'remixAI') return { queueTimeout: 60000 * 20 }
if (name === 'cookbookdev') return { queueTimeout: 60000 * 3 }
return { queueTimeout: 10000 }
}

@ -1,77 +1,150 @@
import path from 'path';
import path, { resolve } from 'path';
const { spawn } = require('child_process'); // eslint-disable-line
import fs from 'fs';
import axios from "axios";
import { EventEmitter } from 'events';
import { ICompletions, IModel, IParams } from "@remix/remix-ai-core";
import { getInsertionPrompt } from "@remix/remix-ai-core";
import { ICompletions, IModel, IParams, InsertionParams,
CompletionParams, GenerationParams, ModelType,
IStreamResponse } from "../../../../libs/remix-ai-core/src/index"
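// Lightweight timer that periodically polls the inference server so a dead or unresponsive process can be detected and restarted.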
class ServerStatusTimer {
private intervalId: NodeJS.Timeout | null = null;
public interval: number;
private task: () => void;
constructor(task: () => void, interval: number) {
this.task = task;
this.interval = interval;
}
start(): void {
if (this.intervalId === null) {
this.intervalId = setInterval(() => {
this.task();
}, this.interval);
}
}
const completionParams:IParams = {
temperature: 0.8,
topK: 40,
topP: 0.92,
max_new_tokens: 15,
stop(): void {
if (this.intervalId !== null) {
clearInterval(this.intervalId);
this.intervalId = null;
}
}
const insertionParams:IParams = {
temperature: 0.8,
topK: 40,
topP: 0.92,
max_new_tokens: 150,
isRunning(): boolean {
return this.intervalId !== null;
}
}
export class InferenceManager implements ICompletions {
isReady: boolean = false
selectedModel: any
modelPath: string
selectedModels: IModel[] = []
event: EventEmitter
modelCacheDir: string = undefined
isInferencing: boolean = false
inferenceProcess: any=null
inferenceURL = 'http://127.0.0.1:5501'
static instance=null
private inferenceProcess: any=null
port = 5501
inferenceURL = 'http://127.0.0.1:' + this.port
private static instance=null
stateTimer: ServerStatusTimer
private constructor(model:IModel, modelDir:string) {
this.selectedModel = model
private constructor(modelDir:string) {
this.event = new EventEmitter()
this.modelCacheDir = path.join(modelDir, 'models')
this.stateTimer= new ServerStatusTimer(() => { this._processStatus()}, 20000)
}
static getInstance(model:IModel, modelDir:string){
static getInstance(modelDir:string){
if (!InferenceManager.instance) {
// check if there is a process already running
if (!model || !modelDir) {
console.error('Model and model directory is required to create InferenceManager instance')
if (!modelDir) {
console.error('model directory is required to create InferenceManager instance')
return null
}
console.log('Creating new InferenceManager instance')
InferenceManager.instance = new InferenceManager(model, modelDir)
InferenceManager.instance = new InferenceManager(modelDir)
}
return InferenceManager.instance
}
async init() {
// init the backend with a new model
async init(model:IModel) {
try {
await this._downloadModel(this.selectedModel)
await this._downloadModel(model)
if (this.modelPath === undefined) {
if (model.downloadPath === undefined) {
console.log('Model not downloaded or not found')
return
}
console.log('Model downloaded at', this.modelPath)
console.log('Model downloaded at', model.downloadPath)
this._startServer()
if (this.inferenceProcess === null) await this._startServer()
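// Register the downloaded model with the matching server endpoint, depending on whether it serves completion/insertion or general generation.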
switch (model.modelType) {
case ModelType.CODE_COMPLETION_INSERTION:
case ModelType.CODE_COMPLETION: {
const res = await this._makeRequest('init_completion', { model_path: model.downloadPath })
if (res?.data?.status === "success") {
this.isReady = true
console.log('Completion Model initialized successfully')
} else {
this.isReady = false
console.error('Error initializing the model', res.data?.error)
}
break;
}
case ModelType.GENERAL:{
const res = await this._makeRequest('init', { model_path: model.downloadPath })
if (res.data?.status === "success") {
this.isReady = true
console.log('General Model initialized successfully')
} else {
this.isReady = false
console.error('Error initializing the model', res.data?.error)
}
break;
}
}
this.stateTimer.start()
this.selectedModels.push(model)
} catch (error) {
console.error('Error initializing the model', error)
this.isReady = false
InferenceManager.instance = null
}
}
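// Health check: query the server /state endpoint; if it stops answering, back off and eventually restart the server process.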
async _processStatus() {
const options = { headers: { 'Content-Type': 'application/json', } }
const state = await axios.get(this.inferenceURL+"/state", options)
if (!state.data?.status) {
console.log('Inference server not running')
InferenceManager.instance = null
this.stateTimer.interval += this.stateTimer.interval
if (this.stateTimer.interval >= 60000) {
// attempt to restart the server
console.log('Attempting to restart the server')
this.stopInferenceServer()
this._startServer()
this.stateTimer.interval = 20000
}
} else {
// Server is running with successful request
// console.log('Inference server is running')
// console.log('completion is running', state.data?.completion)
// console.log('general is running', state.data?.general)
}
async _downloadModel(model): Promise<void> {
}
async _downloadModel(model:IModel): Promise<string> {
if (this.modelCacheDir === undefined) {
console.log('Model cache directory not provided')
return
@ -79,10 +152,12 @@ export class InferenceManager implements ICompletions {
const outputLocationPath = path.join(this.modelCacheDir, model.modelName);
console.log('output location path is', outputLocationPath)
if (fs.existsSync(outputLocationPath)) {
this.modelPath = outputLocationPath
model.downloadPath = outputLocationPath
console.log('Model already exists in the output location', outputLocationPath);
return;
}
console.log('Downloading model from', model.downloadUrl);
// Make a HEAD request to get the file size
const { headers } = await axios.head(model.downloadUrl);
const totalSize = parseInt(headers['content-length'], 10);
@ -109,7 +184,7 @@ export class InferenceManager implements ICompletions {
response.data.pipe(writer);
this.event.emit('ready')
this.modelPath = outputLocationPath
model.downloadPath = outputLocationPath
console.log('LLama Download complete');
return new Promise((resolve, reject) => {
@ -129,7 +204,7 @@ export class InferenceManager implements ICompletions {
// Check if the file exists
if (!fs.existsSync(serverPath)) {
return reject(new Error(`Python script not found at ${serverPath}`));
return reject(new Error(`Inference server not found at ${serverPath}`));
}
// Check file permissions
@ -139,8 +214,7 @@ export class InferenceManager implements ICompletions {
return reject(new Error(`No execute permission on ${serverPath}`));
}
console.log('Running in non-pkg environment');
const spawnArgs = ['5501', this.modelPath];
const spawnArgs = [this.port.toString()];
console.log(`Spawning process: ${serverPath} ${spawnArgs.join(' ')}`);
this.inferenceProcess = spawn(serverPath, spawnArgs);
@ -154,7 +228,11 @@ export class InferenceManager implements ICompletions {
});
this.inferenceProcess.stderr.on('data', (data) => {
console.error(`Inference server: ${data}`);
console.error(`Inference log: ${data}`);
if (data.includes('Address already in use')) {
console.error(`Port ${this.port} is already in use. Please stop the existing server and try again`);
reject(new Error(`Port ${this.port} is already in use`));
}
resolve();
});
@ -179,14 +257,14 @@ export class InferenceManager implements ICompletions {
}
}
private async _makeRequest(endpoint, payload){
private async _makeInferenceRequest(endpoint, payload){
try {
this.event.emit('onInference')
const options = { headers: { 'Content-Type': 'application/json', } }
const response = await axios.post(`${this.inferenceURL}/${endpoint}`, payload, options)
this.event.emit('onInferenceDone')
if (response?.data?.generatedText) {
if (response.data?.generatedText) {
return response.data.generatedText
} else { return "" }
} catch (error) {
@ -194,7 +272,56 @@ export class InferenceManager implements ICompletions {
}
}
async code_completion(context: any, params:IParams=completionParams): Promise<any> {
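// Streaming variant of the inference request: chunks flagged isGenerating are re-emitted as 'onStreamResult' events instead of being returned.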
private async _streamInferenceRequest(endpoint, payload){
try {
this.event.emit('onInference')
const response = await axios({
method: 'post',
url: `${this.inferenceURL}/${endpoint}`,
data: payload,
headers: {
"Content-Type": "application/json",
"Accept": "text/event-stream",
},
responseType: 'stream'
});
response.data.on('data', (chunk: Buffer) => {
try {
const parsedData = JSON.parse(chunk.toString());
if (parsedData.isGenerating) {
this.event.emit('onStreamResult', parsedData.generatedText);
} else {
return parsedData.generatedText
}
} catch (error) {
console.error('Error parsing JSON:', error);
}
});
return "" // return empty string for now as payload already handled in event
} catch (error) {
console.error('Error making stream request to Inference server:', error.message);
}
finally {
this.event.emit('onInferenceDone')
}
}
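// Plain (non-inference) request helper used for initialization and status calls; returns the raw axios response.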
private async _makeRequest(endpoint, payload){
try {
const options = { headers: { 'Content-Type': 'application/json', } }
const response = await axios.post(`${this.inferenceURL}/${endpoint}`, payload, options)
this.event.emit('onInferenceDone')
return response
} catch (error) {
console.error('Error making request to Inference server:', error.message);
}
}
async code_completion(context: any, params:IParams=CompletionParams): Promise<any> {
if (!this.isReady) {
console.log('model not ready yet')
return
@ -202,17 +329,63 @@ export class InferenceManager implements ICompletions {
// as of now no prompt required
const payload = { context_code: context, ...params }
return this._makeRequest('code_completion', payload)
return this._makeInferenceRequest('code_completion', payload)
}
async code_insertion(msg_pfx: string, msg_sfx: string, params:IParams=insertionParams): Promise<any> {
async code_insertion(msg_pfx: string, msg_sfx: string, params:IParams=InsertionParams): Promise<any> {
if (!this.isReady) {
console.log('model not ready yet')
return
}
const payload = { code_pfx:msg_pfx, code_sfx:msg_sfx, ...params }
return this._makeRequest('code_insertion', payload)
return this._makeInferenceRequest('code_insertion', payload)
}
async code_generation(prompt: string, params:IParams=GenerationParams): Promise<any> {
if (!this.isReady) {
console.log('model not ready yet')
return
}
return this._makeInferenceRequest('code_generation', { prompt, ...params })
}
async code_explaining(code:string, context:string, params:IParams=GenerationParams): Promise<any> {
if (!this.isReady) {
console.log('model not ready yet')
return
}
if (GenerationParams.stream_result) {
return this._streamInferenceRequest('code_explaining', { code, context, ...params })
} else {
return this._makeInferenceRequest('code_explaining', { code, context, ...params })
}
}
async error_explaining(prompt: string, params:IParams=GenerationParams): Promise<any>{
if (!this.isReady) {
console.log('model not ready yet')
return ""
}
if (GenerationParams.stream_result) {
return this._streamInferenceRequest('error_explaining', { prompt, ...params })
} else {
return this._makeInferenceRequest('error_explaining', { prompt, ...params })
}
}
async solidity_answer(prompt: string, params:IParams=GenerationParams): Promise<any> {
if (!this.isReady) {
console.log('model not ready yet')
return
}
if (GenerationParams.stream_result) {
return this._streamInferenceRequest('solidity_answer', { prompt, ...params })
} else {
return this._makeInferenceRequest('solidity_answer', { prompt, ...params })
}
}
// kill dangling process making use of the port
}

@ -5,6 +5,8 @@ import { Profile } from "@remixproject/plugin-utils"
// use remix ai core
import { InferenceManager } from "../lib/InferenceServerManager"
import { cacheDir } from "../utils/config"
import { RemoteInferencer } from "../../../../libs/remix-ai-core/src/index"
// import { isE2E } from "../main";
const profile = {
@ -42,7 +44,7 @@ const clientProfile: Profile = {
class RemixAIDesktopPluginClient extends ElectronBasePluginClient {
readonly modelCacheDir: string = cacheDir
InferenceModel:InferenceManager = null
desktopInferencer:InferenceManager | RemoteInferencer = null
constructor (webContentsId: number, profile: Profile){
console.log("loading the remix plugin client ........................")
@ -62,23 +64,56 @@ class RemixAIDesktopPluginClient extends ElectronBasePluginClient {
this.emit('enabled')
}
async initializeModelBackend(multitaskModel: any){
if (this.InferenceModel === null) {
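// Picks the desktop backend (RemoteInferencer or local InferenceManager), initializes the requested models, and forwards backend events to the renderer.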
async initializeModelBackend(local, generalModel?, completionModel?){
if (local){
console.log('Initializing Inference model locally')
this.desktopInferencer = new RemoteInferencer()
} else {
if (!this.desktopInferencer){
console.log('Initializing Inference model')
this.InferenceModel = InferenceManager.getInstance(multitaskModel, this.modelCacheDir)
if (!this.InferenceModel.isReady) this.InferenceModel.init()
this.desktopInferencer = InferenceManager.getInstance(this.modelCacheDir)
} else {
console.log('Inference model already initialized')
}
if (this.desktopInferencer instanceof InferenceManager && generalModel) await this.desktopInferencer.init(generalModel)
if (this.desktopInferencer instanceof InferenceManager && completionModel) await this.desktopInferencer.init(completionModel)
}
// set event listeners
this.desktopInferencer.event.on('onStreamResult', (data) => {
this.emit('onStreamResult', data)
})
this.desktopInferencer.event.on('onInference', () => {
this.emit('onInference')
})
this.desktopInferencer.event.on('onInferenceDone', () => {
this.emit('onInferenceDone')
})
}
async code_completion(context: any) {
// use general purpose model
return this.InferenceModel.code_completion(context)
return this.desktopInferencer.code_completion(context)
}
async code_insertion(msg_pfx: string, msg_sfx: string) {
return this.InferenceModel.code_insertion(msg_pfx, msg_sfx)
return this.desktopInferencer.code_insertion(msg_pfx, msg_sfx)
}
async code_generation(prompt: string) {
return this.desktopInferencer.code_generation(prompt)
}
async code_explaining(code:string, context?:string) {
return this.desktopInferencer.code_explaining(code, context)
}
async error_explaining(prompt: string) {
return this.desktopInferencer.error_explaining(prompt)
}
async solidity_answer(prompt: string) {
return this.desktopInferencer.solidity_answer(prompt)
}
changemodel(newModel: any){

@ -70,7 +70,7 @@ export class RemixClient extends PluginClient {
${message}
can you explain why this error occurred and how to fix it?
`
await this.client.call('solcoder' as any, 'error_explaining', message)
await this.client.call('remixAI' as any, 'error_explaining', message)
} catch (err) {
console.error('unable to askGpt')
console.error(err)

@ -1,17 +1,18 @@
'use strict'
import { IModel, IModelResponse, IModelRequest, InferenceModel, ICompletions,
IParams, ChatEntry, AIRequestType, RemoteBackendOPModel } from './types/types'
IParams, ChatEntry, AIRequestType, IRemoteModel,
RemoteBackendOPModel, IStreamResponse } from './types/types'
import { ModelType } from './types/constants'
import { DefaultModels } from './types/models'
import { DefaultModels, InsertionParams, CompletionParams, GenerationParams } from './types/models'
import { getCompletionPrompt, getInsertionPrompt } from './prompts/completionPrompts'
import { PromptBuilder } from './prompts/promptBuilder'
import { RemoteInferencer } from './inferencers/remote/remoteInference'
export {
IModel, IModelResponse, IModelRequest, InferenceModel,
ModelType, DefaultModels, ICompletions, IParams,
getCompletionPrompt, getInsertionPrompt,
RemoteInferencer,
ModelType, DefaultModels, ICompletions, IParams, IRemoteModel,
getCompletionPrompt, getInsertionPrompt, IStreamResponse,
RemoteInferencer, InsertionParams, CompletionParams, GenerationParams,
ChatEntry, AIRequestType, RemoteBackendOPModel, PromptBuilder
}

@ -1,6 +1,7 @@
import { ICompletions, IParams, ChatEntry, AIRequestType, RemoteBackendOPModel } from "../../types/types";
import { PromptBuilder } from "../../prompts/promptBuilder";
import axios from "axios";
import EventEmitter from "events";
const defaultErrorMessage = `Unable to get a response from AI server`
@ -10,13 +11,13 @@ export class RemoteInferencer implements ICompletions {
solgpt_chat_history:ChatEntry[]
max_history = 7
model_op = RemoteBackendOPModel.DEEPSEEK
mainPlugin = null
event: EventEmitter
constructor(plugin, apiUrl?:string, completionUrl?:string) {
constructor(apiUrl?:string, completionUrl?:string) {
this.api_url = apiUrl!==undefined ? apiUrl: "https://solcoder.remixproject.org"
this.completion_url = completionUrl!==undefined ? completionUrl : "https://completion.remixproject.org"
this.solgpt_chat_history = []
this.mainPlugin = plugin
this.event = new EventEmitter()
}
private pushChatHistory(prompt, result){
@ -26,7 +27,7 @@ export class RemoteInferencer implements ICompletions {
}
private async _makeRequest(data, rType:AIRequestType){
this.mainPlugin.emit("aiInfering")
this.event.emit("onInference")
const requesURL = rType === AIRequestType.COMPLETION ? this.completion_url : this.api_url
console.log("requesting on ", requesURL, rType, data.data[1])
@ -45,27 +46,61 @@ export class RemoteInferencer implements ICompletions {
if (result.statusText === "OK")
return result.data.data[0]
else {
this.mainPlugin.call('terminal', 'log', { type: 'aitypewriterwarning', value: defaultErrorMessage })
return ""
return defaultErrorMessage
}
case AIRequestType.GENERAL:
if (result.statusText === "OK") {
const resultText = result.data.data[0]
this.mainPlugin.call('terminal', 'log', { type: 'aitypewriterwarning', value: resultText })
this.pushChatHistory(prompt, resultText)
return resultText
} else {
this.mainPlugin.call('terminal', 'log', { type: 'aitypewriterwarning', value: defaultErrorMessage })
return defaultErrorMessage
}
break
}
} catch (e) {
this.mainPlugin.call('terminal', 'log', { type: 'aitypewriterwarning', value: defaultErrorMessage })
this.solgpt_chat_history = []
return ""
return e
}
finally {
this.mainPlugin.emit("aiInferingDone")
this.event.emit("onInferenceDone")
}
}
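// Streaming request against the remote endpoint: each chunk flagged isGenerating is re-emitted as an 'onStreamResult' event.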
private async _streamInferenceRequest(data, rType:AIRequestType){
try {
this.event.emit('onInference')
const requestURL = rType === AIRequestType.COMPLETION ? this.completion_url : this.api_url
const response = await axios({
method: 'post',
url: requestURL,
data: data,
headers: { 'Content-Type': 'application/json', "Accept": "text/event-stream" },
responseType: 'stream'
});
response.data.on('data', (chunk: Buffer) => {
try {
const parsedData = JSON.parse(chunk.toString());
if (parsedData.isGenerating) {
this.event.emit('onStreamResult', parsedData.generatedText);
} else {
return parsedData.generatedText
}
} catch (error) {
console.error('Error parsing JSON:', error);
}
});
return "" // return empty string for now as handled in event
} catch (error) {
console.error('Error making stream request to Inference server:', error.message);
}
finally {
this.event.emit('onInferenceDone')
}
}
@ -90,20 +125,17 @@ export class RemoteInferencer implements ICompletions {
}
async solidity_answer(prompt): Promise<any> {
this.mainPlugin.call('terminal', 'log', { type: 'aitypewriterwarning', value: `\n\nWaiting for RemixAI answer...` })
const main_prompt = this._build_solgpt_promt(prompt)
const payload = { "data":[main_prompt, "solidity_answer", false,2000,0.9,0.8,50]}
return this._makeRequest(payload, AIRequestType.GENERAL)
}
async code_explaining(prompt, context:string=""): Promise<any> {
this.mainPlugin.call('terminal', 'log', { type: 'aitypewriterwarning', value: `\n\nWaiting for RemixAI answer...` })
const payload = { "data":[prompt, "code_explaining", false,2000,0.9,0.8,50, context]}
return this._makeRequest(payload, AIRequestType.GENERAL)
}
async error_explaining(prompt): Promise<any> {
this.mainPlugin.call('terminal', 'log', { type: 'aitypewriterwarning', value: `\n\nWaiting for RemixAI answer...` })
const payload = { "data":[prompt, "error_explaining", false,2000,0.9,0.8,50]}
return this._makeRequest(payload, AIRequestType.GENERAL)
}

@ -2,7 +2,7 @@
// create a function getModels returning a list of all supported models
// create a function getModel returning a model by its name
import { IModel } from './types';
import { IModel, IParams } from './types';
import { ModelType } from './constants';
const DefaultModels = (): IModel[] => {
@ -47,7 +47,17 @@ const DefaultModels = (): IModel[] => {
modelType: ModelType.CODE_COMPLETION_INSERTION,
modelReqs: { backend: 'llamacpp', minSysMemory: 2, GPURequired: false, MinGPUVRAM: 2 }
};
return [model1, model2, model3, model4, model5];
const model6: IModel = {
name: 'DeepSeek',
task: 'text-generation',
modelName: 'DeepSeek-Coder-V2-Lite-Base.Q2_K.gguf',
downloadUrl: 'https://huggingface.co/QuantFactory/DeepSeek-Coder-V2-Lite-Base-GGUF/resolve/main/DeepSeek-Coder-V2-Lite-Base.Q2_K.gguf?download=true',
modelType: ModelType.GENERAL,
modelReqs: { backend: 'llamacpp', minSysMemory: 2, GPURequired: false, MinGPUVRAM: 8 }
};
return [model1, model2, model3, model4, model5, model6];
}
const getModel = async (name: string): Promise<IModel | undefined> => {
@ -58,4 +68,26 @@ const loadModel = async (modelname: string): Promise<void> => {
console.log(`Loading model ${modelname}`);
}
export { DefaultModels }
const CompletionParams:IParams = {
temperature: 0.8,
topK: 40,
topP: 0.92,
max_new_tokens: 15,
}
const InsertionParams:IParams = {
temperature: 0.8,
topK: 40,
topP: 0.92,
max_new_tokens: 150,
}
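// Defaults for long-form answers; stream_result asks the backend to stream the response chunk by chunk.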
const GenerationParams:IParams = {
temperature: 0.5,
topK: 40,
topP: 0.92,
max_new_tokens: 2000,
stream_result: true,
}
export { DefaultModels, CompletionParams, InsertionParams, GenerationParams }

@ -17,6 +17,11 @@ export interface IModel {
modelName: string;
modelType: ModelType;
modelReqs: IModelRequirements;
downloadPath?: string;
}
export interface IRemoteModel {
completionUrl: string;
apiUrl: string;
}
export interface IModelResponse {
@ -26,6 +31,11 @@ export interface IModelResponse {
model: IModel;
}
export interface IStreamResponse {
generatedText: string;
isGenerating: boolean;
}
export interface IModelRequest {
input: string;
model: IModel;

@ -737,7 +737,7 @@ export const EditorUI = (props: EditorUIProps) => {
const file = await props.plugin.call('fileManager', 'getCurrentFile')
const content = await props.plugin.call('fileManager', 'readFile', file)
const message = intl.formatMessage({ id: 'editor.generateDocumentationByAI' }, { content, currentFunction: currentFunction.current })
const cm = await props.plugin.call('solcoder', 'code_explaining', message)
const cm = await props.plugin.call('remixAI', 'code_explaining', message)
const natSpecCom = "\n" + extractNatspecComments(cm)
const cln = await props.plugin.call('codeParser', "getLineColumnOfNode", currenFunctionNode)
@ -773,7 +773,7 @@ export const EditorUI = (props: EditorUIProps) => {
},
]);
_paq.push(['trackEvent', 'ai', 'solcoder', 'generateDocumentation'])
_paq.push(['trackEvent', 'ai', 'remixAI', 'generateDocumentation'])
},
}
@ -791,8 +791,8 @@ export const EditorUI = (props: EditorUIProps) => {
const file = await props.plugin.call('fileManager', 'getCurrentFile')
const content = await props.plugin.call('fileManager', 'readFile', file)
const message = intl.formatMessage({ id: 'editor.explainFunctionByAI' }, { content, currentFunction: currentFunction.current })
await props.plugin.call('solcoder', 'code_explaining', message, content)
_paq.push(['trackEvent', 'ai', 'solcoder', 'explainFunction'])
await props.plugin.call('remixAI', 'code_explaining', message, content)
_paq.push(['trackEvent', 'ai', 'remixAI', 'explainFunction'])
},
}
@ -811,8 +811,8 @@ export const EditorUI = (props: EditorUIProps) => {
const content = await props.plugin.call('fileManager', 'readFile', file)
const selectedCode = editor.getModel().getValueInRange(editor.getSelection())
await props.plugin.call('solcoder', 'code_explaining', selectedCode, content)
_paq.push(['trackEvent', 'ai', 'solcoder', 'explainFunction'])
await props.plugin.call('remixAI', 'code_explaining', selectedCode, content)
_paq.push(['trackEvent', 'ai', 'remixAI', 'explainFunction'])
},
}

@ -6,6 +6,20 @@ export const Default = (props) => {
const [searchText, setSearchText] = useState('');
const [resultText, setResultText] = useState('');
const pluginName = 'remixAI'
const appendText = (newText) => {
setResultText(resultText => resultText + newText);
}
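// Subscribe to streamed results from the desktop backend and append each chunk to the output area.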
useEffect(() => {
const handleResultReady = async (e) => {
appendText(e);
};
if (props.plugin.isOnDesktop ) {
props.plugin.on(props.plugin.remixDesktopPluginName, 'onStreamResult', (value) => {
handleResultReady(value);
})
}
}, [])
return (
<div>
@ -46,19 +60,17 @@ export const Default = (props) => {
rows={10}
cols={50}
placeholder="Results..."
onChange={(e) => {
console.log('resultText changed', e.target.value)
setResultText(e.target.value)}
}
value={resultText}
readOnly
/>
<button className="remix_ai_plugin_download_button text-ai pl-2 pr-0 py-0 d-flex"
onClick={async () => {
props.plugin.call(pluginName, 'initializeRemixAI', DefaultModels()[3]);
// if (props.plugin.isOnDesktop ) {
// console.log(Date.now(), "Init model backend");
// props.plugin.call(pluginName, 'initializeModelBackend', DefaultModels()[3]);
// console.log(Date.now(), "after Init model backend");
// console.log("Got transformer model completion ");
// }
props.plugin.call("remixAI", 'initialize', DefaultModels()[1]);
}}
> Init Model </button>
</div>

@ -75,8 +75,8 @@ export const Renderer = ({ message, opt = {}, plugin }: RendererProps) => {
try {
const content = await plugin.call('fileManager', 'readFile', editorOptions.errFile)
const message = intl.formatMessage({ id: 'solidity.openaigptMessage' }, { content, messageText })
await plugin.call('solcoder', 'error_explaining', message)
_paq.push(['trackEvent', 'ai', 'solcoder', 'error_explaining_SolidityError'])
await plugin.call('remixAI', 'error_explaining', message)
_paq.push(['trackEvent', 'ai', 'remixAI', 'error_explaining_SolidityError'])
} catch (err) {
console.error('unable to askGtp')
console.error(err)

@ -462,7 +462,7 @@ export const RemixUiSettings = (props: RemixUiSettingsProps) => {
role='link'
onClick={()=>{
window.open("https://remix-ide.readthedocs.io/en/latest/ai.html")
_paq.push(['trackEvent', 'ai', 'solcoder', 'documentation'])
_paq.push(['trackEvent', 'ai', 'remixAI', 'documentation'])
}}
>
<i aria-hidden="true" className="fas fa-book"></i>

@ -251,9 +251,9 @@ export const TabsUI = (props: TabsUIProps) => {
const content = await props.plugin.call('fileManager', 'readFile', path)
if (tabsState.currentExt === 'sol') {
setExplaining(true)
await props.plugin.call('solcoder', 'code_explaining', content)
await props.plugin.call('remixAI', 'code_explaining', content)
setExplaining(false)
_paq.push(['trackEvent', 'ai', 'solcoder', 'explain_file'])
_paq.push(['trackEvent', 'ai', 'remixAI', 'explain_file'])
}
}}
>
@ -283,7 +283,7 @@ export const TabsUI = (props: TabsUIProps) => {
onClick={async () => {
await props.plugin.call('settings', 'updateCopilotChoice', !ai_switch)
setAI_switch(!ai_switch)
ai_switch ? _paq.push(['trackEvent', 'ai', 'solcoder', 'copilot_enabled']) : _paq.push(['trackEvent', 'ai', 'solcoder', 'copilot_disabled'])
ai_switch ? _paq.push(['trackEvent', 'ai', 'remixAI', 'copilot_enabled']) : _paq.push(['trackEvent', 'ai', 'remixAI', 'copilot_disabled'])
}}
>
<i className={ai_switch ? "fas fa-toggle-on fa-lg" : "fas fa-toggle-off fa-lg"}></i>

@ -238,12 +238,12 @@ export const RemixUiTerminal = (props: RemixUiTerminalProps) => {
// TODO: rm gpt or redirect gpt to sol-gpt
} else if (script.trim().startsWith('gpt')) {
call('terminal', 'log',{ type: 'warn', value: `> ${script}` })
await call('solcoder', 'solidity_answer', script)
_paq.push(['trackEvent', 'ai', 'solcoder', 'askFromTerminal'])
await call('remixAI', 'solidity_answer', script)
_paq.push(['trackEvent', 'ai', 'remixAI', 'askFromTerminal'])
} else if (script.trim().startsWith('sol-gpt')) {
call('terminal', 'log',{ type: 'warn', value: `> ${script}` })
await call('solcoder', 'solidity_answer', script)
_paq.push(['trackEvent', 'ai', 'solcoder', 'askFromTerminal'])
await call('remixAI', 'solidity_answer', script)
_paq.push(['trackEvent', 'ai', 'remixAI', 'askFromTerminal'])
} else {
await call('scriptRunner', 'execute', script)
}
