commit 299a88ca78
@@ -0,0 +1,33 @@
import { ElectronPlugin } from '@remixproject/engine-electron'
import { IModel, ModelType, DefaultModels } from '@remix/remix-ai-core';
import axios from 'axios';
import fs from 'fs';

const desktop_profile = {
  name: 'remixAID',
  displayName: 'RemixAI Desktop',
  maintainedBy: 'Remix',
  description: 'RemixAI provides AI services to Remix IDE Desktop.',
  documentation: 'https://remix-ide.readthedocs.io/en/latest/remixai.html',
  icon: 'assets/img/remix-logo-blue.png',
  methods: ['initializeModelBackend', 'code_completion', 'code_insertion', 'code_generation', 'code_explaining', 'error_explaining', 'solidity_answer'],
}

export class remixAIDesktopPlugin extends ElectronPlugin {
  constructor() {
    super(desktop_profile)
  }

  onActivation(): void {
    this.on('remixAI', 'enabled', () => {} )
    console.log('remixAIDesktopPlugin activated')
  }
}

// class RemixAIPlugin extends ElectronPlugin {
//   constructor() {
//     super(dek)
//     this.methods = ['downloadModel']
//   }
// }
@@ -0,0 +1,169 @@
import * as packageJson from '../../../../../package.json'
import { ViewPlugin } from '@remixproject/engine-web'
import { Plugin } from '@remixproject/engine';
import { RemixAITab } from '@remix-ui/remix-ai'
import React from 'react';
import { ICompletions, IModel, RemoteInferencer, IRemoteModel } from '@remix/remix-ai-core';

const profile = {
  name: 'remixAI',
  displayName: 'Remix AI',
  methods: ['code_generation', 'code_completion',
    "solidity_answer", "code_explaining",
    "code_insertion", "error_explaining",
    "initialize"],
  events: [],
  icon: 'assets/img/remix-logo-blue.png',
  description: 'RemixAI provides AI services to Remix IDE.',
  kind: '',
  // location: 'sidePanel',
  documentation: 'https://remix-ide.readthedocs.io/en/latest/remixai.html',
  version: packageJson.version,
  maintainedBy: 'Remix'
}

export class RemixAIPlugin extends Plugin {
  isOnDesktop:boolean = false
  aiIsActivated:boolean = false
  readonly remixDesktopPluginName = 'remixAID'
  remoteInferencer:RemoteInferencer = null
  isInferencing: boolean = false

  constructor(inDesktop:boolean) {
    super(profile)
    this.isOnDesktop = inDesktop

    // remote inferencing does not use the user machine's resources
  }

  onActivation(): void {
    this.initialize(null, null, null, false)
  }

  async initialize(model1?:IModel, model2?:IModel, remoteModel?:IRemoteModel, useRemote?:boolean){
    if (this.isOnDesktop) {
      // on desktop, useRemote=false selects the local inference backend
      console.log('initialize on desktop')
      const res = await this.call(this.remixDesktopPluginName, 'initializeModelBackend', useRemote, model1, model2)
      if (res) {
        this.on(this.remixDesktopPluginName, 'onStreamResult', (value) => {
          this.call('terminal', 'log', { type: 'log', value: value })
        })

        this.on(this.remixDesktopPluginName, 'onInference', () => {
          this.isInferencing = true
        })

        this.on(this.remixDesktopPluginName, 'onInferenceDone', () => {
          this.isInferencing = false
        })
      }

    } else {
      // on browser
      this.remoteInferencer = new RemoteInferencer(remoteModel?.apiUrl, remoteModel?.completionUrl)
      this.remoteInferencer.event.on('onInference', () => {
        this.isInferencing = true
      })
      this.remoteInferencer.event.on('onInferenceDone', () => {
        this.isInferencing = false
      })
    }

    this.aiIsActivated = true
    return true
  }

  async code_generation(prompt: string): Promise<any> {
    if (this.isInferencing) {
      this.call('terminal', 'log', { type: 'aitypewriterwarning', value: "RemixAI is already busy!" })
      return
    }

    if (this.isOnDesktop) {
      return await this.call(this.remixDesktopPluginName, 'code_generation', prompt)
    } else {
      return await this.remoteInferencer.code_generation(prompt)
    }
  }

  async code_completion(prompt: string): Promise<any> {
    if (this.isOnDesktop) {
      return await this.call(this.remixDesktopPluginName, 'code_completion', prompt)
    } else {
      return await this.remoteInferencer.code_completion(prompt)
    }
  }

  async solidity_answer(prompt: string): Promise<any> {
    if (this.isInferencing) {
      this.call('terminal', 'log', { type: 'aitypewriterwarning', value: "RemixAI is already busy!" })
      return
    }

    this.call('terminal', 'log', { type: 'aitypewriterwarning', value: `\n\nWaiting for RemixAI answer...` })

    let result
    if (this.isOnDesktop) {
      result = await this.call(this.remixDesktopPluginName, 'solidity_answer', prompt)
    } else {
      result = await this.remoteInferencer.solidity_answer(prompt)
    }
    if (result) this.call('terminal', 'log', { type: 'aitypewriterwarning', value: result })
    // this.call('terminal', 'log', { type: 'aitypewriterwarning', value: "RemixAI Done" })
    return result
  }

  async code_explaining(prompt: string): Promise<any> {
    if (this.isInferencing) {
      this.call('terminal', 'log', { type: 'aitypewriterwarning', value: "RemixAI is already busy!" })
      return
    }

    this.call('terminal', 'log', { type: 'aitypewriterwarning', value: `\n\nWaiting for RemixAI answer...` })

    let result
    if (this.isOnDesktop) {
      result = await this.call(this.remixDesktopPluginName, 'code_explaining', prompt)

    } else {
      result = await this.remoteInferencer.code_explaining(prompt)
    }
    if (result) this.call('terminal', 'log', { type: 'aitypewriterwarning', value: result })
    // this.call('terminal', 'log', { type: 'aitypewriterwarning', value: "RemixAI Done" })
    return result
  }

  async error_explaining(prompt: string): Promise<any> {
    if (this.isInferencing) {
      this.call('terminal', 'log', { type: 'aitypewriterwarning', value: "RemixAI is already busy!" })
      return
    }

    this.call('terminal', 'log', { type: 'aitypewriterwarning', value: `\n\nWaiting for RemixAI answer...` })

    let result
    if (this.isOnDesktop) {
      result = await this.call(this.remixDesktopPluginName, 'error_explaining', prompt)
    } else {
      result = await this.remoteInferencer.error_explaining(prompt)
    }
    if (result) this.call('terminal', 'log', { type: 'aitypewriterwarning', value: result })
    // this.call('terminal', 'log', { type: 'aitypewriterwarning', value: "RemixAI Done" })
    return result
  }

  async code_insertion(msg_pfx: string, msg_sfx: string): Promise<any> {
    if (this.isOnDesktop) {
      return await this.call(this.remixDesktopPluginName, 'code_insertion', msg_pfx, msg_sfx)
    } else {
      return await this.remoteInferencer.code_insertion(msg_pfx, msg_sfx)
    }
  }

  // render() {
  //   return (
  //     <RemixAITab plugin={this}></RemixAITab>
  //   )
  // }
}
@@ -1,283 +0,0 @@
||||
import { Plugin } from '@remixproject/engine' |
||||
|
||||
export type SuggestOptions = { |
||||
max_new_tokens: number, |
||||
temperature: number, |
||||
do_sample:boolean |
||||
top_k: number, |
||||
top_p:number, |
||||
stream_result:boolean |
||||
} |
||||
|
||||
const _paq = (window._paq = window._paq || []) |
||||
|
||||
const profile = { |
||||
name: 'solcoder', |
||||
displayName: 'solcoder', |
||||
description: 'solcoder', |
||||
methods: ['code_generation', 'code_completion', "solidity_answer", "code_explaining", "code_insertion", "error_explaining"], |
||||
events: [], |
||||
maintainedBy: 'Remix', |
||||
} |
||||
type ChatEntry = [string, string]; |
||||
|
||||
enum BackendOPModel{ |
||||
DeepSeek, |
||||
CodeLLama, |
||||
Mistral |
||||
} |
||||
|
||||
const PromptBuilder = (inst, answr, modelop) => { |
||||
if (modelop === BackendOPModel.CodeLLama) return "" |
||||
if (modelop === BackendOPModel.DeepSeek) return "\n### INSTRUCTION:\n" + inst + "\n### RESPONSE:\n" + answr |
||||
if (modelop === BackendOPModel.Mistral) return "" |
||||
} |
||||
|
||||
export class SolCoder extends Plugin { |
||||
api_url: string |
||||
completion_url: string |
||||
solgpt_chat_history:ChatEntry[] |
||||
max_history = 7 |
||||
model_op = BackendOPModel.DeepSeek |
||||
|
||||
constructor() { |
||||
super(profile) |
||||
this.api_url = "https://solcoder.remixproject.org" |
||||
this.completion_url = "https://completion.remixproject.org" |
||||
this.solgpt_chat_history = [] |
||||
} |
||||
|
||||
pushChatHistory(prompt, result){ |
||||
const chat:ChatEntry = [prompt, result.data[0]] |
||||
this.solgpt_chat_history.push(chat) |
||||
if (this.solgpt_chat_history.length > this.max_history){this.solgpt_chat_history.shift()} |
||||
} |
||||
|
||||
async code_generation(prompt): Promise<any> { |
||||
this.emit("aiInfering") |
||||
this.call('layout', 'maximizeTerminal') |
||||
_paq.push(['trackEvent', 'ai', 'solcoder', 'code_generation']) |
||||
|
||||
let result |
||||
try { |
||||
result = await( |
||||
await fetch(this.api_url, { |
||||
method: 'POST', |
||||
headers: { |
||||
Accept: 'application/json', |
||||
'Content-Type': 'application/json', |
||||
}, |
||||
body: JSON.stringify({ "data":[prompt, "code_completion", "", false,1000,0.9,0.92,50]}), |
||||
}) |
||||
).json() |
||||
if ("error" in result){ |
||||
this.call('terminal', 'log', { type: 'aitypewriterwarning', value: result.error }) |
||||
return result |
||||
} |
||||
return result.data |
||||
} catch (e) { |
||||
this.call('terminal', 'log', { type: 'typewritererror', value: `Unable to get a response ${e.message}` }) |
||||
return |
||||
} finally { |
||||
this.emit("aiInferingDone") |
||||
} |
||||
} |
||||
|
||||
async solidity_answer(prompt): Promise<any> { |
||||
this.emit("aiInfering") |
||||
this.call('layout', 'maximizeTerminal') |
||||
this.call('terminal', 'log', { type: 'aitypewriterwarning', value: `\n\nWaiting for RemixAI answer...` }) |
||||
_paq.push(['trackEvent', 'ai', 'solcoder', 'answering']) |
||||
|
||||
let result |
||||
try { |
||||
const main_prompt = this._build_solgpt_promt(prompt) |
||||
result = await( |
||||
await fetch(this.api_url, { |
||||
method: 'POST', |
||||
headers: { |
||||
Accept: 'application/json', |
||||
'Content-Type': 'application/json', |
||||
}, |
||||
body: JSON.stringify({ "data":[main_prompt, "solidity_answer", false,1000,0.9,0.8,50]}), |
||||
}) |
||||
).json() |
||||
} catch (e) { |
||||
this.call('terminal', 'log', { type: 'typewritererror', value: `Unable to get a response ${e.message}` }) |
||||
this.solgpt_chat_history = [] |
||||
return |
||||
} finally { |
||||
this.emit("aiInferingDone") |
||||
} |
||||
if (result) { |
||||
this.call('terminal', 'log', { type: 'aitypewriterwarning', value: result.data[0] }) |
||||
this.pushChatHistory(prompt, result) |
||||
} else if (result.error) { |
||||
this.call('terminal', 'log', { type: 'aitypewriterwarning', value: "Error on request" }) |
||||
} |
||||
|
||||
} |
||||
|
||||
async code_explaining(prompt, context:string=""): Promise<any> { |
||||
this.emit("aiInfering") |
||||
this.call('layout', 'maximizeTerminal') |
||||
this.call('terminal', 'log', { type: 'aitypewriterwarning', value: `\n\nWaiting for RemixAI answer...` }) |
||||
_paq.push(['trackEvent', 'ai', 'solcoder', 'explaining']) |
||||
|
||||
let result |
||||
try { |
||||
result = await( |
||||
await fetch(this.api_url, { |
||||
method: 'POST', |
||||
headers: { |
||||
Accept: 'application/json', |
||||
'Content-Type': 'application/json', |
||||
}, |
||||
body: JSON.stringify({ "data":[prompt, "code_explaining", false,2000,0.9,0.8,50, context]}), |
||||
}) |
||||
).json() |
||||
if (result) { |
||||
this.call('terminal', 'log', { type: 'aitypewriterwarning', value: result.data[0] }) |
||||
this.pushChatHistory(prompt, result) |
||||
} |
||||
return result.data[0] |
||||
} catch (e) { |
||||
this.call('terminal', 'log', { type: 'typewritererror', value: `Unable to get a response ${e.message}` }) |
||||
return |
||||
} finally { |
||||
this.emit("aiInferingDone") |
||||
} |
||||
} |
||||
|
||||
async code_completion(prompt, options:SuggestOptions=null): Promise<any> { |
||||
this.emit("aiInfering") |
||||
_paq.push(['trackEvent', 'ai', 'solcoder', 'code_completion']) |
||||
|
||||
let result |
||||
try { |
||||
result = await( |
||||
await fetch(this.completion_url, { |
||||
method: 'POST', |
||||
headers: { |
||||
Accept: 'application/json', |
||||
'Content-Type': 'application/json', |
||||
}, |
||||
body: JSON.stringify({ "data": !options? [ |
||||
prompt, // string in 'context_code' Textbox component
|
||||
"code_completion", |
||||
"", // string in 'comment' Textbox component
|
||||
false, // boolean in 'stream_result' Checkbox component
|
||||
30, // number (numeric value between 0 and 2000) in 'max_new_tokens' Slider component
|
||||
0.9, // number (numeric value between 0.01 and 1) in 'temperature' Slider component
|
||||
0.90, // number (numeric value between 0 and 1) in 'top_p' Slider component
|
||||
50, // number (numeric value between 1 and 200) in 'top_k' Slider component
|
||||
] : [ |
||||
prompt, |
||||
"code_completion", |
||||
"", |
||||
options.stream_result, |
||||
options.max_new_tokens, |
||||
options.temperature, |
||||
options.top_p, |
||||
options.top_k |
||||
]}), |
||||
}) |
||||
).json() |
||||
|
||||
if ("error" in result){ |
||||
return result |
||||
} |
||||
return result.data |
||||
|
||||
} catch (e) { |
||||
this.call('terminal', 'log', { type: 'aitypewriterwarning', value: `Unable to get a response ${e.message}` }) |
||||
return |
||||
} finally { |
||||
this.emit("aiInferingDone") |
||||
} |
||||
} |
||||
|
||||
async code_insertion(msg_pfx, msg_sfx): Promise<any> { |
||||
this.emit("aiInfering") |
||||
_paq.push(['trackEvent', 'ai', 'solcoder', 'code_insertion']) |
||||
|
||||
let result |
||||
try { |
||||
result = await( |
||||
await fetch(this.completion_url, { |
||||
method: 'POST', |
||||
headers: { |
||||
Accept: 'application/json', |
||||
'Content-Type': 'application/json', |
||||
}, |
||||
body: JSON.stringify({ "data":[ |
||||
msg_pfx, // Text before current cursor line
|
||||
"code_insertion", |
||||
msg_sfx, // Text after current cursor line
|
||||
1024, |
||||
0.5, |
||||
0.92, |
||||
50 |
||||
]}), |
||||
}) |
||||
).json() |
||||
|
||||
if ("error" in result){ |
||||
return result |
||||
} |
||||
return result.data |
||||
|
||||
} catch (e) { |
||||
this.call('terminal', 'log', { type: 'aitypewriterwarning', value: `Unable to get a response ${e.message}` }) |
||||
return |
||||
} finally { |
||||
this.emit("aiInferingDone") |
||||
} |
||||
} |
||||
|
||||
async error_explaining(prompt): Promise<any> { |
||||
this.emit("aiInfering") |
||||
this.call('layout', 'maximizeTerminal') |
||||
this.call('terminal', 'log', { type: 'aitypewriterwarning', value: `\n\nWaiting for RemixAI answer...` }) |
||||
_paq.push(['trackEvent', 'ai', 'solcoder', 'explaining']) |
||||
|
||||
let result |
||||
try { |
||||
result = await( |
||||
await fetch(this.api_url, { |
||||
method: 'POST', |
||||
headers: { |
||||
Accept: 'application/json', |
||||
'Content-Type': 'application/json', |
||||
}, |
||||
body: JSON.stringify({ "data":[prompt, "error_explaining", false,2000,0.9,0.8,50]}), |
||||
}) |
||||
).json() |
||||
if (result) { |
||||
this.call('terminal', 'log', { type: 'aitypewriterwarning', value: result.data[0] }) |
||||
this.pushChatHistory(prompt, result) |
||||
} |
||||
return result.data[0] |
||||
} catch (e) { |
||||
this.call('terminal', 'log', { type: 'typewritererror', value: `Unable to get a response ${e.message}` }) |
||||
return |
||||
} finally { |
||||
this.emit("aiInferingDone") |
||||
} |
||||
} |
||||
|
||||
_build_solgpt_promt(user_promt:string){ |
||||
if (this.solgpt_chat_history.length === 0){ |
||||
return user_promt |
||||
} else { |
||||
let new_promt = "" |
||||
for (const [question, answer] of this.solgpt_chat_history) { |
||||
new_promt += PromptBuilder(question.split('sol-gpt')[1], answer, this.model_op) |
||||
} |
||||
// finally
||||
new_promt = "sol-gpt " + new_promt + PromptBuilder(user_promt.split('sol-gpt')[1], "", this.model_op) |
||||
return new_promt |
||||
} |
||||
} |
||||
|
||||
} |
(File diff suppressed because it is too large)
@@ -0,0 +1,525 @@
||||
import path, { resolve } from 'path'; |
||||
const { spawn } = require('child_process'); // eslint-disable-line
|
||||
import fs from 'fs'; |
||||
import axios from "axios"; |
||||
import { EventEmitter } from 'events'; |
||||
import { ICompletions, IModel, IParams, InsertionParams, |
||||
CompletionParams, GenerationParams, ModelType, AIRequestType, |
||||
IStreamResponse, ChatHistory, downloadLatestReleaseExecutable, |
||||
buildSolgptPromt } from "@remix/remix-ai-core" |
||||
import { platform } from 'os'; |
||||
|
||||
class ServerStatusTimer { |
||||
private intervalId: NodeJS.Timeout | null = null; |
||||
public interval: number; |
||||
private task: () => void; |
||||
|
||||
constructor(task: () => void, interval: number) { |
||||
this.task = task; |
||||
this.interval = interval; |
||||
} |
||||
|
||||
start(): void { |
||||
if (this.intervalId === null) { |
||||
this.intervalId = setInterval(() => { |
||||
this.task(); |
||||
}, this.interval); |
||||
} |
||||
} |
||||
|
||||
stop(): void { |
||||
if (this.intervalId !== null) { |
||||
clearInterval(this.intervalId); |
||||
this.intervalId = null; |
||||
} |
||||
} |
||||
|
||||
isRunning(): boolean { |
||||
return this.intervalId !== null; |
||||
} |
||||
} |
||||
|
||||
export class InferenceManager implements ICompletions { |
||||
isReady: boolean = false |
||||
selectedModels: IModel[] = [] |
||||
event: EventEmitter |
||||
modelCacheDir: string = undefined |
||||
serverCacheDir: string = undefined |
||||
private inferenceProcess: any=null |
||||
port = 5501 |
||||
inferenceURL = 'http://127.0.0.1:' + this.port |
||||
private static instance=null |
||||
stateTimer: ServerStatusTimer |
||||
|
||||
private constructor(modelDir:string) { |
||||
this.event = new EventEmitter() |
||||
this.modelCacheDir = path.join(modelDir, 'models') |
||||
this.serverCacheDir = path.join(modelDir, 'inferenceServer') |
||||
this.stateTimer= new ServerStatusTimer(() => { this._processStatus()}, 20000) |
||||
} |
||||
|
||||
static getInstance(modelDir:string){ |
||||
if (!InferenceManager.instance) { |
||||
// check if there is a process already running
||||
if (!modelDir) { |
||||
console.error('model directory is required to create InferenceManager instance') |
||||
return null |
||||
} |
||||
console.log('Creating new InferenceManager instance') |
||||
InferenceManager.instance = new InferenceManager(modelDir) |
||||
} |
||||
return InferenceManager.instance |
||||
} |
||||
|
||||
// init the backend with a new model
|
||||
async init(model:IModel) { |
||||
try { |
||||
await this._downloadModel(model) |
||||
|
||||
if (model.downloadPath === undefined) { |
||||
console.log('Model not downloaded or not found') |
||||
return |
||||
} |
||||
|
||||
console.log('Model downloaded at', model.downloadPath) |
||||
|
||||
if (this.inferenceProcess === null) await this._startServer() |
||||
|
||||
// check if resources are met before initializing the models
|
||||
this._handleResources(true) |
||||
|
||||
console.log('Initializing model request', model.modelType) |
||||
switch (model.modelType) { |
||||
case ModelType.CODE_COMPLETION_INSERTION:
case ModelType.CODE_COMPLETION: {
||||
console.log('Initializing Completion Model') |
||||
const res = await this._makeRequest('init_completion', { model_path: model.downloadPath }) |
||||
|
||||
console.log('code completion res is', res?.data?.status) |
||||
if (res?.data?.status === "success") { |
||||
this.isReady = true |
||||
console.log('Completion Model initialized successfully') |
||||
} else { |
||||
this.isReady = false |
||||
console.error('Error initializing the model', res.data?.error) |
||||
} |
||||
break; |
||||
} |
||||
|
||||
case ModelType.GENERAL:{ |
||||
const res = await this._makeRequest('init', { model_path: model.downloadPath }) |
||||
|
||||
if (res.data?.status === "success") { |
||||
this.isReady = true |
||||
console.log('General Model initialized successfully') |
||||
} else { |
||||
this.isReady = false |
||||
console.error('Error initializing the model', res.data?.error) |
||||
} |
||||
break; |
||||
} |
||||
} |
||||
|
||||
this.stateTimer.start() // may be called once per model init (completion and general); start() is a no-op if already running
||||
this.selectedModels.push(model) |
||||
} catch (error) { |
||||
console.error('Error initializing the model', error) |
||||
this.isReady = false |
||||
InferenceManager.instance = null |
||||
} |
||||
} |
||||
|
||||
async _processStatus() { |
||||
|
||||
// check if the server is running
|
||||
const options = { headers: { 'Content-Type': 'application/json', } } |
||||
const state = await axios.get(this.inferenceURL+"/state", options) |
||||
|
||||
if (!state.data?.status) { |
||||
console.log('Inference server not running') |
||||
InferenceManager.instance = null |
||||
this.stateTimer.interval += this.stateTimer.interval |
||||
|
||||
if (this.stateTimer.interval >= 60000) { |
||||
// attempt to restart the server
|
||||
console.log('Attempting to restart the server') |
||||
this.stopInferenceServer() |
||||
this._startServer() |
||||
this.stateTimer.interval = 20000 |
||||
} |
||||
} else { |
||||
// Server is running with successful request
|
||||
// console.log('Inference server is running')
|
||||
// console.log('completion is running', state.data?.completion)
// console.log('general is running', state.data?.general)
||||
} |
||||
// this._handleResources()
|
||||
} |
||||
|
||||
async _handleResources(logger:boolean=false) { |
||||
// check resource usage
||||
const options = { headers: { 'Content-Type': 'application/json', } } |
||||
const res = await axios.get(this.inferenceURL+"/sys", options) |
||||
|
||||
if (res.data?.status) { |
||||
const max_memory = res.data.memory.total |
||||
const used_memory = res.data.memory.used |
||||
const memory_usage = res.data.memory.percent * 100 |
||||
const gpu_available = res.data.gpus |
||||
|
||||
for (const model of this.selectedModels) { |
||||
if (model.modelReqs.minSysMemory > max_memory) { |
||||
if (logger) console.warn('Insufficient memory for the model') |
||||
} |
||||
|
||||
if (model.modelReqs.minSysMemory > used_memory) { |
||||
if (logger) console.warn('Insufficient memory for the model') |
||||
} |
||||
if (model.modelReqs.GPURequired) { |
||||
if (gpu_available.length < 1) { |
||||
if (logger) console.warn('GPU required for desktop inference but not available')
||||
} |
||||
} |
||||
} |
||||
} |
||||
} |
||||
|
||||
async _downloadModel(model:IModel): Promise<string> { |
||||
if (this.modelCacheDir === undefined) { |
||||
console.log('Model cache directory not provided') |
||||
return |
||||
} else { |
||||
const outputLocationPath = path.join(this.modelCacheDir, model.modelName); |
||||
console.log('output location path is', outputLocationPath) |
||||
if (fs.existsSync(outputLocationPath)) { |
||||
model.downloadPath = outputLocationPath |
||||
console.log('Model already exists in the output location', outputLocationPath); |
||||
return; |
||||
} |
||||
|
||||
console.log('Downloading model from', model.downloadUrl); |
||||
// Make a HEAD request to get the file size
|
||||
const { headers } = await axios.head(model.downloadUrl); |
||||
const totalSize = parseInt(headers['content-length'], 10); |
||||
|
||||
// Create a write stream to save the file
|
||||
const writer = fs.createWriteStream(outputLocationPath); |
||||
|
||||
// Start the file download
|
||||
const response = await axios({ |
||||
method: 'get', |
||||
url: model.downloadUrl, |
||||
responseType: 'stream' |
||||
}); |
||||
|
||||
let downloadedSize = 0; |
||||
|
||||
response.data.on('data', (chunk: Buffer) => { |
||||
downloadedSize += chunk.length; |
||||
const progress = (Number((downloadedSize / totalSize) * 100).toFixed(2)); |
||||
console.log(`Downloaded ${progress}%`); |
||||
this.event.emit('download', progress); |
||||
}); |
||||
|
||||
response.data.pipe(writer); |
||||
|
||||
this.event.emit('ready') |
||||
model.downloadPath = outputLocationPath |
||||
console.log('LLama Download complete'); |
||||
|
||||
return new Promise((resolve, reject) => { |
||||
writer.on('finish', resolve); |
||||
writer.on('error', reject); |
||||
}); |
||||
} |
||||
} |
||||
|
||||
private async _downloadInferenceServer() { |
||||
const execPath = this._getServerPath() |
||||
try { |
||||
if (fs.existsSync(execPath)) { |
||||
console.log('Inference server already downloaded') |
||||
return true |
||||
} else { |
||||
downloadLatestReleaseExecutable(process.platform, this.serverCacheDir) |
||||
if (fs.existsSync(execPath)) {return true } else {return false} |
||||
} |
||||
} catch (error) { |
||||
console.error('Error downloading Inference server:', error) |
||||
return false |
||||
} |
||||
} |
||||
|
||||
private _getServerPath() { |
||||
// get cpu arch
|
||||
const arch = process.arch |
||||
let exec_suffix = '' |
||||
|
||||
if (arch === 'x64') { |
||||
exec_suffix = 'x64' |
||||
} else if (arch === 'arm' || arch === 'arm64') { |
||||
exec_suffix = 'arm' |
||||
} else { |
||||
throw new Error('Unsupported CPU architecture') |
||||
} |
||||
|
||||
// get the platform name and return the path to the inference server executable
||||
let exec_name = '' |
||||
if (process.platform === 'win32') { |
||||
exec_name = 'InferenceServer-' + process.platform + '.exe' |
||||
} else if (process.platform === 'linux') { |
||||
exec_name = 'InferenceServer-' + process.platform + '_' + exec_suffix |
||||
} else if (process.platform === 'darwin') { |
||||
exec_name = 'InferenceServer-' + 'mac' |
||||
} else { |
||||
throw new Error('Unsupported platform') |
||||
} |
||||
return path.join(this.serverCacheDir, exec_name); |
||||
|
||||
} |
||||
|
||||
private async _handleExistingServer() { |
||||
// check if the server is already running, kill it
|
||||
try { |
||||
const options = { headers: { 'Content-Type': 'application/json', } } |
||||
const state = await axios.get(this.inferenceURL+"/state", options) |
||||
|
||||
if (state.data?.status) { |
||||
console.log('Found existing Inference server running') |
||||
this.stopInferenceServer() |
||||
await axios.post(this.inferenceURL+"/kill", options) |
||||
} |
||||
} catch (error) { |
||||
// catch connection refused
|
||||
console.log('No existing Inference server running') |
||||
} |
||||
} |
||||
|
||||
private async _startServer() { |
||||
const serverAvailable = await this._downloadInferenceServer() |
||||
if (!serverAvailable) { |
||||
console.error('Inference server not available for this platform') |
||||
return |
||||
} |
||||
|
||||
// kill existing server if running
|
||||
this._handleExistingServer() |
||||
|
||||
return new Promise<void>((resolve, reject) => { |
||||
let serverPath = "" |
||||
try { |
||||
serverPath = this._getServerPath(); |
||||
fs.chmodSync(serverPath, '755') |
||||
} catch (error) { |
||||
console.error('Error script path:', error); |
||||
return reject(error) |
||||
} |
||||
|
||||
// Check if the file exists
|
||||
if (!fs.existsSync(serverPath)) { |
||||
return reject(new Error(`Inference server not found at ${serverPath}`)); |
||||
} |
||||
|
||||
// Check file permissions
|
||||
try { |
||||
fs.accessSync(serverPath, fs.constants.X_OK); |
||||
} catch (err) { |
||||
reject(new Error(`No execute permission on ${serverPath}`)); |
||||
} |
||||
|
||||
const spawnArgs = [this.port]; |
||||
|
||||
// console.log(`Spawning process: ${serverPath} ${spawnArgs.join(' ')}`);
|
||||
this.inferenceProcess = spawn(serverPath, spawnArgs); |
||||
|
||||
this.inferenceProcess.stdout.on('data', (data) => { |
||||
console.log(`Inference server output: ${data}`); |
||||
if (data.includes('Running on http://')) { |
||||
console.log('Inference server started successfully'); |
||||
resolve(); |
||||
} |
||||
}); |
||||
|
||||
this.inferenceProcess.stderr.on('data', (data) => { |
||||
console.error(`Inference log: ${data}`); |
||||
if (data.includes('Address already in use')) { |
||||
console.error(`Port ${this.port} is already in use. Please stop the existing server and try again`); |
||||
reject(new Error(`Port ${this.port} is already in use`)); |
||||
} |
||||
resolve(); |
||||
}); |
||||
|
||||
this.inferenceProcess.on('error', (err) => { |
||||
console.error('Failed to start Inference server:', err); |
||||
reject(err); |
||||
}); |
||||
|
||||
this.inferenceProcess.on('close', (code) => { |
||||
console.log(`Inference server process exited with code ${code}`); |
||||
if (code !== 0) { |
||||
reject(new Error(`Inference server exited with code ${code}`)); |
||||
} |
||||
}); |
||||
}); |
||||
} |
||||
|
||||
stopInferenceServer() { |
||||
if (this.inferenceProcess) { |
||||
this.inferenceProcess.kill(); |
||||
this.inferenceProcess = null; |
||||
} |
||||
} |
||||
|
||||
private async _makeInferenceRequest(endpoint, payload, rType:AIRequestType){ |
||||
try { |
||||
this.event.emit('onInference') |
||||
const options = { headers: { 'Content-Type': 'application/json', } } |
||||
const response = await axios.post(`${this.inferenceURL}/${endpoint}`, payload, options) |
||||
|
||||
const userPrompt = payload[Object.keys(payload)[0]] |
||||
this.event.emit('onInferenceDone') |
||||
|
||||
if (response.data?.generatedText) { |
||||
if (rType === AIRequestType.GENERAL) { |
||||
ChatHistory.pushHistory(userPrompt, response.data.generatedText) |
||||
} |
||||
return response.data.generatedText |
||||
} else { return "" } |
||||
} catch (error) { |
||||
ChatHistory.clearHistory() |
||||
console.error('Error making request to Inference server:', error.message); |
||||
} |
||||
} |
||||
|
||||
private async _streamInferenceRequest(endpoint, payload){ |
||||
try { |
||||
this.event.emit('onInference') |
||||
const options = { headers: { 'Content-Type': 'application/json', } } |
||||
const response = await axios({ |
||||
method: 'post', |
||||
url: `${this.inferenceURL}/${endpoint}`, |
||||
data: payload, |
||||
headers: { |
||||
"Content-Type": "application/json", |
||||
"Accept": "text/event-stream", |
||||
} |
||||
, responseType: 'stream' }); |
||||
|
||||
const userPrompt = payload[Object.keys(payload)[0]] |
||||
let resultText = "" |
||||
response.data.on('data', (chunk: Buffer) => { |
||||
try { |
||||
const parsedData = JSON.parse(chunk.toString()); |
||||
if (parsedData.isGenerating) { |
||||
this.event.emit('onStreamResult', parsedData.generatedText); |
||||
resultText = resultText + parsedData.generatedText |
||||
} else { |
||||
resultText = resultText + parsedData.generatedText |
||||
|
||||
// no additional check for streamed results
|
||||
ChatHistory.pushHistory(userPrompt, resultText) |
||||
return parsedData.generatedText |
||||
} |
||||
|
||||
} catch (error) { |
||||
ChatHistory.clearHistory() |
||||
console.error('Error parsing JSON:', error); |
||||
} |
||||
}); |
||||
|
||||
return "" // return empty string for now as payload is/will be handled in event
|
||||
} catch (error) { |
||||
ChatHistory.clearHistory() |
||||
console.error('Error making stream request to Inference server:', error.message); |
||||
} |
||||
finally { |
||||
this.event.emit('onInferenceDone') |
||||
} |
||||
} |
||||
|
||||
private async _makeRequest(endpoint, payload){ |
||||
// makes a simple request to the inference server
|
||||
try { |
||||
const options = { headers: { 'Content-Type': 'application/json', } } |
||||
const response = await axios.post(`${this.inferenceURL}/${endpoint}`, payload, options) |
||||
this.event.emit('onInferenceDone') |
||||
|
||||
return response |
||||
} catch (error) { |
||||
console.error('Error making request to Inference server:', error.message); |
||||
} |
||||
} |
||||
|
||||
async code_completion(context: any, params:IParams=CompletionParams): Promise<any> { |
||||
if (!this.isReady) { |
||||
console.log('model not ready yet') |
||||
return |
||||
} |
||||
|
||||
// as of now no prompt required
|
||||
const payload = { context_code: context, ...params } |
||||
return this._makeInferenceRequest('code_completion', payload, AIRequestType.COMPLETION) |
||||
} |
||||
|
||||
async code_insertion(msg_pfx: string, msg_sfx: string, params:IParams=InsertionParams): Promise<any> { |
||||
if (!this.isReady) { |
||||
console.log('model not ready yet') |
||||
return |
||||
} |
||||
const payload = { code_pfx:msg_pfx, code_sfx:msg_sfx, ...params } |
||||
return this._makeInferenceRequest('code_insertion', payload, AIRequestType.COMPLETION) |
||||
|
||||
} |
||||
|
||||
async code_generation(prompt: string, params:IParams=GenerationParams): Promise<any> { |
||||
if (!this.isReady) { |
||||
console.log('model not ready yet') |
||||
return |
||||
} |
||||
return this._makeInferenceRequest('code_generation', { prompt, ...params }, AIRequestType.GENERAL) |
||||
} |
||||
|
||||
async code_explaining(code:string, context:string, params:IParams=GenerationParams): Promise<any> { |
||||
if (!this.isReady) { |
||||
console.log('model not ready yet') |
||||
return |
||||
} |
||||
if (params.stream_result) { |
||||
return this._streamInferenceRequest('code_explaining', { code, context, ...params }) |
||||
} else { |
||||
return this._makeInferenceRequest('code_explaining', { code, context, ...params }, AIRequestType.GENERAL) |
||||
} |
||||
} |
||||
|
||||
async error_explaining(prompt: string, params:IParams=GenerationParams): Promise<any>{ |
||||
if (!this.isReady) { |
||||
console.log('model not ready yet') |
||||
return "" |
||||
} |
||||
if (params.stream_result) { |
||||
return this._streamInferenceRequest('error_explaining', { prompt, ...params }) |
||||
} else { |
||||
return this._makeInferenceRequest('error_explaining', { prompt, ...params }, AIRequestType.GENERAL) |
||||
} |
||||
} |
||||
|
||||
async solidity_answer(userPrompt: string, params:IParams=GenerationParams): Promise<any> { |
||||
if (!this.isReady) { |
||||
console.log('model not ready yet') |
||||
return |
||||
} |
||||
let modelOP = undefined |
||||
for (const model of this.selectedModels) { |
||||
if (model.modelType === ModelType.GENERAL) { |
||||
modelOP = model.modelOP |
||||
} |
||||
} |
||||
const prompt = buildSolgptPromt(userPrompt, modelOP) |
||||
|
||||
if (params.stream_result) { |
||||
return this._streamInferenceRequest('solidity_answer', { prompt, ...params }) |
||||
} else { |
||||
return this._makeInferenceRequest('solidity_answer', { prompt, ...params }, AIRequestType.GENERAL) |
||||
} |
||||
} |
||||
|
||||
} |
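// Typical lifecycle (illustrative sketch based on the methods above; model objects and the cache
// directory are placeholders supplied by the caller, e.g. the Electron plugin):
//   const manager = InferenceManager.getInstance(cacheDir)
//   await manager.init(completionModel)   // ModelType.CODE_COMPLETION_INSERTION
//   await manager.init(generalModel)      // ModelType.GENERAL
//   const text = await manager.code_completion('contract Counter {')
//   manager.stopInferenceServer()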
@@ -0,0 +1,115 @@
||||
import { ElectronBasePlugin, ElectronBasePluginClient } from "@remixproject/plugin-electron" |
||||
import { Profile } from "@remixproject/plugin-utils" |
||||
|
||||
// use remix ai core
|
||||
import { InferenceManager } from "../lib/InferenceServerManager" |
||||
import { cacheDir } from "../utils/config" |
||||
import { RemoteInferencer } from "@remix/remix-ai-core" |
||||
|
||||
// import { isE2E } from "../main";
|
||||
|
||||
const profile = { |
||||
name: 'remixAID', |
||||
displayName: 'RemixAI Desktop', |
||||
maintainedBy: 'Remix', |
||||
description: 'RemixAI provides AI services to Remix IDE Desktop.', |
||||
kind: '', |
||||
documentation: 'https://remix-ide.readthedocs.io/en/latest/remixai.html', |
||||
} |
||||
|
||||
export class RemixAIDesktopPlugin extends ElectronBasePlugin { |
||||
clients: RemixAIDesktopPluginClient[] = [] |
||||
constructor() { |
||||
super(profile, clientProfile, RemixAIDesktopPluginClient) |
||||
this.methods = [...super.methods] |
||||
} |
||||
} |
||||
|
||||
const clientProfile: Profile = { |
||||
name: 'remixAID', |
||||
displayName: 'RemixAI Desktop', |
||||
maintainedBy: 'Remix', |
||||
description: 'RemixAI provides AI services to Remix IDE Desktop.', |
||||
kind: '', |
||||
documentation: 'https://remix-ide.readthedocs.io/en/latest/remixai.html', |
||||
methods: ['initializeModelBackend', 'code_completion', 'code_insertion', 'code_generation', 'code_explaining', 'error_explaining', 'solidity_answer'] |
||||
} |
||||
|
||||
class RemixAIDesktopPluginClient extends ElectronBasePluginClient { |
||||
readonly modelCacheDir: string = cacheDir |
||||
desktopInferencer:InferenceManager | RemoteInferencer = null |
||||
|
||||
constructor (webContentsId: number, profile: Profile){ |
||||
super(webContentsId, profile) |
||||
} |
||||
|
||||
async onActivation(): Promise<void> { |
||||
this.onload(() => { |
||||
}) |
||||
} |
||||
|
||||
async enable (){ |
||||
console.log('Remix AI desktop plugin enabled') |
||||
this.emit('enabled') |
||||
} |
||||
|
||||
async initializeModelBackend(local, generalModel?, completionModel?){ |
||||
if (!local){ |
||||
this.desktopInferencer = new RemoteInferencer() |
||||
} else if (generalModel || completionModel){ |
||||
if (!this.desktopInferencer){ |
||||
this.desktopInferencer = InferenceManager.getInstance(this.modelCacheDir) |
||||
if (this.desktopInferencer instanceof InferenceManager && generalModel) await this.desktopInferencer.init(generalModel) |
||||
if (this.desktopInferencer instanceof InferenceManager && completionModel) await this.desktopInferencer.init(completionModel) |
||||
} else { |
||||
return false // do not set event listener twice
|
||||
} |
||||
} else { |
||||
throw new Error('No model provided') |
||||
} |
||||
|
||||
// set event listeners
|
||||
this.desktopInferencer.event.on('onStreamResult', (data) => { |
||||
this.emit('onStreamResult', data) |
||||
}) |
||||
this.desktopInferencer.event.on('onInference', () => { |
||||
this.emit('onInference') |
||||
}) |
||||
this.desktopInferencer.event.on('onInferenceDone', () => { |
||||
this.emit('onInferenceDone') |
||||
}) |
||||
return true |
||||
} |
||||
|
||||
async code_completion(context: any) { |
||||
// use general purpose model
|
||||
return this.desktopInferencer.code_completion(context) |
||||
} |
||||
|
||||
async code_insertion(msg_pfx: string, msg_sfx: string) { |
||||
return this.desktopInferencer.code_insertion(msg_pfx, msg_sfx) |
||||
} |
||||
|
||||
async code_generation(prompt: string) { |
||||
return this.desktopInferencer.code_generation(prompt) |
||||
} |
||||
|
||||
async code_explaining(code:string, context?:string) { |
||||
return this.desktopInferencer.code_explaining(code, context) |
||||
} |
||||
|
||||
async error_explaining(prompt: string) { |
||||
return this.desktopInferencer.error_explaining(prompt) |
||||
} |
||||
|
||||
async solidity_answer(prompt: string) { |
||||
return this.desktopInferencer.solidity_answer(prompt) |
||||
} |
||||
|
||||
changemodel(newModel: any){ |
||||
/// dereference the current static inference object
|
||||
/// set new one
|
||||
} |
||||
|
||||
} |
||||
|
@@ -0,0 +1 @@
{ "extends": "../../.eslintrc", "rules": {}, "ignorePatterns": ["!**/*"] }
@@ -0,0 +1,7 @@
# remix-ai-core

[![npm version](https://badge.fury.io/js/%40remix-project%2Fremixd.svg)](https://www.npmjs.com/package/@remix-project/remixd)
[![npm](https://img.shields.io/npm/dt/@remix-project/remixd.svg?label=Total%20Downloads&logo=npm)](https://www.npmjs.com/package/@remix-project/remixd)
[![npm](https://img.shields.io/npm/dw/@remix-project/remixd.svg?logo=npm)](https://www.npmjs.com/package/@remix-project/remixd)

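A minimal usage sketch of the remote inferencer exported by this library (illustrative only; the constructor defaults to the hosted Remix endpoints shown in `remoteInference.ts`, and the prompts below are examples):

```ts
import { RemoteInferencer } from '@remix/remix-ai-core'

// defaults to https://solcoder.remixproject.org and https://completion.remixproject.org
const inferencer = new RemoteInferencer()

inferencer.event.on('onInference', () => console.log('inference started'))
inferencer.event.on('onInferenceDone', () => console.log('inference done'))

// single-shot completion of a code prefix
inferencer.code_completion('pragma solidity ^0.8.0;\ncontract Counter {').then(console.log)

// chat-style Solidity question (answers are kept in ChatHistory)
inferencer.solidity_answer('What does the fallback function do?').then(console.log)
```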
(File diff suppressed because it is too large)
@@ -0,0 +1,32 @@
{
  "name": "remix-ai-core",
  "$schema": "../../node_modules/nx/schemas/project-schema.json",
  "sourceRoot": "libs/remix-ai-core/src",
  "projectType": "library",
  "implicitDependencies": [
  ],
  "targets": {
    "build": {
      "executor": "@nrwl/js:tsc",
      "outputs": ["{options.outputPath}"],
      "options": {
        "outputPath": "dist/libs/remix-ai-core",
        "main": "libs/remix-ai-core/src/index.ts",
        "tsConfig": "libs/remix-ai-core/tsconfig.lib.json",
        "updateBuildableProjectDepsInPackageJson": false,
        "assets": [
          "libs/remix-ai-core/*.md"
        ]
      }
    },
    "lint": {
      "executor": "@nrwl/linter:eslint",
      "outputs": ["{options.outputFile}"],
      "options": {
        "lintFilePatterns": ["libs/remix-ai-core/**/*.ts"],
        "eslintConfig": "libs/remix-ai-core/.eslintrc"
      }
    }
  },
  "tags": []
}
@@ -0,0 +1,29 @@
// interactive code explaining and highlighting of security vulnerabilities
import * as fs from 'fs';

class CodeExplainAgent {
  private codebase: string[]; // list of codebase files
  public currentFile: string;

  constructor(codebasePath: string) {
    // git or fs
    this.codebase = this.loadCodebase(codebasePath);
  }

  private loadCodebase(path: string): string[] {
    const files = fs.readdirSync(path);
    return files
      .filter(file => file.endsWith('.ts'))
      .flatMap(file => fs.readFileSync(`${path}/${file}`, 'utf-8').split('\n'));
  }

  public update(currentFile, lineNumber){

  }

  public getExplanations(currentLine: string, numSuggestions: number = 3): string[] {
    // process the code base, explaining the current file and highlighting some details
    const suggestions: string[] = [];
    return suggestions;
  }
}
@@ -0,0 +1,23 @@
import * as fs from 'fs';

class CodeCompletionAgent {
  private codebase: string[];

  constructor(codebasePath: string) {
    // git or fs
    this.codebase = this.loadCodebase(codebasePath);
  }

  private loadCodebase(path: string): string[] {
    const files = fs.readdirSync(path);
    return files
      .filter(file => file.endsWith('.ts'))
      .flatMap(file => fs.readFileSync(`${path}/${file}`, 'utf-8').split('\n'));
  }

  public getSuggestions(currentLine: string, numSuggestions: number = 3): string[] {
    const suggestions: string[] = [];
    // get `numSuggestions` from the llm
    return suggestions;
  }
}
@@ -0,0 +1,29 @@
// security checks
import * as fs from 'fs';

class SecurityAgent {
  private codebase: string[]; // list of codebase files
  public currentFile: string;

  constructor(codebasePath: string) {
    // git or fs
    this.codebase = this.loadCodebase(codebasePath);
  }

  private loadCodebase(path: string): string[] {
    const files = fs.readdirSync(path);
    return files
      .filter(file => file.endsWith('.ts'))
      .flatMap(file => fs.readFileSync(`${path}/${file}`, 'utf-8').split('\n'));
  }

  public update(currentFile, lineNumber){

  }

  public getRecommendations(currentLine: string, numSuggestions: number = 3): string[] {
    // process the code base, highlighting security vulnerabilities and delivering recommendations
    const suggestions: string[] = [];
    return suggestions;
  }
}
@@ -0,0 +1,55 @@
||||
import axios from 'axios'; |
||||
import fs from 'fs'; |
||||
import path from 'path'; |
||||
|
||||
interface Asset { |
||||
name: string; |
||||
browser_download_url: string; |
||||
} |
||||
|
||||
interface Release { |
||||
assets: Asset[]; |
||||
} |
||||
|
||||
const owner = 'remix-project-org' |
||||
const repo = 'remix_ai_tools' |
||||
async function getLatestRelease(owner: string, repo: string): Promise<Release> { |
||||
const url = `https://api.github.com/repos/${owner}/${repo}/releases/latest`; |
||||
const response = await axios.get(url); |
||||
return response.data; |
||||
} |
||||
|
||||
async function downloadFile(url: string, filePath: string): Promise<void> { |
||||
const writer = fs.createWriteStream(filePath); |
||||
const response = await axios({ |
||||
url, |
||||
method: 'GET', |
||||
responseType: 'stream' |
||||
}); |
||||
response.data.pipe(writer); |
||||
|
||||
return new Promise((resolve, reject) => { |
||||
writer.on('finish', resolve); |
||||
writer.on('error', reject); |
||||
}); |
||||
} |
||||
|
||||
export async function downloadLatestReleaseExecutable(platform: string, outputDir: string): Promise<void> { |
||||
try { |
||||
const release = await getLatestRelease(owner, repo); |
||||
const executables = release.assets.filter(asset => |
||||
asset.name.includes(platform) |
||||
); |
||||
|
||||
console.log(`Downloading executables for ${platform}..., ${executables} `); |
||||
|
||||
for (const executable of executables) { |
||||
const filePath = path.join(outputDir, executable.name); |
||||
console.log(`Downloading ${executable.name}...`); |
||||
await downloadFile(executable.browser_download_url, filePath); |
||||
console.log(`Downloaded ${executable.name}`); |
||||
} |
||||
} catch (error) { |
||||
console.error('Error downloading executables:', error); |
||||
} |
||||
} |
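// Usage sketch (illustrative; the cache directory is a placeholder chosen by the caller): the desktop
// InferenceManager calls this helper before spawning the server, e.g.
//   await downloadLatestReleaseExecutable(process.platform, '/path/to/cache/inferenceServer')
// Release assets are matched by a platform substring in their file names and written to outputDir.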
@@ -0,0 +1,20 @@
'use strict'

import { IModel, IModelResponse, IModelRequest, InferenceModel, ICompletions,
  IParams, ChatEntry, AIRequestType, IRemoteModel,
  RemoteBackendOPModel, IStreamResponse } from './types/types'
import { ModelType } from './types/constants'
import { DefaultModels, InsertionParams, CompletionParams, GenerationParams } from './types/models'
import { getCompletionPrompt, getInsertionPrompt } from './prompts/completionPrompts'
import { buildSolgptPromt, PromptBuilder } from './prompts/promptBuilder'
import { RemoteInferencer } from './inferencers/remote/remoteInference'
import { ChatHistory } from './prompts/chat'
import { downloadLatestReleaseExecutable } from './helpers/inferenceServerReleases'

export {
  IModel, IModelResponse, IModelRequest, InferenceModel,
  ModelType, DefaultModels, ICompletions, IParams, IRemoteModel,
  getCompletionPrompt, getInsertionPrompt, IStreamResponse, buildSolgptPromt,
  RemoteInferencer, InsertionParams, CompletionParams, GenerationParams,
  ChatEntry, AIRequestType, RemoteBackendOPModel, ChatHistory, downloadLatestReleaseExecutable
}
@@ -0,0 +1,141 @@
||||
import { ICompletions, IParams, AIRequestType, RemoteBackendOPModel } from "../../types/types"; |
||||
import { buildSolgptPromt } from "../../prompts/promptBuilder"; |
||||
import axios from "axios"; |
||||
import EventEmitter from "events"; |
||||
import { ChatHistory } from "../../prompts/chat"; |
||||
|
||||
const defaultErrorMessage = `Unable to get a response from AI server` |
||||
|
||||
export class RemoteInferencer implements ICompletions { |
||||
api_url: string |
||||
completion_url: string |
||||
max_history = 7 |
||||
model_op = RemoteBackendOPModel.CODELLAMA // default model family used by the remote backend; change if the hosted model changes
||||
event: EventEmitter |
||||
|
||||
constructor(apiUrl?:string, completionUrl?:string) { |
||||
this.api_url = apiUrl!==undefined ? apiUrl: "https://solcoder.remixproject.org" |
||||
this.completion_url = completionUrl!==undefined ? completionUrl : "https://completion.remixproject.org" |
||||
this.event = new EventEmitter() |
||||
} |
||||
|
||||
private async _makeRequest(data, rType:AIRequestType){ |
||||
this.event.emit("onInference") |
||||
const requesURL = rType === AIRequestType.COMPLETION ? this.completion_url : this.api_url |
||||
const userPrompt = data.data[0] |
||||
|
||||
try { |
||||
const result = await axios(requesURL, { |
||||
method: 'POST', |
||||
headers: { |
||||
Accept: 'application/json', |
||||
'Content-Type': 'application/json', |
||||
}, |
||||
data: JSON.stringify(data), |
||||
}) |
||||
|
||||
switch (rType) { |
||||
case AIRequestType.COMPLETION: |
||||
if (result.statusText === "OK") |
||||
return result.data.data[0] |
||||
else { |
||||
return defaultErrorMessage |
||||
} |
||||
case AIRequestType.GENERAL: |
||||
if (result.statusText === "OK") { |
||||
const resultText = result.data.data[0] |
||||
ChatHistory.pushHistory(userPrompt, resultText) |
||||
return resultText |
||||
} else { |
||||
return defaultErrorMessage |
||||
} |
||||
} |
||||
|
||||
} catch (e) { |
||||
ChatHistory.clearHistory() |
||||
console.error('Error making request to Inference server:', e.message) |
||||
return e |
||||
} |
||||
finally { |
||||
this.event.emit("onInferenceDone") |
||||
} |
||||
} |
||||
|
||||
private async _streamInferenceRequest(data, rType:AIRequestType){ |
||||
try { |
||||
this.event.emit('onInference') |
||||
const requesURL = rType === AIRequestType.COMPLETION ? this.completion_url : this.api_url |
||||
const userPrompt = data.data[0] |
||||
const response = await axios({ |
||||
method: 'post', |
||||
url: requesURL, |
||||
data: data, |
||||
headers: { 'Content-Type': 'application/json', "Accept": "text/event-stream" }, |
||||
responseType: 'stream' |
||||
}); |
||||
|
||||
let resultText = "" |
||||
response.data.on('data', (chunk: Buffer) => { |
||||
try { |
||||
const parsedData = JSON.parse(chunk.toString()); |
||||
if (parsedData.isGenerating) { |
||||
this.event.emit('onStreamResult', parsedData.generatedText); |
||||
resultText = resultText + parsedData.generatedText |
||||
} else { |
||||
// stream generation is complete
|
||||
resultText = resultText + parsedData.generatedText |
||||
ChatHistory.pushHistory(userPrompt, resultText) |
||||
return parsedData.generatedText |
||||
} |
||||
} catch (error) { |
||||
console.error('Error parsing JSON:', error); |
||||
ChatHistory.clearHistory() |
||||
} |
||||
}); |
||||
|
||||
return "" // return empty string for now as handled in event
|
||||
} catch (error) { |
||||
ChatHistory.clearHistory() |
||||
console.error('Error making stream request to Inference server:', error.message); |
||||
} |
||||
finally { |
||||
this.event.emit('onInferenceDone') |
||||
} |
||||
} |
||||
|
||||
async code_completion(prompt, options:IParams=null): Promise<any> { |
||||
const payload = !options? |
||||
{ "data": [prompt, "code_completion", "", false, 30, 0.9, 0.90, 50]} : |
||||
{ "data": [prompt, "code_completion", "", options.stream_result, |
||||
options.max_new_tokens, options.temperature, options.top_p, options.top_k] |
||||
} |
||||
|
||||
return this._makeRequest(payload, AIRequestType.COMPLETION) |
||||
} |
||||
|
||||
async code_insertion(msg_pfx, msg_sfx): Promise<any> { |
||||
const payload = { "data":[msg_pfx, "code_insertion", msg_sfx, 1024, 0.5, 0.92, 50]} |
||||
return this._makeRequest(payload, AIRequestType.COMPLETION) |
||||
} |
||||
|
||||
async code_generation(prompt): Promise<any> { |
||||
const payload = { "data":[prompt, "code_completion", "", false,1000,0.9,0.92,50]} |
||||
return this._makeRequest(payload, AIRequestType.COMPLETION) |
||||
} |
||||
|
||||
async solidity_answer(prompt): Promise<any> { |
||||
const main_prompt = buildSolgptPromt(prompt, this.model_op) |
||||
const payload = { "data":[main_prompt, "solidity_answer", false,2000,0.9,0.8,50]} |
||||
return this._makeRequest(payload, AIRequestType.GENERAL) |
||||
} |
||||
|
||||
async code_explaining(prompt, context:string=""): Promise<any> { |
||||
const payload = { "data":[prompt, "code_explaining", false,2000,0.9,0.8,50, context]} |
||||
return this._makeRequest(payload, AIRequestType.GENERAL) |
||||
} |
||||
|
||||
async error_explaining(prompt): Promise<any> { |
||||
const payload = { "data":[prompt, "error_explaining", false,2000,0.9,0.8,50]} |
||||
return this._makeRequest(payload, AIRequestType.GENERAL) |
||||
} |
||||
} |
@@ -0,0 +1,21 @@
import { ChatEntry } from "../types/types"

export abstract class ChatHistory{

  private static chatEntries:ChatEntry[] = []
  static queuSize:number = 7 // change the queue size wrt the GPU size

  public static pushHistory(prompt, result){
    const chat:ChatEntry = [prompt, result]
    this.chatEntries.push(chat)
    if (this.chatEntries.length > this.queuSize){this.chatEntries.shift()}
  }

  public static getHistory(){
    return this.chatEntries
  }

  public static clearHistory(){
    this.chatEntries = []
  }
}
@@ -0,0 +1,18 @@
import { COMPLETION_SYSTEM_PROMPT } from "../types/constants";
import { IModel } from "../types/types";

export const getInsertionPrompt = (model:IModel, msg_pfx, msg_sfx) => {
  if ((model.modelType === 'code_completion_insertion') && (model.modelName.toLocaleLowerCase().includes('deepseek'))){
    return `'<|fim▁begin|>' ${msg_pfx} '<|fim▁hole|>' ${msg_sfx} '<|fim▁end|>'`
  }
  else {
    // TODO: return an error, model not supported yet

  }
}

export const getCompletionPrompt = (model:IModel, context) => {
  if ((model.modelType === 'code_completion') && (model.modelName.toLocaleLowerCase().includes('deepseek'))){
    return `${COMPLETION_SYSTEM_PROMPT} \n### Instruction:\n${context}\n ### Response: `
  }
}
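// Example (illustrative): for a DeepSeek insertion model, getInsertionPrompt wraps the cursor
// context in DeepSeek's fill-in-the-middle tokens, producing a prompt of the form
//   '<|fim▁begin|>' <text before cursor> '<|fim▁hole|>' <text after cursor> '<|fim▁end|>'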
@@ -0,0 +1,28 @@
import { RemoteBackendOPModel } from "../types/types"
import { ChatHistory } from "./chat"

export const PromptBuilder = (inst, answr, modelop) => {
  if (modelop === RemoteBackendOPModel.CODELLAMA) return `<|start_header_id|>user<|end_header_id|>${inst}<|eot_id|><|start_header_id|>assistant<|end_header_id|> ${answr}`
  if (modelop === RemoteBackendOPModel.DEEPSEEK) return "\n### INSTRUCTION:\n" + inst + "\n### RESPONSE:\n" + answr
  if (modelop === RemoteBackendOPModel.MISTRAL) return ""
}

export const buildSolgptPromt = (userPrompt:string, modelOP:RemoteBackendOPModel) => {
  if (modelOP === undefined) {
    console.log('WARNING: modelOP is undefined. Provide a valid model OP for chat history')
    return userPrompt
  }
  if (ChatHistory.getHistory().length === 0){
    return userPrompt
  } else {
    let newPrompt = ""
    for (const [question, answer] of ChatHistory.getHistory()) {
      if (question.startsWith('sol-gpt')) newPrompt += PromptBuilder(question.split('sol-gpt')[1], answer, modelOP)
      else if (question.startsWith('gpt')) newPrompt += PromptBuilder(question.split('gpt')[1], answer, modelOP)
      else newPrompt += PromptBuilder(question, answer, modelOP)
    }
    // finally, append the current user prompt
    newPrompt = "sol-gpt " + newPrompt + PromptBuilder(userPrompt.split('gpt')[1], "", modelOP)
    return newPrompt
  }
}
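// Illustrative result (assuming one prior exchange in ChatHistory and a DeepSeek backend;
// the question and answer placeholders below are examples):
//   sol-gpt
//   ### INSTRUCTION:
//   <previous question>
//   ### RESPONSE:
//   <previous answer>
//   ### INSTRUCTION:
//   <current user prompt>
//   ### RESPONSE: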
@@ -0,0 +1,9 @@
/// constants for model selection

export enum ModelType {
  CODE_COMPLETION = 'code_completion',
  GENERAL = 'general',
  CODE_COMPLETION_INSERTION = 'code_completion_insertion',
}

export const COMPLETION_SYSTEM_PROMPT = "You are a Solidity AI Assistant that complete user code with provided context. You provide accurate solution and always answer as helpfully as possible, while being safe. You only provide code using this context:\n"
@@ -0,0 +1,81 @@
||||
// create a list of supported models
|
||||
// create a function getModels returning a list of all supported models
|
||||
// create a function getModel returning a model by its name
|
||||
|
||||
import { IModel, IParams, RemoteBackendOPModel } from './types'; |
||||
import { ModelType } from './constants'; |
||||
|
||||
const DefaultModels = (): IModel[] => { |
||||
const model1:IModel = { |
||||
name: 'DeepSeek', |
||||
modelOP: RemoteBackendOPModel.DEEPSEEK, |
||||
task: 'text-generation', |
||||
modelName: 'deepseek-coder-6.7b-instruct-q4.gguf', |
||||
downloadUrl: 'https://drive.usercontent.google.com/download?id=13sz7lnEhpQ6EslABpAKl2HWZdtX3d9Nh&confirm=xxx', |
||||
modelType: ModelType.GENERAL, |
||||
modelReqs: { backend: 'llamacpp', minSysMemory: 8, GPURequired: false, MinGPUVRAM: 8 } |
||||
}; |
||||
const model2: IModel = { |
||||
name: 'DeepSeek', |
||||
modelOP: RemoteBackendOPModel.DEEPSEEK, |
||||
task: 'text-generation', |
||||
modelName: 'deepseek-coder-1.3b-base-q4.gguf', |
||||
downloadUrl: 'https://drive.usercontent.google.com/download?id=13UNJuB908kP0pWexrT5n8i2LrhFaWo92&confirm=xxx', |
||||
modelType: ModelType.CODE_COMPLETION_INSERTION, |
||||
modelReqs: { backend: 'llamacpp', minSysMemory: 2, GPURequired: false, MinGPUVRAM: 2 } |
||||
}; |
||||
|
||||
const model3: IModel = { |
||||
name: 'llama3.1_8B', |
||||
modelOP: RemoteBackendOPModel.CODELLAMA, |
||||
task: 'text-generation', |
||||
modelName: 'llama3_1_8B-q4_0.gguf', |
||||
downloadUrl: 'https://drive.usercontent.google.com/download?id=1I376pl8uORDnUIjfNuqhExK4NCiH3F12&confirm=xxx', |
||||
modelType: ModelType.GENERAL, |
||||
modelReqs: { backend: 'llamacpp', minSysMemory: 8, GPURequired: false, MinGPUVRAM: 8 } |
||||
}; |
||||
|
||||
const model4: IModel = { |
||||
name: 'llama3.1_8B_instruct', |
||||
modelOP: RemoteBackendOPModel.CODELLAMA, |
||||
task: 'text-generation', |
||||
modelName: 'llama3_1_8B-q4_0_instruct.gguf', |
||||
downloadUrl: 'https://drive.usercontent.google.com/download?id=1P-MEH7cPxaR20v7W1qbOEPBzgiY2RDLx&confirm=xxx', |
||||
modelType: ModelType.GENERAL, |
||||
modelReqs: { backend: 'llamacpp', minSysMemory: 8, GPURequired: false, MinGPUVRAM: 8 } |
||||
}; |
||||
|
||||
return [model1, model2, model3, model4]; |
||||
} |
||||
|
||||
const getModel = async (name: string): Promise<IModel | undefined> => { |
||||
return DefaultModels().find(model => model.name === name); |
||||
} |
||||
|
||||
const loadModel = async (modelname: string): Promise<void> => { |
||||
console.log(`Loading model ${modelname}`); |
||||
} |
||||
|
||||
const CompletionParams:IParams = { |
||||
temperature: 0.8, |
||||
topK: 40, |
||||
topP: 0.92, |
||||
max_new_tokens: 15, |
||||
} |
||||
|
||||
const InsertionParams:IParams = { |
||||
temperature: 0.8, |
||||
topK: 40, |
||||
topP: 0.92, |
||||
max_new_tokens: 150, |
||||
} |
||||
|
||||
const GenerationParams:IParams = { |
||||
temperature: 0.5, |
||||
topK: 40, |
||||
topP: 0.92, |
||||
max_new_tokens: 2000, |
||||
stream_result: false, |
||||
} |
||||
|
||||
export { DefaultModels, CompletionParams, InsertionParams, GenerationParams } |
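A minimal sketch showing how the presets above could be paired with a model; it assumes these exports are re-exported from the package index, which this diff does not show.

import { DefaultModels, CompletionParams, InsertionParams, GenerationParams, ModelType, IModel, IParams } from '@remix/remix-ai-core';

// Hypothetical helper matching a model to its sampling preset.
const paramsFor = (model: IModel): IParams => {
  switch (model.modelType) {
  case ModelType.CODE_COMPLETION: return CompletionParams;           // short, fast suggestions
  case ModelType.CODE_COMPLETION_INSERTION: return InsertionParams;  // fill-in-the-middle edits
  default: return GenerationParams;                                  // long-form answers
  }
};

const general = DefaultModels().find(m => m.modelName.startsWith('llama3_1_8B'));
if (general) console.log(paramsFor(general).max_new_tokens); // 2000 for the general preset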
@ -0,0 +1,87 @@ |
||||
// model implementation for the model selection component |
||||
|
||||
import { ModelType } from './constants'; |
||||
|
||||
export interface IModelRequirements{ |
||||
backend: string, |
||||
minSysMemory: number, |
||||
GPURequired: boolean, |
||||
MinGPUVRAM: number, |
||||
} |
||||
|
||||
export interface IModel { |
||||
name: string; |
||||
task: string; |
||||
downloadUrl: string; |
||||
modelName: string; |
||||
modelType: ModelType; |
||||
modelReqs: IModelRequirements; |
||||
downloadPath?: string; |
||||
modelOP?: RemoteBackendOPModel; |
||||
|
||||
} |
||||
export interface IRemoteModel { |
||||
completionUrl: string; |
||||
apiUrl: string; |
||||
} |
||||
|
||||
export interface IModelResponse { |
||||
output: string; |
||||
error: string; |
||||
success: boolean; |
||||
model: IModel; |
||||
} |
||||
|
||||
export interface IStreamResponse { |
||||
generatedText: string; |
||||
isGenerating: boolean; |
||||
} |
||||
|
||||
export interface IModelRequest { |
||||
input: string; |
||||
model: IModel; |
||||
} |
||||
|
||||
export interface InferenceModel { |
||||
model: IModel; |
||||
location: string; |
||||
isRemote: boolean; |
||||
} |
||||
|
||||
export interface ICompletions{ |
||||
code_completion(context, params:IParams): Promise<any>; |
||||
code_insertion(msg_pfx, msg_sfx, params:IParams): Promise<any>; |
||||
} |
||||
|
||||
export interface IParams { |
||||
temperature?: number; |
||||
max_new_tokens?: number; |
||||
repetition_penalty?: number; |
||||
repeatPenalty?:any |
||||
no_repeat_ngram_size?: number; |
||||
num_beams?: number; |
||||
num_return_sequences?: number; |
||||
top_k?: number; |
||||
top_p?: number; |
||||
stream_result?: boolean; |
||||
return_full_text?: boolean; |
||||
nThreads?: number; |
||||
nTokPredict?: number; |
||||
topK?: number; |
||||
topP?: number; |
||||
temp?: number; |
||||
} |
||||
|
||||
export enum AIRequestType { |
||||
COMPLETION, |
||||
GENERAL |
||||
} |
||||
|
||||
export type ChatEntry = [string, string]; |
||||
|
||||
export enum RemoteBackendOPModel{ |
||||
DEEPSEEK, |
||||
CODELLAMA, |
||||
MISTRAL |
||||
} |
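To make the ICompletions and IParams contracts above concrete, here is a minimal sketch of a remote-backed implementation. The endpoint URL and request payload shape are assumptions, not part of this change.

import axios from 'axios';
import { ICompletions, IParams } from '@remix/remix-ai-core';

// Sketch: completions served by a remote inference endpoint (URL is hypothetical).
class RemoteCompletions implements ICompletions {
  constructor(private readonly completionUrl: string) {}

  async code_completion(context: string, params: IParams): Promise<any> {
    const { data } = await axios.post(this.completionUrl, { context, ...params });
    return data;
  }

  async code_insertion(msg_pfx: string, msg_sfx: string, params: IParams): Promise<any> {
    const { data } = await axios.post(this.completionUrl, { msg_pfx, msg_sfx, ...params });
    return data;
  }
}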
@ -0,0 +1,10 @@ |
||||
{ |
||||
"extends": "../../tsconfig.base.json", |
||||
"compilerOptions": { |
||||
"types": ["node"], |
||||
"module": "commonjs", |
||||
"esModuleInterop": true, |
||||
"outDir": "./dist", |
||||
}, |
||||
"include": ["**/*.ts"] |
||||
} |
@ -0,0 +1,15 @@ |
||||
{ |
||||
"extends": "./tsconfig.json", |
||||
"compilerOptions": { |
||||
"module": "commonjs", |
||||
"outDir": "../../dist/out-tsc", |
||||
"declaration": true, |
||||
"rootDir": "./src", |
||||
"types": ["node"] |
||||
}, |
||||
"exclude": [ |
||||
"**/*.spec.ts", |
||||
"test/" |
||||
], |
||||
"include": ["**/*.ts"] |
||||
} |
@ -0,0 +1,23 @@ |
||||
import { IParams } from "@remix/remix-ai-core"; |
||||
import { StatusEvents } from "@remixproject/plugin-utils"; |
||||
|
||||
export interface IRemixAID { |
||||
events: { |
||||
activated():void, |
||||
onInference():void, |
||||
onInferenceDone():void, |
||||
onStreamResult(streamText: string):void, |
||||
|
||||
} & StatusEvents, |
||||
methods: { |
||||
code_completion(context: string): Promise<string>, |
||||
code_insertion(msg_pfx: string, msg_sfx: string): Promise<string>, |
||||
code_generation(prompt: string): Promise<string | null>, |
||||
code_explaining(code: string, context?: string): Promise<string | null>, |
||||
error_explaining(prompt: string): Promise<string | null>, |
||||
solidity_answer(prompt: string): Promise<string | null>, |
||||
initializeModelBackend(local: boolean, generalModel?, completionModel?): Promise<boolean>, |
||||
chatPipe(pipeMessage: string): Promise<void>, |
||||
ProcessChatRequestBuffer(params:IParams): Promise<void>, |
||||
} |
||||
} |
@ -0,0 +1,21 @@ |
||||
import { IModel, IParams, IRemoteModel } from "@remix/remix-ai-core"; |
||||
import { StatusEvents } from "@remixproject/plugin-utils"; |
||||
|
||||
export interface IRemixAI { |
||||
events: { |
||||
onStreamResult(streamText: string): Promise<void>, |
||||
activated(): Promise<void>, |
||||
} & StatusEvents, |
||||
methods: { |
||||
code_completion(context: string): Promise<string>, |
||||
code_insertion(msg_pfx: string, msg_sfx: string): Promise<string>, |
||||
code_generation(prompt: string): Promise<string | null>, |
||||
code_explaining(code: string, context?: string): Promise<string | null>, |
||||
error_explaining(prompt: string): Promise<string | null>, |
||||
solidity_answer(prompt: string): Promise<string | null>, |
||||
initializeModelBackend(local: boolean, generalModel?, completionModel?): Promise<void>, |
||||
chatPipe(pipeMessage: string): Promise<void>, |
||||
ProcessChatRequestBuffer(params:IParams): Promise<void>, |
||||
initialize(model1?:IModel, model2?:IModel, remoteModel?:IRemoteModel, useRemote?:boolean): Promise<void>, |
||||
} |
||||
} |
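For consumers of this interface, a sketch of how another plugin reaches these methods through the plugin engine; the consumer profile and method below are hypothetical.

import { Plugin } from '@remixproject/engine';

class MyAnalyzerPlugin extends Plugin {
  constructor() {
    super({ name: 'myAnalyzer', methods: ['reviewSelection'] }); // hypothetical profile
  }

  async reviewSelection(code: string): Promise<string | null> {
    // 'remixAI' exposes code_explaining(code, context?) as declared above.
    return await this.call('remixAI', 'code_explaining', code)
  }
}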
@ -1,31 +0,0 @@ |
||||
export class CompletionTimer { |
||||
private duration: number; |
||||
private timerId: NodeJS.Timeout | null = null; |
||||
private callback: () => void; |
||||
|
||||
constructor(duration: number, callback: () => void) { |
||||
this.duration = duration; |
||||
this.callback = callback; |
||||
} |
||||
|
||||
start() { |
||||
if (this.timerId) { |
||||
console.error("Timer is already running."); |
||||
return; |
||||
} |
||||
|
||||
this.timerId = setTimeout(() => { |
||||
this.callback(); |
||||
this.timerId = null; |
||||
}, this.duration); |
||||
} |
||||
|
||||
stop() { |
||||
if (this.timerId) { |
||||
clearTimeout(this.timerId); |
||||
this.timerId = null; |
||||
} else { |
||||
console.error("Timer is not running."); |
||||
} |
||||
} |
||||
} |
@ -0,0 +1 @@ |
||||
export { RemixAITab } from './lib/components/RemixAI' |
@ -0,0 +1,84 @@ |
||||
import React, { useContext, useEffect, useState } from 'react' |
||||
import '../remix-ai.css' |
||||
import { DefaultModels } from '@remix/remix-ai-core'; |
||||
|
||||
export const Default = (props) => { |
||||
const [searchText, setSearchText] = useState(''); |
||||
const [resultText, setResultText] = useState(''); |
||||
const pluginName = 'remixAI' |
||||
const appendText = (newText) => { |
||||
setResultText(resultText => resultText + newText); |
||||
} |
||||
|
||||
useEffect(() => { |
||||
const handleResultReady = async (e) => { |
||||
appendText(e); |
||||
}; |
||||
if (props.plugin.isOnDesktop ) { |
||||
props.plugin.on(props.plugin.remixDesktopPluginName, 'onStreamResult', (value) => { |
||||
handleResultReady(value); |
||||
}) |
||||
} |
||||
}, []) |
||||
|
||||
return ( |
||||
<div> |
||||
<div className="remix_ai_plugin_search_container"> |
||||
<input |
||||
type="text" |
||||
className="remix_ai_plugin_search-input" |
||||
placeholder="Search..." |
||||
value={searchText} |
||||
onChange={(e) => setSearchText(e.target.value)} |
||||
></input> |
||||
<button |
||||
className="remix_ai_plugin_search_button text-ai pl-2 pr-0 py-0 d-flex" |
||||
onClick={() => console.log('search not implemented')} |
||||
> |
||||
<i |
||||
className="fa-solid fa-arrow-right" |
||||
style={{ color: 'black' }} |
||||
></i> |
||||
<span className="position-relative text-ai text-sm pl-1" |
||||
style={{ fontSize: "x-small", alignSelf: "end" }}>Search</span> |
||||
</button> |
||||
|
||||
<button className="remix_ai_plugin_download_button text-ai pl-2 pr-0 py-0 d-flex" |
||||
|
||||
onClick={async () => { |
||||
if (props.plugin.isOnDesktop ) { |
||||
await props.plugin.call(pluginName, 'downloadModel', DefaultModels()[3]); |
||||
} |
||||
}} |
||||
> Download Model </button> |
||||
|
||||
</div> |
||||
|
||||
<div className="remix_ai_plugin_find_container_internal"> |
||||
<textarea |
||||
className="remix_ai_plugin_search_result_textbox" |
||||
rows={10} |
||||
cols={50} |
||||
placeholder="Results..." |
||||
onChange={(e) => { |
||||
console.log('resultText changed', e.target.value) |
||||
setResultText(e.target.value)} |
||||
} |
||||
value={resultText} |
||||
readOnly |
||||
/> |
||||
<button className="remix_ai_plugin_download_button text-ai pl-2 pr-0 py-0 d-flex" |
||||
|
||||
onClick={async () => { |
||||
props.plugin.call("remixAI", 'initialize', DefaultModels()[1], DefaultModels()[3]); |
||||
}} |
||||
> Init Model </button> |
||||
</div> |
||||
<div className="remix_ai_plugin_find-part"> |
||||
<a href="#" className="remix_ai_plugin_search_result_item_title">/fix the problems in my code</a> |
||||
<a href="#" className="remix_ai_plugin_search_result_item_title">/tests add unit tests for my code</a> |
||||
<a href="#" className="remix_ai_plugin_search_result_item_title">/explain how the selected code works</a> |
||||
</div> |
||||
</div> |
||||
); |
||||
} |
@ -0,0 +1,78 @@ |
||||
// UI interface for selecting a model from a list of models |
||||
// This component is used in the ModelSelectionModal component |
||||
// It is a dropdown list of models that the user can select from |
||||
// The user can also search for a specific model by typing in the search bar |
||||
// The user can also filter the models by type |
||||
// The user can select a model from the dropdown list |
||||
// The panel controlling the model selection can be hidden or shown |
||||
// Once selected, the model is either loaded from local storage or downloaded |
||||
// The Remix AI desktop plugin provides the interface for storing the model in local storage after downloading |
||||
|
||||
import React, { useState, useEffect } from 'react'; |
||||
import { Select, Input } from 'antd'; |
||||
import { IModel } from '@remix/remix-ai-core'; |
||||
import { DefaultModels } from '@remix/remix-ai-core'; |
||||
import { ModelType } from '@remix/remix-ai-core'; |
||||
import { useTranslation } from 'react-i18next'; |
||||
|
||||
const { Option } = Select; |
||||
const { Search } = Input; |
||||
|
||||
interface ModelSelectionProps { |
||||
onSelect: (model: IModel) => void; |
||||
} |
||||
|
||||
export const ModelSelection: React.FC<ModelSelectionProps> = ({ onSelect }) => { |
||||
const { t } = useTranslation(); |
||||
const [models, setModels] = useState<IModel[]>([]); |
||||
const [filteredModels, setFilteredModels] = useState<IModel[]>([]); |
||||
const [search, setSearch] = useState<string>(''); |
||||
const [type, setType] = useState<ModelType | 'all'>('all'); |
||||
|
||||
useEffect(() => { |
||||
setModels(DefaultModels()); |
||||
}, []); |
||||
|
||||
useEffect(() => { |
||||
setFilteredModels(models.filter((model) => { |
||||
return model.name.toLowerCase().includes(search.toLowerCase()) && |
||||
(type === 'all' || model.modelType === type); |
||||
})); |
||||
}, [models, search, type]); |
||||
|
||||
return ( |
||||
<div> |
||||
<Search |
||||
placeholder={t('search_models')} |
||||
onChange={(e) => setSearch(e.target.value)} |
||||
style={{ width: 200, marginBottom: 10 }} |
||||
/> |
||||
<Select |
||||
defaultValue="all" |
||||
style={{ width: 200, marginBottom: 10 }} |
||||
onChange={(value) => setType(value)} |
||||
> |
||||
<Option value="all">{t('all_models')}</Option> |
||||
<Option value={ModelType.GENERAL}>{t('general_models')}</Option> |
||||
<Option value={ModelType.CODE_COMPLETION}>{t('code_completion_models')}</Option> |
||||
<Option value={ModelType.CODE_COMPLETION_INSERTION}>{t('code_insertion_models')}</Option> |
||||
</Select> |
||||
<Select |
||||
showSearch |
||||
style={{ width: 200 }} |
||||
placeholder={t('select_model')} |
||||
optionFilterProp="children" |
||||
onChange={(value) => onSelect(models.find((model) => model.name === value))} |
||||
filterOption={(input, option) => |
||||
option.props.children.toLowerCase().indexOf(input.toLowerCase()) >= 0 |
||||
} |
||||
> |
||||
{filteredModels.map((model) => ( |
||||
<Option key={model.name} value={model.name}> |
||||
{model.name} |
||||
</Option> |
||||
))} |
||||
</Select> |
||||
</div> |
||||
); |
||||
}; |
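An illustrative mount point for the component above; the wrapper component and the import path are hypothetical.

import React from 'react';
import { IModel } from '@remix/remix-ai-core';
import { ModelSelection } from './ModelSelection'; // path assumed

export const ModelSettings = () => (
  <ModelSelection onSelect={(model: IModel) => console.log('selected model', model.name)} />
);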
@ -0,0 +1,15 @@ |
||||
import React, { useContext } from 'react' |
||||
import '../remix-ai.css' |
||||
import { Default } from './Default' |
||||
|
||||
export const RemixAITab = (props) => { |
||||
|
||||
const plugin = props.plugin |
||||
return ( |
||||
<> |
||||
<div id="remixAITab pr-4 px-2 pb-4"> |
||||
<Default plugin={plugin}></Default> |
||||
</div> |
||||
</> |
||||
) |
||||
} |
@ -0,0 +1,167 @@ |
||||
/* Existing CSS */ |
||||
|
||||
.remix_ai_plugin_search_result_item_title { |
||||
display: flex; |
||||
-webkit-user-select: none; /* Safari */ |
||||
-moz-user-select: none; /* Firefox */ |
||||
-ms-user-select: none; /* IE10+/Edge */ |
||||
user-select: none; /* Standard */ |
||||
cursor: pointer; |
||||
align-items: center; |
||||
color: #58a6ff; |
||||
text-decoration: none; |
||||
font-size: 1.2em; |
||||
margin: 10px 0; |
||||
} |
||||
|
||||
.remix_ai_plugin_search_result_item_title:hover { |
||||
text-decoration: underline; |
||||
} |
||||
|
||||
.remix_ai_plugin_wrap_summary { |
||||
overflow: hidden; |
||||
white-space: nowrap; |
||||
-webkit-user-select: none; /* Safari */ |
||||
-moz-user-select: none; /* Firefox */ |
||||
-ms-user-select: none; /* IE10+/Edge */ |
||||
user-select: none; /* Standard */ |
||||
cursor: pointer; |
||||
} |
||||
|
||||
.remix_ai_plugin_find-part { |
||||
display: flex; |
||||
flex-direction: column; |
||||
padding-top: 5px; |
||||
} |
||||
|
||||
.remix_ai_plugin_controls { |
||||
display: flex; |
||||
} |
||||
|
||||
.remix_ai_plugin_search_tab .remix_ai_plugin_search_line_container { |
||||
display: flex; |
||||
flex-direction: row; |
||||
position: relative; |
||||
} |
||||
|
||||
.remix_ai_plugin_search_tab .remix_ai_plugin_search_line { |
||||
width: 100%; |
||||
overflow: hidden; |
||||
display: flex; |
||||
} |
||||
|
||||
.remix_ai_plugin_search_tab .remix_ai_plugin_search_control { |
||||
flex-grow: 0; |
||||
position: absolute; |
||||
right: 0px; |
||||
top: 0px; |
||||
} |
||||
|
||||
.remix_ai_plugin_summary_right { |
||||
min-width: 0; |
||||
white-space: pre; |
||||
text-overflow: ellipsis; |
||||
overflow: hidden; |
||||
} |
||||
|
||||
.remix_ai_plugin_search_tab .remix_ai_plugin_replace_strike { |
||||
text-decoration: line-through; |
||||
} |
||||
|
||||
.remix_ai_plugin_summary_left { |
||||
white-space: pre; |
||||
} |
||||
|
||||
.remix_ai_plugin_search_tab mark { |
||||
padding: 0; |
||||
white-space: pre; |
||||
} |
||||
|
||||
.remix_ai_plugin_search_tab .remix_ai_plugin_search_line_container .remix_ai_plugin_search_control { |
||||
display: none; |
||||
} |
||||
|
||||
.remix_ai_plugin_search_tab .remix_ai_plugin_search_line_container:hover .remix_ai_plugin_search_control { |
||||
display: block; |
||||
} |
||||
|
||||
.remix_ai_plugin_search_tab .remix_ai_plugin_search_line_container:hover .remix_ai_plugin_search_line { |
||||
width: 93%; |
||||
} |
||||
|
||||
.remix_ai_plugin_search-input { |
||||
display: flex; |
||||
flex-direction: row; |
||||
align-items: center; |
||||
padding: 10px; |
||||
margin: 10px 0; |
||||
width: 100%; |
||||
max-width: 500px; |
||||
border: 1px solid #ccc; |
||||
border-radius: 4px; |
||||
} |
||||
|
||||
.remix_ai_plugin_search_tab .checked { |
||||
background-color: var(--secondary); |
||||
} |
||||
|
||||
.remix_ai_plugin_search_tab .remix_ai_plugin_search_file_name { |
||||
text-overflow: ellipsis; |
||||
overflow: hidden; |
||||
text-transform: uppercase; |
||||
} |
||||
|
||||
.remix_ai_plugin_search_tab .remix_ai_plugin_result_count { |
||||
flex-grow: 1; |
||||
text-align: right; |
||||
display: flex; |
||||
justify-content: flex-end; |
||||
} |
||||
|
||||
.remix_ai_plugin_search_tab .remix_ai_plugin_result_count_number { |
||||
font-size: x-small; |
||||
} |
||||
|
||||
.remix_ai_plugin_search_container { |
||||
display: flex; |
||||
flex-direction: row; |
||||
justify-content: center; |
||||
margin-top: 20px; |
||||
} |
||||
|
||||
.remix_ai_plugin_search_container_internal { |
||||
display: flex; |
||||
flex-direction: column; |
||||
flex-grow: 1; |
||||
margin-top: 20px; |
||||
align-items: center; |
||||
} |
||||
|
||||
.remix_ai_plugin_search_container_arrow { |
||||
display: flex !important; |
||||
align-items: center; |
||||
cursor: pointer !important; |
||||
} |
||||
|
||||
.remix_ai_plugin_wrap_summary_replace { |
||||
display: flex; |
||||
flex-direction: row; |
||||
justify-content: flex-end; |
||||
} |
||||
|
||||
.remix_ai_plugin_search_indicator { |
||||
white-space: pre; |
||||
text-overflow: ellipsis; |
||||
overflow: hidden; |
||||
} |
||||
|
||||
.remix_ai_plugin_search_result_textbox { |
||||
width: 100%; |
||||
max-width: 500px; |
||||
padding: 10px; |
||||
border: 1px solid #ccc; |
||||
border-radius: 4px; |
||||
resize: none; |
||||
margin: 10px 0; |
||||
color: #333; |
||||
} |
File diff suppressed because it is too large