pull/5241/head
STetsing 4 months ago
parent 39e89ab6ac
commit 52408f32e5
Changed files:

  apps/remix-ide/src/app/plugins/remixAIPlugin.tsx (15 lines changed)
  libs/remix-ai-core/src/inferencers/remote/remoteInference.ts (16 lines changed)
  libs/remix-ai-core/src/types/models.ts (2 lines changed)
  libs/remix-ui/tabs/src/lib/remix-ui-tabs.tsx (9 lines changed)

apps/remix-ide/src/app/plugins/remixAIPlugin.tsx

@@ -3,7 +3,7 @@ import { ViewPlugin } from '@remixproject/engine-web'
 import { Plugin } from '@remixproject/engine';
 import { RemixAITab, ChatApi } from '@remix-ui/remix-ai'
 import React, { useCallback } from 'react';
-import { ICompletions, IModel, RemoteInferencer, IRemoteModel, IParams, GenerationParams, CodeExplainAgent} from '@remix/remix-ai-core';
+import { ICompletions, IModel, RemoteInferencer, IRemoteModel, IParams, GenerationParams, CodeExplainAgent } from '@remix/remix-ai-core';

 type chatRequestBufferT<T> = {
   [key in keyof T]: T[key]
@@ -52,8 +52,8 @@ export class RemixAIPlugin extends ViewPlugin {
       console.log('Activating RemixAIPlugin on browser')
       this.initialize()
     }
     this.call('sidePanel', 'pinView', profile)
   }

   async initialize(model1?:IModel, model2?:IModel, remoteModel?:IRemoteModel, useRemote?:boolean){
     if (this.isOnDesktop) {
       // on desktop use remote inferencer -> false
@@ -130,7 +130,7 @@ export class RemixAIPlugin extends ViewPlugin {
       this.call('terminal', 'log', { type: 'aitypewriterwarning', value: "RemixAI is already busy!" })
       return
     }

     let result
     if (this.isOnDesktop) {
       result = await this.call(this.remixDesktopPluginName, 'code_explaining', prompt, context, params)
@@ -175,25 +175,24 @@ export class RemixAIPlugin extends ViewPlugin {
       }
       if (pipeMessage) ChatApi.composer.send(pipeMessage)
       else {
-        if (fn === "code_explaining") ChatApi.composer.send("Explain the current code")
+        if (fn === "code_explaining") ChatApi.composer.send("Explain the current code")
         else if (fn === "error_explaining") ChatApi.composer.send("Explain the error")
-        else if (fn === "solidity_answer") ChatApi.composer.send("Answer the following question")
+        else if (fn === "solidity_answer") ChatApi.composer.send("Answer the following question")
         else console.log("chatRequestBuffer is not empty. First process the last request.")
       }
     }
-    else{
+    else {
       console.log("chatRequestBuffer is not empty. First process the last request.")
     }
   }

   async ProcessChatRequestBuffer(params:IParams=GenerationParams){
     if (this.chatRequestBuffer != null){
       const result = this[this.chatRequestBuffer.fn_name](this.chatRequestBuffer.prompt, this.chatRequestBuffer.context, params)
       this.chatRequestBuffer = null
       return result
     }
-    else{
+    else {
       console.log("chatRequestBuffer is empty.")
       return ""
     }

libs/remix-ai-core/src/inferencers/remote/remoteInference.ts

@@ -17,7 +17,7 @@ export class RemoteInferencer implements ICompletions {
   model_op = RemoteBackendOPModel.CODELLAMA // default model operation change this to llama if necessary
   event: EventEmitter
   test_env=false
-  test_url="http://solcodertest.org/"
+  test_url="http://solcodertest.org"

   constructor(apiUrl?:string, completionUrl?:string) {
     this.api_url = apiUrl!==undefined ? apiUrl: this.test_env? this.test_url : "https://solcoder.remixproject.org"
@@ -42,6 +42,7 @@ export class RemoteInferencer implements ICompletions {
       }
       case AIRequestType.GENERAL:
         if (result.statusText === "OK") {
+          if (result.data?.error) return result.data?.error
           const resultText = result.data.generatedText
           ChatHistory.pushHistory(payload.prompt, resultText)
           return resultText
@@ -75,11 +76,9 @@ export class RemoteInferencer implements ICompletions {
       if (payload.return_stream_response) {
         return response
       }
-
       const reader = response.body!.getReader();
       const decoder = new TextDecoder();
       const parser = new JsonStreamParser();
-
       while (true) {
         const { done, value } = await reader.read();
         if (done) break;
@@ -87,7 +86,6 @@ export class RemoteInferencer implements ICompletions {
         try {
-          console.log("value" + decoder.decode(value))
           const chunk = parser.safeJsonParse<{ generatedText: string; isGenerating: boolean }>(decoder.decode(value, { stream: true }));
           for (const parsedData of chunk) {
             if (parsedData.isGenerating) {
               this.event.emit('onStreamResult', parsedData.generatedText);
@@ -123,14 +121,14 @@ export class RemoteInferencer implements ICompletions {
   async code_insertion(msg_pfx, msg_sfx, options:IParams=InsertionParams): Promise<any> {
     // const payload = { "data":[msg_pfx, "code_insertion", msg_sfx, 1024, 0.5, 0.92, 50]}
-    const payload = {"endpoint":"code_insertion", msg_pfx, msg_sfx, ...options, prompt: '' }
+    const payload = { "endpoint":"code_insertion", msg_pfx, msg_sfx, ...options, prompt: '' }
     return this._makeRequest(payload, AIRequestType.COMPLETION)
   }

   async code_generation(prompt, options:IParams=GenerationParams): Promise<any> {
     // const payload = { "data":[prompt, "code_completion", "", false,1000,0.9,0.92,50]}
     const payload = { prompt, "endpoint":"code_completion", ...options }
-    if (options.stream_result) return this._streamInferenceRequest(payload.endpoint, payload, AIRequestType.COMPLETION)
+    if (options.stream_result) return this._streamInferenceRequest(payload.endpoint, payload, AIRequestType.COMPLETION)
     else return this._makeRequest(payload, AIRequestType.COMPLETION)
   }
@@ -138,20 +136,20 @@ export class RemoteInferencer implements ICompletions {
     const main_prompt = buildSolgptPromt(prompt, this.model_op)
     // const payload = { "data":[main_prompt, "solidity_answer", false,2000,0.9,0.8,50]}
     const payload = { 'prompt': main_prompt, "endpoint":"solidity_answer", ...options }
-    if (options.stream_result) return this._streamInferenceRequest(payload.endpoint, payload, AIRequestType.GENERAL)
+    if (options.stream_result) return this._streamInferenceRequest(payload.endpoint, payload, AIRequestType.GENERAL)
     else return this._makeRequest(payload, AIRequestType.GENERAL)
   }

   async code_explaining(prompt, context:string="", options:IParams=GenerationParams): Promise<any> {
     // const payload = { "data":[prompt, "code_explaining", false,2000,0.9,0.8,50, context]}
     const payload = { prompt, "endpoint":"code_explaining", context, ...options }
-    if (options.stream_result) return this._streamInferenceRequest(payload.endpoint, payload, AIRequestType.GENERAL)
+    if (options.stream_result) return this._streamInferenceRequest(payload.endpoint, payload, AIRequestType.GENERAL)
     else return this._makeRequest(payload, AIRequestType.GENERAL)
   }

   async error_explaining(prompt, options:IParams=GenerationParams): Promise<any> {
     const payload = { prompt, "endpoint":"error_explaining", ...options }
-    if (options.stream_result) return this._streamInferenceRequest(payload.endpoint, payload , AIRequestType.GENERAL)
+    if (options.stream_result) return this._streamInferenceRequest(payload.endpoint, payload , AIRequestType.GENERAL)
     else return this._makeRequest(payload, AIRequestType.GENERAL)
   }
 }

libs/remix-ai-core/src/types/models.ts

@@ -61,6 +61,7 @@ const CompletionParams:IParams = {
   topK: 40,
   topP: 0.92,
   max_new_tokens: 15,
+  stream_result: false,
 }

 const InsertionParams:IParams = {
@@ -68,6 +69,7 @@ const InsertionParams:IParams = {
   topK: 40,
   topP: 0.92,
   max_new_tokens: 150,
+  stream_result: false,
 }

 const GenerationParams:IParams = {

libs/remix-ui/tabs/src/lib/remix-ui-tabs.tsx

@@ -252,10 +252,9 @@ export const TabsUI = (props: TabsUIProps) => {
           setExplaining(true)
           // if plugin is pinned,
           if (await props.plugin.call('pinnedPanel', 'currentFocus') === 'remixAI'){
-            console.log("pinned has focus")
             await props.plugin.call('remixAI', 'chatPipe', 'code_explaining', content)
           }
-          else{
+          else {
             const profile = {
               name: 'remixAI',
               displayName: 'Remix AI',
@@ -271,14 +270,12 @@ export const TabsUI = (props: TabsUIProps) => {
               documentation: 'https://remix-ide.readthedocs.io/en/latest/remixai.html',
               maintainedBy: 'Remix'
             }
-            console.log("pinned does not have focus")
             // await props.plugin.call('sidePanel', 'focus', 'remixAI')
             await props.plugin.call('sidePanel', 'pinView', profile)
             setTimeout(async () => {
-              await props.plugin.call('remixAI', 'chatPipe', 'code_explaining', content)
-            }, 500)
+              await props.plugin.call('remixAI', 'chatPipe', 'code_explaining', content)
+            }, 500)
           }
           // await props.plugin.call('remixAI', 'code_explaining', content)
           setExplaining(false)
           _paq.push(['trackEvent', 'ai', 'remixAI', 'explain_file'])
