6 Commits
1.2.3 ... 1.3.1

Author          SHA1        Message                                                  Date
Achiya Elyasaf  082afd778f  Raise version                                            2023-12-05 11:00:37 +02:00
Achiya Elyasaf  dd19cd9fa3  Fixed the system context for the Complete command        2023-12-05 10:59:37 +02:00
Achiya Elyasaf  eb2a9ae867  Raise version                                            2023-12-04 13:53:23 +02:00
Achiya Elyasaf  a7b540de70  Merge pull request #7 from bThink-BGU/chat-api           2023-12-04 13:52:21 +02:00
                            (Update OpenAI API)
Achiya Elyasaf  116c4898d5  Replace the API to the new OpenAI Chat Completion API.   2023-12-04 13:51:16 +02:00
                            Closes #6.
Achiya Elyasaf  7c475c9de1  First commit for OpenAI's chat api                       2023-12-04 12:50:01 +02:00
2 changed files with 29 additions and 16 deletions

Changed file 1 of 2 (extension manifest):

@@ -42,5 +42,5 @@
   "manifest_version": 3,
   "name": "LeafLLM",
   "homepage_url": "https://github.com/achiyae/LeafLLM",
-  "version": "1.2.3"
+  "version": "1.3.1"
 }

Changed file 2 of 2 (OpenAI API client code):

@@ -1,6 +1,6 @@
 class OpenAIAPI {
-  static defaultModel = 'text-davinci-003'
+  static defaultModel = 'gpt-3.5-turbo'

   constructor(apiKey) {
     this.apiKey = apiKey
@@ -11,6 +11,8 @@ class OpenAIAPI {
     const url = `https://api.openai.com/v1/${endpoint}`
     if (!data.model) data.model = OpenAIAPI.defaultModel
+    if (!data.n) data.n = 1
+    if (!data.temperature) data.temperature = 0.5

     const xhr = new XMLHttpRequest()
     xhr.open('POST', url, true)
@@ -34,27 +36,38 @@ class OpenAIAPI {
   async completeText(text) {
     const data = {
       max_tokens: 512,
-      prompt: text,
-      n: 1,
-      temperature: 0.5
+      messages: [
+        { role: 'system', content: 'You are an assistant in a Latex editor that continues the given text. No need to rewrite the given text' },
+        { role: 'user', 'content': text }
+      ],
     }
-    return this.query('completions', data)
-      .then(result => result[0].text)
+    return this.query('chat/completions', data)
+      .then(result => result[0]['message'].content)
   }

   async improveText(text) {
     const data = {
-      model: 'code-davinci-edit-001',
-      input: text,
-      instruction:
-        'Correct any spelling mistakes, grammar mistakes, and improve the overall style of the (latex) text.',
-      n: 1,
-      temperature: 0.5
+      messages: [
+        { role: 'system', content: 'You are an assistant in a Latex editor' },
+        { role: 'user', 'content': 'Improve the following text:\n'+text }],
     }
-    return this.query('edits', data)
-      .then(result => result[0].text)
+    return this.query('chat/completions', data)
+      .then(result => result[0]['message'].content)
+  }
+
+  async ask(text) {
+    const data = {
+      max_tokens: 512,
+      messages: [
+        { role: 'system', content: 'You are an assistant in a Latex editor. Answer questions without introduction/explanations' },
+        { role: 'user', 'content': text }
+      ],
+    }
+    return this.query('chat/completions', data)
+      .then(result => result[0]['message'].content)
   }
 }
@@ -108,7 +121,7 @@ async function askHandler(openAI) {
   const selection = window.getSelection()
   const selectedText = selection.toString()
   if (!selectedText) return
-  const editedText = (await openAI.completeText('In latex, ' + selectedText)).trimStart()
+  const editedText = (await openAI.ask(selectedText)).trimStart()
   replaceSelectedText(editedText, selection)
 }