# bodhijs

A JavaScript library for interacting with local LLM capabilities via the Bodhi Browser Extension.
## Installation

You can use bodhijs either via npm or directly in the browser with a script tag.

### npm

```bash
npm install @bodhiapp/bodhijs
```

### Script tag

Download the latest `bodhi.js` from the releases page and include it in your HTML:

```html
<script src="dist/bodhi.js"></script>
```
## Usage

### ES module (npm)

```js
import { bodhijs } from '@bodhiapp/bodhijs';

// Check if extension is installed
const isInstalled = bodhijs.isInstalled();
console.log('Extension installed:', isInstalled);

// Example chat completion
async function example() {
  try {
    // First verify connection with a ping
    const pingResult = await bodhijs.ping();
    console.log('Ping result:', pingResult);

    // Use chat completions
    const response = await bodhijs.chat.completions.create({
      model: 'bartowski/Meta-Llama-3.1-8B-Instruct-GGUF:Q4_K_M',
      messages: [
        { role: 'user', content: 'Hello, how can you help me?' }
      ]
    });
    console.log('Chat response:', response);
  } catch (error) {
    console.error('Error:', error);
  }
}

// Call the example function
example();
```
### Streaming

You can also use streaming responses to get incremental tokens as they're generated:
```js
import { bodhijs } from '@bodhiapp/bodhijs';

async function streamingExample() {
  try {
    // Create a streaming request by setting stream: true
    const stream = await bodhijs.chat.completions.create({
      model: 'bartowski/Meta-Llama-3.1-8B-Instruct-GGUF:Q4_K_M',
      messages: [
        { role: 'user', content: 'Write a short poem about AI' }
      ],
      stream: true // Enable streaming
    });

    // Process stream chunks as they arrive
    for await (const chunk of stream) {
      // Each chunk has a `choices` array with deltas
      if (chunk.choices[0].delta.content) {
        // Process each token as it arrives
        process.stdout.write(chunk.choices[0].delta.content);
      }
    }
  } catch (error) {
    console.error('Streaming error:', error);
  }
}

streamingExample();
```
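Note that `process.stdout.write` only exists in Node.js. In a browser, a sketch of the same loop might append tokens to a DOM element instead; the `#output` element id below is an assumption for illustration, not part of the library:

```js
import { bodhijs } from '@bodhiapp/bodhijs';

async function streamToPage() {
  // Assumes an element like <pre id="output"></pre> exists on the page.
  const output = document.getElementById('output');

  const stream = await bodhijs.chat.completions.create({
    model: 'bartowski/Meta-Llama-3.1-8B-Instruct-GGUF:Q4_K_M',
    messages: [{ role: 'user', content: 'Write a short poem about AI' }],
    stream: true
  });

  for await (const chunk of stream) {
    // Append each delta token as it arrives; fall back to '' for
    // chunks that carry no content (e.g. the final stop chunk).
    output.textContent += chunk.choices[0].delta.content ?? '';
  }
}

streamToPage().catch(console.error);
```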
### Script tag

```html
<script src="dist/bodhi.js"></script>
<script>
  // The library is available globally as 'bodhijs'

  // Check if extension is installed
  const isInstalled = bodhijs.isInstalled();
  console.log('Extension installed:', isInstalled);

  // Example chat completion
  async function example() {
    try {
      // First verify connection with a ping
      const pingResult = await bodhijs.ping();
      console.log('Ping result:', pingResult);

      // Use chat completions
      const response = await bodhijs.chat.completions.create({
        model: 'bartowski/Meta-Llama-3.1-8B-Instruct-GGUF:Q4_K_M',
        messages: [
          { role: 'user', content: 'Hello, how can you help me?' }
        ]
      });
      console.log('Chat response:', response);
    } catch (error) {
      console.error('Error:', error);
    }
  }

  // Call the example function
  example();
</script>
```
## API Reference

### bodhijs.isInstalled()

Returns a boolean indicating whether the Bodhi Browser Extension is installed.
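A minimal sketch of gating library calls on this check; the error message and fail-fast behavior here are illustrative, not part of the library:

```js
import { bodhijs } from '@bodhiapp/bodhijs';

function requireExtension() {
  // Fail fast if the extension is missing rather than letting a
  // later chat call reject with a less obvious error.
  if (!bodhijs.isInstalled()) {
    throw new Error('Bodhi Browser Extension not detected');
  }
}
```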
### bodhijs.ping()

Tests the connection to the extension. Returns a promise that resolves to a ping response.
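A sketch of using `ping()` as a health check before the first completion; the exact shape of the ping response is not documented here, so this only logs it and treats a rejected promise as "unreachable":

```js
import { bodhijs } from '@bodhiapp/bodhijs';

async function checkConnection() {
  try {
    const pong = await bodhijs.ping();
    console.log('Extension reachable:', pong);
    return true;
  } catch (error) {
    console.error('Extension did not respond to ping:', error);
    return false;
  }
}
```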
### bodhijs.chat.completions.create(options)

Creates a chat completion request.

- `options.model`: The model to use (e.g., `'bartowski/Meta-Llama-3.1-8B-Instruct-GGUF:Q4_K_M'`)
- `options.messages`: Array of message objects with `role` and `content`
- `options.stream`: Boolean flag to enable streaming responses (default: `false`)
- `options.temperature`: Controls randomness (0-2, default varies by model)
- `options.top_p`: Controls diversity via nucleus sampling (0-1, default varies by model)
- `options.max_tokens`: Maximum tokens to generate (default varies by model)
- Other standard parameters following the OpenAI API format

Returns:

- When `stream: false` (default): a Promise that resolves to a ChatResponse object
- When `stream: true`: a Stream object that yields ChatCompletionChunk objects as they arrive
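Because the return type depends on the `stream` flag, one pattern is a small wrapper that always yields a single string either way. This sketch assumes the OpenAI-style response shapes described above (full text in `choices[0].message.content` for non-streaming, deltas in `choices[0].delta.content` for streaming):

```js
import { bodhijs } from '@bodhiapp/bodhijs';

async function complete(options) {
  const result = await bodhijs.chat.completions.create(options);

  if (!options.stream) {
    // Non-streaming: the full text lives on the single response object.
    return result.choices[0].message.content;
  }

  // Streaming: concatenate the delta tokens into one string.
  let text = '';
  for await (const chunk of result) {
    text += chunk.choices[0].delta.content ?? '';
  }
  return text;
}
```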
## Development

```bash
# Install dependencies
npm install

# Build the library
npm run build

# Run in development mode with watch
npm run dev

# Run validation (format and lint checks)
npm run validate

# Format code
npm run format

# Run lint checks
npm run lint
```
## Testing

The library is tested through integration tests in the `integration-tests` directory. These tests verify both npm package usage and script tag usage.

To run the tests:

- Build the library: `npm run build`
- Navigate to integration-tests: `cd ../integration-tests`
- Run tests: `npm test`
## License

MIT