This is a generic API for calling different Generative AI models (OpenAI, Vertex AI, Google, ...). The API can be invoked with a text, image or video prompt to get a text or streamed response. It is implemented as a Node.js module to be imported into your project.
- gemini-2.0-flash
Get the following values from the Google credentials JSON file. The complete procedure is available here
Simpler, but only suitable for testing or non-industrial projects
- gemini-1.5-flash
Get your API key by following this procedure
- gpt-3.5
- gpt-3.5-turbo
- gpt-4o-mini
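The example code further down selects a model by numeric index (for instance, 4 for Gemini 1.5 Flash via Vertex AI). The package also exports a getModels helper; its exact return shape is not documented here, so the sketch below simply prints whatever it returns so you can check which index maps to which model:

```js
import { getModels } from '@drobs/ai-api';

// getModels is exported by the package, but its return shape (and whether it
// is synchronous) is not documented here, so this sketch just prints whatever
// it returns. Promise.resolve() covers both a plain value and a promise.
const models = await Promise.resolve(getModels());
console.log(models);
```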
npm install @drobs/ai-api
- GOOGLE_API_PROJECT_ID=Your Project ID (ex : the-experience-xxxxx)
- GOOGLE_API_PRIVATE_KEY_ID=Your Private Key ID (ex : 87adbdad1eb9b1540edf867bd8d79926eb2ad5c8)
- GOOGLE_API_PRIVATE_KEY=Your Private Key (ex : -----BEGIN PRIVATE KEY-----\nMIIEvgIBADANBgkq...oq4SQJbV\n-----END PRIVATE KEY-----\n)
- GOOGLE_API_CLIENT_EMAIL=Your Google Account allowed to use VertexAI API (ex : user@project-projectid.iam.gserviceaccount.com)
- GOOGLE_API_CLIENT_ID=Your Google Client ID (ex : 107696212857897829246)
- GOOGLE_API_REGION=Your Google Region (ex : europe-west9)
- GOOGLE_API_CLIENT_CERT_URL=https://www.googleapis.com/...
- OPENAI_API_KEY=Your OpenAI API key (ex : sk-tfB0mBpLPyQFeUzR7R48issEmHrjNbzCYsT3BlbkFJev3ZGj)
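These variables are typically kept in a .env file at the project root. Whether @drobs/ai-api loads them on its own is not stated here, so the sketch below assumes you load them into process.env yourself with the dotenv package before calling the API:

```js
// Assumption: the variables listed above live in a .env file and are loaded
// into process.env with the dotenv package before the API is used.
import 'dotenv/config';

// Quick sanity check that the credentials were picked up.
console.log('OpenAI key present:', Boolean(process.env.OPENAI_API_KEY));
console.log('Google project:', process.env.GOOGLE_API_PROJECT_ID);
```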
import { generateContent, getModels } from '@drobs/ai-api';

const model = 4; // numeric model index (here: Gemini 1.5 Flash via Vertex AI)

const options = {
    temperature: 0, // from 0 (very deterministic) to 2 (very creative)
    max_tokens: 100, // number of tokens for the expected response
    topK: 64, // number of most probable tokens to consider for each candidate (1 = most probable token only) (only for Google models)
    topP: 1, // cumulative probability of tokens to consider for each candidate
    candidateCount: 1, // number of candidate responses that will be returned by the model
    moderationModel: {} // soon available
};

const prompt = 'Tell me a story';
const response = await generateContent(prompt, model, false, options);
console.log(response);
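The third argument of generateContent toggles streaming (it is false above and in the full example below). This README does not show what a streamed response looks like, so the sketch below only flips the flag and logs whatever the promise resolves with; adapt the handling once you know the actual return type:

```js
// Streaming variant (sketch): only the third argument changes.
// The shape of the streamed result is not documented here, so it is logged as-is.
generateContent(prompt, model, true, options)
    .then(result => {
        console.log(result);
    })
    .catch(error => {
        console.error(error);
    });
```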
import { generateContent } from '@drobs/ai-api';
const prompt = "This is a test";
const model = 4; //Gemini 1.5 Flash via Vertex AI
const stream = false;
const options = {
    temperature: 0,
    max_tokens: 100,
    topP: 1,
    candidateCount: 1
};
function main() {
    generateContent(prompt, model, stream, options)
        .then(result => {
            console.log(result);
        })
        .catch(error => {
            console.error(error);
        });
}

main();
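The same example can also be written with async/await instead of promise chaining; this is purely a stylistic variant of the code above:

```js
async function main() {
    try {
        const result = await generateContent(prompt, model, stream, options);
        console.log(result);
    } catch (error) {
        console.error(error);
    }
}

main();
```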
Please feel free to contribute or report any issues on GitHub.
npm run build
npm run test