Build a RAG Pipeline with JavaScript / Node.js
This guide provides a step-by-step walkthrough to building a complete Retrieval-Augmented Generation (RAG) system in JavaScript. By leveraging FoxNose as your knowledge layer, you will create a working Q&A application that delivers accurate, grounded answers sourced directly from your FoxNose knowledge base.
Looking for the Python version? See Build a RAG Pipeline with Python.
What you'll build: A Node.js application that:
- Takes a user question.
- Searches your FoxNose knowledge base for relevant context.
- Sends the context and question to an LLM (e.g., GPT-4).
- Returns a grounded answer with clear source references.
Prerequisites:
- A FoxNose environment with some content already present (see our Quick Start guide to set this up if needed).
- A configured Flux API with at least one folder connected and accessible.
- Node.js 18 or higher.
- An OpenAI API key.
Step 1: Set Up Your Project
Begin by creating a new project directory and installing the necessary dependencies:
mkdir foxnose-rag && cd foxnose-rag
npm init -y
npm install @foxnose/sdk openai dotenv
- @foxnose/sdk: The official FoxNose JavaScript SDK for interacting with the Flux API.
- openai: The official client library for interacting with OpenAI's LLMs.
- dotenv: To load environment variables from a .env file for secure credential management.
Enable ES modules by adding "type": "module" to your package.json:
{
"type": "module"
}
Next, create a .env file in your project's root directory to securely store your credentials:
OPENAI_API_KEY=sk-...
FOXNOSE_ENV_KEY=your-environment-key # Your FoxNose Environment Key
FOXNOSE_PUBLIC_KEY=your-public-key # Your Flux API public key
FOXNOSE_SECRET_KEY=your-secret-key # Your Flux API secret key
You can find your FOXNOSE_ENV_KEY in the FoxNose dashboard under Environment → Settings. The API keys are generated when you create a Flux API key.
Step 2: Initialize the FoxNose SDK
Create a file named foxnose-client.mjs. This module initializes the FoxNose SDK and provides a flexible search function.
import 'dotenv/config'
import { FluxClient, SimpleKeyAuth } from '@foxnose/sdk'
/**
 * Creates a Flux API client bound to the given API prefix.
 *
 * Reads FOXNOSE_ENV_KEY, FOXNOSE_PUBLIC_KEY, and FOXNOSE_SECRET_KEY from
 * the environment (loaded from .env via dotenv at module import time).
 *
 * @param {string} apiPrefix - The Flux API prefix (e.g., 'my_api').
 * @returns {FluxClient} A client authenticated with your key pair.
 */
export function getFluxClient(apiPrefix) {
  const { FOXNOSE_ENV_KEY, FOXNOSE_PUBLIC_KEY, FOXNOSE_SECRET_KEY } = process.env
  const auth = new SimpleKeyAuth(FOXNOSE_PUBLIC_KEY, FOXNOSE_SECRET_KEY)
  return new FluxClient({
    baseUrl: `https://${FOXNOSE_ENV_KEY}.fxns.io`,
    apiPrefix,
    auth,
  })
}
/**
 * Searches a FoxNose folder for relevant content.
 *
 * @param {string} apiPrefix - The Flux API prefix (e.g., 'my_api').
 * @param {string} folderPath - Path to the folder to search (e.g., 'knowledge-base').
 * @param {string} query - The natural language search query.
 * @param {object} [options]
 * @param {'vector'|'hybrid'|'text'} [options.mode='hybrid'] - Search mode.
 * @param {number} [options.topK=5] - Maximum number of results to return.
 * @param {number} [options.similarityThreshold=0.7] - Minimum similarity score (0-1).
 * @param {object} [options.filters] - Optional structured filters.
 * @param {string} [options.contentField='body'] - Field containing main content.
 * @param {string} [options.titleField='title'] - Field containing the title.
 * @param {string} [options.urlField='url'] - Field containing the source URL.
 * @returns {Promise<Array<{key: string, title: string, content: string, url: string|null}>>}
 *   Structured results; missing fields fall back to '' / 'Untitled' / null.
 */
export async function searchKnowledgeBase(
  apiPrefix,
  folderPath,
  query,
  {
    mode = 'hybrid',
    topK = 5,
    similarityThreshold = 0.7,
    filters = null,
    contentField = 'body',
    titleField = 'title',
    urlField = 'url',
  } = {},
) {
  const client = getFluxClient(apiPrefix)
  try {
    // Build the search request body
    const body = { limit: topK }
    if (mode === 'vector') {
      body.search_mode = 'vector'
      body.vector_search = {
        query,
        top_k: topK,
        similarity_threshold: similarityThreshold,
      }
    } else if (mode === 'hybrid') {
      body.search_mode = 'hybrid'
      body.find_text = { query }
      body.vector_search = {
        query,
        top_k: topK,
        similarity_threshold: similarityThreshold,
      }
    } else {
      // Plain full-text search ('text' mode, or any unrecognized mode).
      body.find_text = { query }
    }
    // Add structured filters if provided; all entries must match (all_of).
    if (filters) {
      body.where = {
        $: { all_of: Object.entries(filters).map(([k, v]) => ({ [k]: v })) },
      }
    }
    // Execute the search and parse the results into a flat, stable shape.
    const response = await client.search(folderPath, body)
    return (response.results ?? []).map((resource) => ({
      key: resource._sys?.key ?? '',
      title: resource.data?.[titleField] ?? 'Untitled',
      content: resource.data?.[contentField] ?? '',
      url: resource.data?.[urlField] ?? null,
    }))
  } finally {
    // Always release the client, even when the search request throws —
    // the original code leaked the connection on error.
    client.close()
  }
}
This code returns structured result objects with optional source URLs for citations. Field names (contentField, titleField, urlField) are configurable per call.
Step 3: Build the RAG Function
Create rag.mjs. This file will contain the core logic for your RAG pipeline, which orchestrates the calls to FoxNose and your LLM.
import 'dotenv/config'
import OpenAI from 'openai'
import { searchKnowledgeBase } from './foxnose-client.mjs'
// OpenAI client; the SDK reads OPENAI_API_KEY from the environment by default.
const openai = new OpenAI()
// Configure which API and folder to search (adjust for your setup)
const API_PREFIX = 'my_api'
const FOLDER_PATH = 'path/to/knowledge-base'
/**
 * Formats search results into a single context string for the LLM prompt.
 *
 * Each result becomes a "[Source N: title] (url)" header followed by its
 * content; sections are separated by a "---" divider. Returns a fixed
 * fallback sentence when there are no results.
 *
 * @param {Array<{title: string, content: string, url: string|null}>} results
 * @returns {string} The prompt-ready context block.
 */
function buildPromptContext(results) {
  if (results.length === 0) {
    return 'No relevant information was found in the knowledge base.'
  }
  const sections = []
  for (const [index, result] of results.entries()) {
    const urlSuffix = result.url ? ` (${result.url})` : ''
    const header = `[Source ${index + 1}: ${result.title}]${urlSuffix}`
    sections.push(`${header}\n${result.content}`)
  }
  return sections.join('\n\n---\n\n')
}
/**
 * Asks a question, retrieves context from FoxNose, and generates an answer using an LLM.
 *
 * @param {string} question - The user's question.
 * @param {object} [options]
 * @param {number} [options.topK=5] - Maximum number of results to return.
 * @param {number} [options.similarityThreshold=0.7] - Minimum similarity score.
 * @param {object} [options.filters] - Optional structured filters.
 * @returns {Promise<{answer: string, sources: Array}>}
 */
export async function ask(
  question,
  { topK = 5, similarityThreshold = 0.7, filters = null } = {},
) {
  // Step 1: retrieve relevant documents from the FoxNose knowledge base.
  console.log(`Searching for context related to: '${question}'...`)
  const searchOptions = { mode: 'hybrid', topK, similarityThreshold, filters }
  const results = await searchKnowledgeBase(
    API_PREFIX,
    FOLDER_PATH,
    question,
    searchOptions,
  )
  // Step 2: flatten the hits into one context string for prompt injection.
  const context = buildPromptContext(results)
  // Step 3: generate an answer grounded strictly in the retrieved context.
  const systemPrompt = `You are a helpful assistant that answers questions based ONLY on the provided context.
- If the context doesn't contain the answer, state that you don't have enough information.
- Be concise and direct.
- When possible, cite the source title and include the URL so users can read more.`
  const userPrompt = `Context:\n${context}\n\nQuestion: ${question}\n\nAnswer:`
  console.log('Generating answer with LLM...')
  const completion = await openai.chat.completions.create({
    model: 'gpt-4o',
    messages: [
      { role: 'system', content: systemPrompt },
      { role: 'user', content: userPrompt },
    ],
    temperature: 0,
  })
  // Step 4: hand back the answer plus source metadata for citation display.
  const sources = results.map((r) => ({ key: r.key, title: r.title, url: r.url }))
  return { answer: completion.choices[0].message.content, sources }
}
Key design decisions in this RAG function:
- API_PREFIX and FOLDER_PATH are defined as constants here for simplicity, but you can make them parameters if your agent needs to search multiple sources.
- Uses hybrid search mode by default for the best balance of semantic understanding and keyword matching.
- The systemPrompt strictly instructs the LLM to only use the provided context, reducing hallucinations.
- Source URLs are included in the context, allowing the LLM to cite them in responses.
Step 4: Create the Main Application Loop
Create a final file, main.mjs, to run your Q&A application from the command line.
import { createInterface } from 'node:readline/promises'
import { ask } from './rag.mjs'
/**
 * Interactive command-line Q&A loop.
 *
 * Reads questions from stdin, answers them via ask(), and prints the answer
 * with its sources. Typing 'quit' or 'exit' (case-insensitive) ends the loop;
 * blank input is ignored.
 */
async function main() {
  const rl = createInterface({
    input: process.stdin,
    output: process.stdout,
  })
  console.log('FoxNose RAG Q&A System')
  console.log("Type 'quit' or 'exit' to stop.\n")
  try {
    while (true) {
      const question = (await rl.question('Question: ')).trim()
      if (['quit', 'exit'].includes(question.toLowerCase())) break
      if (!question) continue
      const result = await ask(question)
      console.log(`\nAnswer: ${result.answer}\n`)
      if (result.sources.length > 0) {
        console.log('Sources:')
        for (const source of result.sources) {
          if (source.url) {
            console.log(` - ${source.title}: ${source.url}`)
          } else {
            console.log(` - ${source.title} (ID: ${source.key})`)
          }
        }
      }
      console.log('-'.repeat(40))
    }
  } finally {
    // Release the readline interface even if ask() throws mid-loop —
    // otherwise the process would hang on the open stdin handle.
    rl.close()
  }
}
// Don't leave the top-level promise floating: report failures and set a
// non-zero exit code instead of crashing with an unhandled rejection.
main().catch((err) => {
  console.error(err)
  process.exitCode = 1
})
Run your application from the terminal:
node main.mjs
Step 5 (Optional): Add Structured Filters
For questions that include specific constraints (e.g., "What was our revenue in Q4 2024?"), you can combine semantic search with structured filters.
Modify the ask() function in rag.mjs to support filtering by category:
// In rag.mjs, modify the ask() function signature:
/**
 * Asks a question, optionally restricting retrieval to one category.
 *
 * @param {string} question - The user's question.
 * @param {object} [options]
 * @param {number} [options.topK=5] - Maximum number of results to return.
 * @param {number} [options.similarityThreshold=0.7] - Minimum similarity score.
 * @param {string} [options.category] - If set, only search resources in this category.
 */
export async function ask(
question,
{ topK = 5, similarityThreshold = 0.7, category = null } = {},
) {
// Build a filter object if a category is provided
// `category__eq` is an equality filter on the resource's `category` field.
const filters = category ? { category__eq: category } : null
const results = await searchKnowledgeBase(API_PREFIX, FOLDER_PATH, question, {
mode: 'hybrid',
topK,
similarityThreshold,
filters,
})
// ... the rest of the function remains the same
}
Now your application can handle both general and constrained questions:
// A general question
await ask('How do I reset my password?')
// A constrained question filtered by category
await ask('What\'s our return policy?', { category: 'policies' })
The filters parameter supports all FoxNose filter operators. See the Search & Filtering guide for the full list.
Step 6 (Optional): Integrate with LangChain.js
If you're using LangChain.js, the official @foxnose/langchain package provides a ready-made FoxNoseRetriever that plugs straight into any LangChain.js chain.
Install the packages:
npm install @foxnose/langchain @langchain/openai langchain
Create retriever.mjs:
import 'dotenv/config'
import { FluxClient, SimpleKeyAuth } from '@foxnose/sdk'
import { FoxNoseRetriever } from '@foxnose/langchain'
import { ChatOpenAI } from '@langchain/openai'
import { ChatPromptTemplate } from '@langchain/core/prompts'
import { createStuffDocumentsChain } from 'langchain/chains/combine_documents'
import { createRetrievalChain } from 'langchain/chains/retrieval'
// Low-level Flux client authenticated against your FoxNose environment.
const client = new FluxClient({
baseUrl: `https://${process.env.FOXNOSE_ENV_KEY}.fxns.io`,
apiPrefix: 'my_api',
auth: new SimpleKeyAuth(
process.env.FOXNOSE_PUBLIC_KEY,
process.env.FOXNOSE_SECRET_KEY,
),
})
// LangChain retriever backed by FoxNose hybrid search over the folder;
// `pageContentField` selects which resource field becomes Document.pageContent.
const retriever = new FoxNoseRetriever({
client,
folderPath: 'knowledge-base',
pageContentField: 'body',
searchMode: 'hybrid',
topK: 5,
})
// Deterministic chat model (temperature 0) for answering from the context.
const llm = new ChatOpenAI({ model: 'gpt-4o', temperature: 0 })
// Prompt template: {context} receives the stuffed documents, {input} the question.
const prompt = ChatPromptTemplate.fromTemplate(
`Answer the user's question based only on the following context:
{context}
Question: {input}
Answer:`,
)
// Chain that "stuffs" all retrieved documents into the prompt's {context} slot.
const combineDocsChain = await createStuffDocumentsChain({ llm, prompt })
// Full pipeline: retrieve relevant docs, then run the combine-docs chain.
const chain = await createRetrievalChain({
retriever,
combineDocsChain,
})
// Example usage
const result = await chain.invoke({ input: 'How do I reset my password?' })
console.log(result.answer)
console.log('\nSources:')
// result.context holds the retrieved Documents; their metadata carries the title.
for (const doc of result.context) {
console.log(` - ${doc.metadata.title}`)
}
See the LangChain.js Integration page for more examples — vector search, hybrid with custom weights, filtered retrieval, and agent tools. For broader RAG architecture patterns, see the LLM Integrations guide.
Troubleshooting
- No results returned? Check that your folder has content with vectorizable fields and try lowering the similarityThreshold (e.g., to 0.5). Also, verify the folder is connected to your Flux API with get_many access.
- Irrelevant results? Ensure contentField matches your schema's vectorizable field. Consider adding filters to narrow down the context.
- Authentication errors? Verify your FOXNOSE_PUBLIC_KEY and FOXNOSE_SECRET_KEY are correct. See the Flux Authentication guide for details.
- Connection errors? Check that your FOXNOSE_ENV_KEY is correct and the apiPrefix matches an existing Flux API.
Next Steps & API Reference
- LLM Integrations Overview → Review the high-level architecture and best practices for RAG systems.
- Search & Filtering Guide → Master all search modes, including filters, joins, and pagination.
- Flux API Reference → Get full technical details on all API endpoints.