Prediction API is the primary endpoint for interacting with your Flowise flows and assistants. It allows you to send messages to your selected flow and receive responses back. This API handles the core chat functionality, including:
Chat Interactions: Send questions or messages to your flow and receive AI-generated responses
Streaming Responses: Get real-time streaming responses for better user experience
Conversation Memory: Maintain context across multiple messages within a session
File Processing: Upload and process images, audio, and other files as part of your queries
Dynamic Configuration: Override chatflow settings and pass variables at runtime
Flowise provides official SDKs for Python and TypeScript/JavaScript:
Installation
Python: pip install flowise
TypeScript/JavaScript: npm install flowise-sdk
Python SDK Usage
from flowise import Flowise, PredictionData

# Create an SDK client pointed at the local Flowise server.
client = Flowise(base_url="http://localhost:3000")

try:
    # Submit a single (non-streaming) prediction request.
    payload = PredictionData(
        chatflowId="your-chatflow-id",
        question="What is machine learning?",
        streaming=False,
    )
    response = client.create_prediction(payload)

    # The SDK yields an iterable of results; print each one.
    for result in response:
        print("Response:", result)
except Exception as e:
    print(f"Error: {e}")
from flowise import Flowise, PredictionData

client = Flowise(base_url="http://localhost:3000")

try:
    # Request a streamed prediction: chunks arrive as they are generated.
    streamed = client.create_prediction(
        PredictionData(
            chatflowId="your-chatflow-id",
            question="Tell me a long story about AI",
            streaming=True,
        )
    )

    print("Streaming response:")
    # Emit each chunk as soon as it arrives, without buffering or newlines.
    for piece in streamed:
        print(piece, end="", flush=True)
except Exception as e:
    print(f"Error: {e}")
from flowise import Flowise, PredictionData

client = Flowise(base_url="http://localhost:3000")

# Runtime settings that override the chatflow's stored configuration.
runtime_overrides = {
    "sessionId": "user-session-123",
    "temperature": 0.7,
    "maxTokens": 500,
    "returnSourceDocuments": True,
}

try:
    response = client.create_prediction(
        PredictionData(
            chatflowId="your-chatflow-id",
            question="Analyze this data",
            streaming=False,
            overrideConfig=runtime_overrides,
        )
    )
    for result in response:
        print("Response:", result)
except Exception as e:
    print(f"Error: {e}")
import requests


def chat_with_history(flow_id, question, history):
    """Send a question to a Flowise flow along with explicit chat history.

    Args:
        flow_id: Identifier of the target chatflow.
        question: The new user message to send.
        history: List of prior messages, each a dict with "role"
            ("apiMessage" or "userMessage") and "content" keys.

    Returns:
        The parsed JSON response on success, or None on any request error.
    """
    url = f"http://localhost:3000/api/v1/prediction/{flow_id}"
    payload = {
        "question": question,
        "history": history
    }
    try:
        # A timeout prevents the call from blocking forever when the
        # Flowise server is slow or unreachable (requests has no default).
        response = requests.post(url, json=payload, timeout=30)
        response.raise_for_status()
        return response.json()
    except requests.exceptions.RequestException as e:
        print(f"Error: {e}")
        return None


# Example conversation with context
conversation_history = [
    {
        "role": "apiMessage",
        "content": "Hello! I'm an AI assistant. How can I help you today?"
    },
    {
        "role": "userMessage",
        "content": "Hi, my name is Sarah and I'm learning about AI"
    },
    {
        "role": "apiMessage",
        "content": "Nice to meet you, Sarah! I'd be happy to help you learn about AI. What specific aspects interest you?"
    }
]

result = chat_with_history(
    flow_id="your-flow-id",
    question="Can you explain neural networks in simple terms?",
    history=conversation_history
)
print(result)
async function chatWithHistory(flowId, question, history) {
    // POST the new question together with the explicit prior conversation.
    const endpoint = `http://localhost:3000/api/v1/prediction/${flowId}`;

    try {
        const response = await fetch(endpoint, {
            method: 'POST',
            headers: { 'Content-Type': 'application/json' },
            body: JSON.stringify({ question: question, history: history })
        });

        if (!response.ok) {
            throw new Error(`HTTP error! status: ${response.status}`);
        }
        return await response.json();
    } catch (error) {
        console.error('Error:', error);
        return null;
    }
}

// Example conversation with context
const conversationHistory = [
    {
        role: "apiMessage",
        content: "Hello! I'm an AI assistant. How can I help you today?"
    },
    {
        role: "userMessage",
        content: "Hi, my name is Sarah and I'm learning about AI"
    },
    {
        role: "apiMessage",
        content: "Nice to meet you, Sarah! I'd be happy to help you learn about AI. What specific aspects interest you?"
    }
];

chatWithHistory(
    'your-flow-id',
    'Can you explain neural networks in simple terms?',
    conversationHistory
).then(result => {
    console.log(result);
});
Session Management
Use sessionId to maintain conversation state across multiple API calls. Each session maintains its own conversation context and memory.
import requests


class FlowiseSession:
    """Thin client that pins every prediction call to one Flowise session.

    Reusing the same sessionId lets the flow keep conversational memory
    across multiple calls.
    """

    # Fail fast instead of hanging forever when the server is unreachable.
    REQUEST_TIMEOUT_SECONDS = 30

    def __init__(self, flow_id, session_id, base_url="http://localhost:3000"):
        self.flow_id = flow_id
        self.session_id = session_id
        self.base_url = base_url
        self.url = f"{base_url}/api/v1/prediction/{flow_id}"

    def send_message(self, question, **kwargs):
        """POST a question to the flow, tagged with this session's id.

        Extra keyword arguments are merged into the request payload; an
        "overrideConfig" kwarg is merged into (and may extend) the
        session's own overrideConfig.

        Returns the parsed JSON response, or None on any request error.
        """
        payload = {
            "question": question,
            "overrideConfig": {
                "sessionId": self.session_id,
                **kwargs.get("overrideConfig", {})
            }
        }
        # Add any additional parameters
        for key, value in kwargs.items():
            if key != "overrideConfig":
                payload[key] = value
        try:
            response = requests.post(
                self.url, json=payload, timeout=self.REQUEST_TIMEOUT_SECONDS
            )
            response.raise_for_status()
            return response.json()
        except requests.exceptions.RequestException as e:
            print(f"Error: {e}")
            return None


# Usage
session = FlowiseSession(
    flow_id="your-flow-id",
    session_id="user-session-123"
)

# First message
response1 = session.send_message("Hello, my name is John")
print("Response 1:", response1)

# Second message - will remember the previous context
response2 = session.send_message("What's my name?")
print("Response 2:", response2)
class FlowiseSession {
    constructor(flowId, sessionId, baseUrl = 'http://localhost:3000') {
        this.flowId = flowId;
        this.sessionId = sessionId;
        this.baseUrl = baseUrl;
        this.url = `${baseUrl}/api/v1/prediction/${flowId}`;
    }

    async sendMessage(question) {
        // Every request carries the session id so the flow keeps its memory.
        const payload = {
            question: question,
            overrideConfig: { sessionId: this.sessionId }
        };

        try {
            const response = await fetch(this.url, {
                method: 'POST',
                headers: { 'Content-Type': 'application/json' },
                body: JSON.stringify(payload)
            });
            if (!response.ok) {
                throw new Error(`HTTP error! status: ${response.status}`);
            }
            return await response.json();
        } catch (error) {
            console.error('Error:', error);
            return null;
        }
    }
}

// Usage
const session = new FlowiseSession('your-flow-id', 'user-session-123');

async function conversationExample() {
    // First message
    const response1 = await session.sendMessage("Hello, my name is John");
    console.log("Response 1:", response1);

    // Second message - will remember the previous context
    const response2 = await session.sendMessage("What's my name?");
    console.log("Response 2:", response2);
}

conversationExample();
Variables
Pass dynamic variables to your flow using the vars property in overrideConfig. Variables can be used in your flow to inject dynamic content.
Variables must be created first before they can be overridden. Refer to Variables.
Upload images for visual analysis when your flow supports image processing. Refer to Image for more reference.
Upload Structure:
{
  "data": "",
  "type": "",
  "name": "",
  "mime": ""
}
Data: Base64 or URL of an image
Type: url or file
Name: name of the image
Mime: image/png, image/jpeg, image/jpg
import requests
import base64
import os


def upload_image(flow_id, question, image_path):
    """Send a local image (base64-encoded) to a flow for visual analysis.

    Args:
        flow_id: Identifier of the target chatflow.
        question: Prompt describing what to do with the image.
        image_path: Path to the image file on disk.

    Returns:
        The parsed JSON response, or None on any request error.
    """
    # Read and encode image
    with open(image_path, 'rb') as image_file:
        encoded_image = base64.b64encode(image_file.read()).decode('utf-8')

    # Determine MIME type based on file extension
    mime_types = {
        '.png': 'image/png',
        '.jpg': 'image/jpeg',
        '.jpeg': 'image/jpeg',
        '.gif': 'image/gif',
        '.webp': 'image/webp'
    }
    file_ext = os.path.splitext(image_path)[1].lower()
    mime_type = mime_types.get(file_ext, 'image/png')

    url = f"http://localhost:3000/api/v1/prediction/{flow_id}"
    payload = {
        "question": question,
        "uploads": [
            {
                # Flowise expects a data URI for type "file" uploads.
                "data": f"data:{mime_type};base64,{encoded_image}",
                "type": "file",
                "name": os.path.basename(image_path),
                "mime": mime_type
            }
        ]
    }
    try:
        # Generous timeout (uploads can be large), but never hang forever.
        response = requests.post(url, json=payload, timeout=60)
        response.raise_for_status()
        return response.json()
    except requests.exceptions.RequestException as e:
        print(f"Error: {e}")
        return None


# Example usage
result = upload_image(
    flow_id="your-flow-id",
    question="Can you describe what you see in this image?",
    image_path="path/to/your/image.png"
)
print(result)
import requests
import os


def upload_image_url(flow_id, question, image_url, image_name=None):
    """
    Upload an image using a URL instead of base64 encoding.
    This is more efficient for images that are already hosted online.

    Args:
        flow_id: Identifier of the target chatflow.
        question: Prompt describing what to do with the image.
        image_url: Publicly reachable URL of the image.
        image_name: Optional display name; derived from the URL if omitted.

    Returns:
        The parsed JSON response, or None on any request error.
    """
    url = f"http://localhost:3000/api/v1/prediction/{flow_id}"

    # Extract filename from URL if not provided
    if not image_name:
        image_name = image_url.split('/')[-1]
        # Strip any query string so the extension lookup below works.
        if '?' in image_name:
            image_name = image_name.split('?')[0]

    # Determine MIME type from URL extension
    mime_types = {
        '.png': 'image/png',
        '.jpg': 'image/jpeg',
        '.jpeg': 'image/jpeg',
        '.gif': 'image/gif',
        '.webp': 'image/webp'
    }
    file_ext = os.path.splitext(image_name)[1].lower()
    mime_type = mime_types.get(file_ext, 'image/jpeg')

    payload = {
        "question": question,
        "uploads": [
            {
                "data": image_url,
                "type": "url",
                "name": image_name,
                "mime": mime_type
            }
        ]
    }
    try:
        # Timeout prevents the call from hanging on an unreachable server.
        response = requests.post(url, json=payload, timeout=60)
        response.raise_for_status()
        return response.json()
    except requests.exceptions.RequestException as e:
        print(f"Error: {e}")
        return None


# Example usage with public image URL
result = upload_image_url(
    flow_id="your-flow-id",
    question="What's in this image? Analyze it in detail.",
    image_url="https://example.com/path/to/image.jpg",
    image_name="example_image.jpg"
)
print(result)

# Example with direct URL (no custom name)
# NOTE: the keyword must be flow_id — the function has no chatflow_id
# parameter, so the original call raised a TypeError.
result2 = upload_image_url(
    flow_id="your-chatflow-id",
    question="Describe this screenshot",
    image_url="https://i.imgur.com/sample.png"
)
print(result2)
async function uploadImage(flowId, question, imageFile) {
    // Read the file as a data URI via FileReader, wrapped in a promise.
    const base64Data = await new Promise((resolve, reject) => {
        const reader = new FileReader();
        reader.onload = (e) => resolve(e.target.result);
        reader.onerror = () => reject(new Error('Failed to read file'));
        reader.readAsDataURL(imageFile);
    });

    const payload = {
        question: question,
        uploads: [
            {
                data: base64Data,
                type: 'file',
                name: imageFile.name,
                mime: imageFile.type
            }
        ]
    };

    const response = await fetch(`http://localhost:3000/api/v1/prediction/${flowId}`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify(payload)
    });
    if (!response.ok) {
        throw new Error(`HTTP error! status: ${response.status}`);
    }
    return await response.json();
}

// Example usage in browser
document.getElementById('imageInput').addEventListener('change', async (e) => {
    const file = e.target.files[0];
    if (!file) {
        return;
    }
    try {
        const result = await uploadImage(
            'your-flow-id',
            'Can you describe what you see in this image?',
            file
        );
        console.log('Analysis result:', result);
    } catch (error) {
        console.error('Upload failed:', error);
    }
});
async function uploadImageUrl(flowId, question, imageUrl, imageName = null) {
    /**
     * Upload an image using a URL instead of base64 encoding.
     * This is more efficient for images that are already hosted online.
     */
    // Derive a filename from the URL when the caller did not supply one.
    if (!imageName) {
        const lastSegment = imageUrl.split('/').pop();
        // Drop any query string so the extension lookup below works.
        imageName = lastSegment.includes('?') ? lastSegment.split('?')[0] : lastSegment;
    }

    // Map the file extension to a MIME type (default: image/jpeg).
    const mimeTypes = {
        '.png': 'image/png',
        '.jpg': 'image/jpeg',
        '.jpeg': 'image/jpeg',
        '.gif': 'image/gif',
        '.webp': 'image/webp'
    };
    const lowered = imageName.toLowerCase();
    const fileExt = lowered.substring(lowered.lastIndexOf('.'));
    const mimeType = mimeTypes[fileExt] || 'image/jpeg';

    const payload = {
        question: question,
        uploads: [
            {
                data: imageUrl,
                type: 'url',
                name: imageName,
                mime: mimeType
            }
        ]
    };

    try {
        const response = await fetch(`http://localhost:3000/api/v1/prediction/${flowId}`, {
            method: 'POST',
            headers: { 'Content-Type': 'application/json' },
            body: JSON.stringify(payload)
        });
        if (!response.ok) {
            throw new Error(`HTTP error! status: ${response.status}`);
        }
        return await response.json();
    } catch (error) {
        console.error('Error:', error);
        return null;
    }
}

// Example usage with public image URL
async function analyzeImageFromUrl() {
    try {
        const result = await uploadImageUrl(
            'your-flow-id',
            'What is in this image? Analyze it in detail.',
            'https://example.com/path/to/image.jpg',
            'example_image.jpg'
        );
        console.log('Analysis result:', result);
    } catch (error) {
        console.error('Upload failed:', error);
    }
}

// Example with direct URL (no custom name)
uploadImageUrl('your-flow-id', 'Describe this screenshot', 'https://i.imgur.com/sample.png')
    .then(result => {
        if (result) {
            console.log('Analysis result:', result);
        }
    });

// Example with multiple image URLs
async function analyzeMultipleImages() {
    const imageUrls = [
        'https://example.com/image1.jpg',
        'https://example.com/image2.png',
        'https://example.com/image3.gif'
    ];

    // Fan out every upload concurrently and wait for all of them.
    const results = await Promise.all(
        imageUrls.map(url => uploadImageUrl('your-flow-id', `Analyze this image: ${url}`, url))
    );

    results.forEach((result, index) => {
        console.log(`Image ${index + 1} analysis:`, result);
    });
}
import requests
import base64
import os


def upload_audio(flow_id, audio_path, question=None):
    """Send a local audio file (base64-encoded) to a flow for processing.

    Args:
        flow_id: Identifier of the target chatflow.
        audio_path: Path to the audio file on disk.
        question: Optional prompt (e.g. transcription instructions).

    Returns:
        The parsed JSON response, or None on any request error.
    """
    # Read and encode audio
    with open(audio_path, 'rb') as audio_file:
        encoded_audio = base64.b64encode(audio_file.read()).decode('utf-8')

    # Determine MIME type based on file extension
    mime_types = {
        '.webm': 'audio/webm',
        '.wav': 'audio/wav',
        '.mp3': 'audio/mpeg',
        '.m4a': 'audio/mp4'
    }
    file_ext = os.path.splitext(audio_path)[1].lower()
    mime_type = mime_types.get(file_ext, 'audio/webm')

    url = f"http://localhost:3000/api/v1/prediction/{flow_id}"
    payload = {
        "uploads": [
            {
                # Flowise expects a data URI for "audio" uploads.
                "data": f"data:{mime_type};base64,{encoded_audio}",
                "type": "audio",
                "name": os.path.basename(audio_path),
                "mime": mime_type
            }
        ]
    }
    # Add question if provided
    if question:
        payload["question"] = question

    try:
        # Generous timeout (audio uploads can be large), but never hang forever.
        response = requests.post(url, json=payload, timeout=60)
        response.raise_for_status()
        return response.json()
    except requests.exceptions.RequestException as e:
        print(f"Error: {e}")
        return None


# Example usage
result = upload_audio(
    flow_id="your-flow-id",
    audio_path="path/to/your/audio.wav",
    question="Please transcribe this audio and summarize the content"
)
print(result)
import requests
import os


def upload_audio_url(flow_id, audio_url, question=None, audio_name=None):
    """
    Upload an audio file using a URL instead of base64 encoding.
    This is more efficient for audio files that are already hosted online.

    Args:
        flow_id: Identifier of the target chatflow.
        audio_url: Publicly reachable URL of the audio file.
        question: Optional prompt (e.g. transcription instructions).
        audio_name: Optional display name; derived from the URL if omitted.

    Returns:
        The parsed JSON response, or None on any request error.
    """
    url = f"http://localhost:3000/api/v1/prediction/{flow_id}"

    # Extract filename from URL if not provided
    if not audio_name:
        audio_name = audio_url.split('/')[-1]
        # Strip any query string so the extension lookup below works.
        if '?' in audio_name:
            audio_name = audio_name.split('?')[0]

    # Determine MIME type from URL extension
    mime_types = {
        '.webm': 'audio/webm',
        '.wav': 'audio/wav',
        '.mp3': 'audio/mpeg',
        '.m4a': 'audio/mp4',
        '.ogg': 'audio/ogg',
        '.aac': 'audio/aac'
    }
    file_ext = os.path.splitext(audio_name)[1].lower()
    mime_type = mime_types.get(file_ext, 'audio/wav')

    payload = {
        "uploads": [
            {
                "data": audio_url,
                "type": "url",
                "name": audio_name,
                "mime": mime_type
            }
        ]
    }
    # Add question if provided
    if question:
        payload["question"] = question

    try:
        # Timeout prevents the call from hanging on an unreachable server.
        response = requests.post(url, json=payload, timeout=60)
        response.raise_for_status()
        return response.json()
    except requests.exceptions.RequestException as e:
        print(f"Error: {e}")
        return None


# Example usage with public audio URL
result = upload_audio_url(
    flow_id="your-flow-id",
    audio_url="https://example.com/path/to/speech.mp3",
    question="Please transcribe this audio and provide a summary",
    audio_name="speech_recording.mp3"
)
print(result)

# Example with direct URL (no custom name or question)
result2 = upload_audio_url(
    flow_id="your-flow-id",
    audio_url="https://storage.googleapis.com/sample-audio/speech.wav"
)
print(result2)

# Example for meeting transcription
result3 = upload_audio_url(
    flow_id="your-flow-id",
    audio_url="https://meetings.example.com/recording-123.m4a",
    question="Transcribe this meeting recording and extract key action items and decisions made",
    audio_name="team_meeting_jan15.m4a"
)
print(result3)
async function uploadAudio(flowId, audioFile, question = null) {
    // Read the file as a data URI via FileReader, wrapped in a promise.
    const base64Data = await new Promise((resolve, reject) => {
        const reader = new FileReader();
        reader.onload = (e) => resolve(e.target.result);
        reader.onerror = () => reject(new Error('Failed to read file'));
        reader.readAsDataURL(audioFile);
    });

    const payload = {
        uploads: [
            {
                data: base64Data,
                type: 'audio',
                name: audioFile.name,
                mime: audioFile.type
            }
        ]
    };
    // Add question if provided
    if (question) {
        payload.question = question;
    }

    const response = await fetch(`http://localhost:3000/api/v1/prediction/${flowId}`, {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify(payload)
    });
    if (!response.ok) {
        throw new Error(`HTTP error! status: ${response.status}`);
    }
    return await response.json();
}

// Example usage with file input
document.getElementById('audioInput').addEventListener('change', async (e) => {
    const file = e.target.files[0];
    if (!file) {
        return;
    }
    try {
        const result = await uploadAudio(
            'your-flow-id',
            file,
            'Please transcribe this audio and summarize the content'
        );
        console.log('Transcription result:', result);
    } catch (error) {
        console.error('Upload failed:', error);
    }
});
async function uploadAudioUrl(flowId, audioUrl, question = null, audioName = null) {
    /**
     * Upload an audio file using a URL instead of base64 encoding.
     * This is more efficient for audio files that are already hosted online.
     */
    // Derive a filename from the URL when the caller did not supply one.
    if (!audioName) {
        const lastSegment = audioUrl.split('/').pop();
        // Drop any query string so the extension lookup below works.
        audioName = lastSegment.includes('?') ? lastSegment.split('?')[0] : lastSegment;
    }

    // Map the file extension to a MIME type (default: audio/wav).
    const mimeTypes = {
        '.webm': 'audio/webm',
        '.wav': 'audio/wav',
        '.mp3': 'audio/mpeg',
        '.m4a': 'audio/mp4',
        '.ogg': 'audio/ogg',
        '.aac': 'audio/aac'
    };
    const lowered = audioName.toLowerCase();
    const fileExt = lowered.substring(lowered.lastIndexOf('.'));
    const mimeType = mimeTypes[fileExt] || 'audio/wav';

    const payload = {
        uploads: [
            {
                data: audioUrl,
                type: 'url',
                name: audioName,
                mime: mimeType
            }
        ]
    };
    // Add question if provided
    if (question) {
        payload.question = question;
    }

    try {
        const response = await fetch(`http://localhost:3000/api/v1/prediction/${flowId}`, {
            method: 'POST',
            headers: { 'Content-Type': 'application/json' },
            body: JSON.stringify(payload)
        });
        if (!response.ok) {
            throw new Error(`HTTP error! status: ${response.status}`);
        }
        return await response.json();
    } catch (error) {
        console.error('Error:', error);
        return null;
    }
}

// Example usage with public audio URL
async function transcribeAudioFromUrl() {
    try {
        const result = await uploadAudioUrl(
            'your-flow-id',
            'https://example.com/path/to/speech.mp3',
            'Please transcribe this audio and provide a summary',
            'speech_recording.mp3'
        );
        console.log('Transcription result:', result);
    } catch (error) {
        console.error('Upload failed:', error);
    }
}

// Example with direct URL (no custom name or question)
uploadAudioUrl('your-flow-id', 'https://storage.googleapis.com/sample-audio/speech.wav')
    .then(result => {
        if (result) {
            console.log('Transcription result:', result);
        }
    });

// Example for meeting transcription
uploadAudioUrl(
    'your-flow-id',
    'https://meetings.example.com/recording-123.m4a',
    'Transcribe this meeting recording and extract key action items and decisions made',
    'team_meeting_jan15.m4a'
).then(result => {
    if (result) {
        console.log('Meeting analysis:', result);
    }
});

// Example with multiple audio URLs for batch processing
async function transcribeMultipleAudios() {
    const audioUrls = [
        {
            url: 'https://example.com/interview1.wav',
            question: 'Transcribe this interview and summarize key points',
            name: 'interview_candidate_1.wav'
        },
        {
            url: 'https://example.com/interview2.mp3',
            question: 'Transcribe this interview and summarize key points',
            name: 'interview_candidate_2.mp3'
        },
        {
            url: 'https://example.com/lecture.m4a',
            question: 'Transcribe this lecture and create bullet-point notes',
            name: 'cs101_lecture.m4a'
        }
    ];

    // Fan out every upload concurrently and wait for all of them.
    const results = await Promise.all(
        audioUrls.map(audio =>
            uploadAudioUrl('your-flow-id', audio.url, audio.question, audio.name)
        )
    );

    results.forEach((result, index) => {
        console.log(`Audio ${index + 1} transcription:`, result);
    });
}
File Uploads
Upload files to have LLM process the files and answer query related to the files. Refer to Files for more reference.
Human Input
To resume the execution from a previously stopped checkpoint, humanInput needs to be provided. Refer to Human In The Loop for details.
Human Input Structure
{
"type": "",
"feedback": ""
}
type: Either proceed or reject
feedback: Feedback to the last output
Must specify the same sessionId where the execution was stopped