Get Started with Consensus Sentry
Integrate decentralized AI guardrails into your application in minutes
npm install consensus-sentry

Basic Usage
import { ConsensusSentry } from 'consensus-sentry';
// Initialize with your API key
const sentry = new ConsensusSentry({
apiKey: 'your_api_key',
environment: 'production'
});
// Check content against guardrails
async function validateContent(content) {
try {
const result = await sentry.validate(content);
if (result.valid) {
console.log('Content passed all guardrails');
return true;
} else {
console.log('Content violated guardrails:', result.violations);
return false;
}
} catch (error) {
console.error('Error validating content:', error);
return false;
}
}Integration with AI Models
import { ConsensusSentry } from 'consensus-sentry';
import { OpenAI } from 'openai';
// Initialize services
const sentry = new ConsensusSentry({ apiKey: 'your_sentry_api_key' });
const openai = new OpenAI({ apiKey: 'your_openai_api_key' });
// Create a guardrail-protected AI response function
async function generateSafeResponse(prompt) {
// First, validate the input prompt
const promptCheck = await sentry.validate(prompt);
if (!promptCheck.valid) {
return {
success: false,
message: 'Input violates content policy',
violations: promptCheck.violations
};
}
// Generate AI response
const completion = await openai.chat.completions.create({
model: "gpt-4",
messages: [{ role: "user", content: prompt }]
});
const aiResponse = completion.choices[0].message.content;
// Validate the AI response
const responseCheck = await sentry.validate(aiResponse);
if (!responseCheck.valid) {
return {
success: false,
message: 'AI response violates content policy',
violations: responseCheck.violations
};
}
// Return safe response
return {
success: true,
response: aiResponse
};
}Custom Guardrail Configuration
import { ConsensusSentry } from 'consensus-sentry';
// Initialize with custom guardrail configuration
const sentry = new ConsensusSentry({
apiKey: 'your_api_key',
guardrails: {
// Enable specific built-in guardrails
builtIn: {
toxicity: { enabled: true, threshold: 0.8 },
pii: { enabled: true },
profanity: { enabled: true, severity: 'medium' },
hate: { enabled: true, threshold: 0.7 },
violence: { enabled: true }
},
// Add custom rules
custom: [
{
id: 'company-specific',
name: 'Company Specific Policy',
description: 'Enforces company-specific content guidelines',
rules: [
{
type: 'keyword',
pattern: ['competitor-name-1', 'competitor-name-2'],
action: 'flag'
},
{
type: 'semantic',
category: 'company-secrets',
threshold: 0.85,
action: 'block'
}
]
}
],
// Configure actions for violations
actions: {
block: { enabled: true },
flag: { enabled: true, reviewQueue: 'content-moderation' },
log: { enabled: true, destination: 'blockchain' }
}
}
});Key Features
Simple API
Easy-to-use API with comprehensive documentation and examples
Decentralized Storage
Rules and audit logs stored on blockchain for transparency
Customizable Rules
Define and modify AI guardrails to match your specific requirements
Real-time Validation
Fast, reliable content validation with detailed violation reports
Ready to Get Started?
Sign up for an API key and start building safer AI applications today
Get API Key