crisis-detector — Detects suicide ideation, self-harm, and mental health crises using NLP and sentiment analysis, providing real-time alerts and connecting users to help resources.
Install via ClawdBot CLI:
clawdbot install raghulpasupathi/crisis-detector
Life-saving crisis detection system for identifying self-harm, suicide ideation, and mental health emergencies. Uses NLP, sentiment analysis, and behavioral patterns to detect users in crisis and connect them with immediate help resources.
https://clawhub.ai/raghulpasupathi/crisis-detector
npm install @raghulpasupathi/crisis-detector
{
"enabled": true,
"settings": {
"detectionMode": "sensitive",
"modes": {
"sensitive": {
"ideation": 0.40,
"planning": 0.30,
"imminent": 0.20,
"selfHarm": 0.35,
"actionThreshold": 0.40
},
"moderate": {
"ideation": 0.60,
"planning": 0.50,
"imminent": 0.40,
"selfHarm": 0.55,
"actionThreshold": 0.60
},
"conservative": {
"ideation": 0.75,
"planning": 0.65,
"imminent": 0.55,
"selfHarm": 0.70,
"actionThreshold": 0.75
}
},
"detection": {
"textAnalysis": {
"enabled": true,
"contextAware": true,
"historicalContext": true
},
"behaviorAnalysis": {
"enabled": true,
"trackPatterns": true,
"abnormalityDetection": true
},
"sentimentAnalysis": {
"enabled": true,
"depressionIndicators": true,
"hopelessnessDetection": true
}
},
"resources": {
"crisisHotlines": {
"enabled": true,
"international": true,
"textServices": true,
"chatServices": true
},
"mentalHealthResources": {
"enabled": true,
"therapistDirectory": true,
"selfHelpResources": true,
"supportGroups": true
},
"emergencyServices": {
"enabled": true,
"localEmergency": true,
"wellnessCheck": false
}
},
"response": {
"automaticMessage": true,
"messageTemplate": "caring",
"resourceDisplay": "immediate",
"followUp": true,
"humanOutreach": true
},
"actions": {
"onIdeation": [
"show_resources",
"send_caring_message",
"notify_safety_team",
"enable_monitoring"
],
"onPlanning": [
"show_resources_urgent",
"immediate_outreach",
"notify_emergency_contacts",
"enable_intensive_monitoring"
],
"onImminent": [
"emergency_intervention",
"contact_authorities",
"notify_emergency_contacts",
"continuous_monitoring"
]
},
"privacy": {
"respectUserPrivacy": true,
"informedConsent": true,
"dataMinimization": true,
"confidentialLogging": true
},
"languages": ["en", "es", "fr", "de", "pt", "it", "ja", "ko", "zh", "ar", "hi", "ru"]
}
}
// Load the detector's constructor (CommonJS).
const CrisisDetector = require('@raghulpasupathi/crisis-detector');
// Initialize detector
// 'sensitive' is the lowest-threshold mode in the config above (most
// detections, most false positives); enableResources attaches hotline data
// to each analysis result.
const detector = new CrisisDetector({
  detectionMode: 'sensitive',
  enableResources: true
});
// Analyze text for crisis signals
// NOTE(review): top-level `await` is invalid in a CommonJS script — wrap
// this example in an async function or convert it to an ES module.
const result = await detector.analyze('I don\'t want to be here anymore...');
console.log(result);
/* Output:
{
isCrisis: true,
severity: 'high',
urgency: 'immediate',
confidence: 0.87,
categories: {
suicideIdeation: 0.89,
selfHarm: 0.12,
depression: 0.78,
hopelessness: 0.82
},
intent: {
type: 'ideation',
planning: false,
imminent: false,
meansIdentified: false
},
riskLevel: 'high',
indicators: [
{ type: 'suicide_ideation', phrase: "don't want to be here", confidence: 0.91 },
{ type: 'hopelessness', phrase: "anymore", confidence: 0.75 }
],
sentiment: {
overall: -0.85,
depression: 0.82,
anxiety: 0.45,
hopelessness: 0.88
},
recommendedAction: 'immediate_intervention',
resources: {
crisisHotlines: [
{
name: 'National Suicide Prevention Lifeline',
phone: '988',
text: 'Text HOME to 741741',
chat: 'https://suicidepreventionlifeline.org/chat/',
available: '24/7'
},
{
name: 'Crisis Text Line',
text: 'Text HELLO to 741741',
available: '24/7'
}
],
emergencyServices: {
call: '911',
text: 'Text 911 (where available)'
}
},
suggestedMessage: "I'm concerned about you. You're not alone, and there are people who can help. Please reach out to the National Suicide Prevention Lifeline at 988 - they're available 24/7 and want to support you.",
timestamp: '2026-02-20T10:30:00Z'
}
*/
// Quick crisis check
// Boolean-only shortcut — presumably cheaper than a full analyze(); confirm against docs.
const isCrisis = await detector.isCrisis('Some text to check');
// Assess crisis severity
// Returns level/score/urgency plus risk and protective factors (sample output below).
const severity = await detector.assessSeverity('Text expressing distress');
console.log(severity);
/* Output:
{
level: 'high',
score: 0.84,
urgency: 'immediate',
riskFactors: [
'suicide_ideation',
'hopelessness',
'social_isolation'
],
protectiveFactors: [
'help_seeking'
]
}
*/
// Detect specific crisis types
const suicideRisk = await detector.detectSuicideRisk('Text to analyze');
const selfHarmRisk = await detector.detectSelfHarmRisk('Text to analyze');
// Analyze user behavior patterns
// `userId`, `messages`, and `activityData` are placeholders the host
// application supplies; `timeRange` is a lookback window string.
const behaviorAnalysis = await detector.analyzeUserBehavior(userId, {
  recentMessages: messages,
  activityChanges: activityData,
  timeRange: '30d'
});
console.log(behaviorAnalysis);
/* Output:
{
concernLevel: 'elevated',
patterns: [
{
type: 'social_withdrawal',
detected: true,
confidence: 0.78,
description: 'Decreased interaction frequency by 65%'
},
{
type: 'negative_content_increase',
detected: true,
confidence: 0.82,
description: 'Increased negative sentiment in 78% of recent posts'
},
{
type: 'activity_time_change',
detected: true,
confidence: 0.71,
description: 'Shift to late-night activity (12am-4am)'
}
],
riskLevel: 'moderate-high',
recommendation: 'enhanced_monitoring'
}
*/
// Get crisis resources for location
// Filters hotline/text/chat services by country, region, and language.
const resources = await detector.getResources({
  country: 'US',
  state: 'CA',
  language: 'en',
  services: ['crisis_hotline', 'text_line', 'chat_support']
});
// Generate empathetic response
// Produces a message template matched to severity and intent type.
const response = await detector.generateResponse({
  severity: 'high',
  intent: 'ideation',
  includeResources: true,
  tone: 'caring'
});
// Track at-risk user
// Registers the user for ongoing monitoring at the given intensity.
await detector.trackUser(userId, {
  riskLevel: 'high',
  monitoringIntensity: 'enhanced',
  alertContacts: true
});
// Alert safety team
// `analysisResult` is a prior analyze() result supplied by the caller.
await detector.alertSafetyTeam({
  userId: userId,
  severity: 'critical',
  analysis: analysisResult,
  requiresImmediate: true
});
// Send caring outreach
await detector.sendOutreach(userId, {
  type: 'caring_message',
  includeResources: true,
  fromHuman: true
});
// Check user status
// Returns current risk level, monitoring status, and outreach history (sample below).
const status = await detector.getUserStatus(userId);
console.log(status);
/* Output:
{
userId: 'user-123',
currentRiskLevel: 'moderate',
monitoringStatus: 'enhanced',
lastCrisisDetection: '2026-02-18T14:30:00Z',
outreachAttempts: 2,
resourcesProvided: true,
emergencyContactNotified: false,
trend: 'stable'
}
*/
// Event listeners
// Respond to any detected crisis: reach the user first, then loop in humans.
detector.on('crisisDetected', async (crisis) => {
  const { userId, severity, resources } = crisis;
  console.warn('⚠️ CRISIS DETECTED:', crisis);
  // Immediate outreach to the affected user.
  await detector.sendOutreach(userId, { severity, resources });
  // Escalate the full event to the safety team.
  await detector.alertSafetyTeam(crisis);
});
// Highest-urgency path: run the emergency protocol, and for critical
// severity additionally flag a possible wellness check.
detector.on('imminentRisk', async (risk) => {
  console.error('🚨 IMMINENT RISK DETECTED');
  await detector.executeEmergencyProtocol(risk);
  // Wellness check is only considered at critical severity.
  if (risk.severity !== 'critical') {
    return;
  }
  await detector.considerWellnessCheck(risk);
});
// Positive trajectory: acknowledge progress and keep light-touch support going.
// FIX: the callback body uses `await`, so the listener must be declared
// `async` — the original non-async arrow function was a SyntaxError.
detector.on('improvementDetected', async (update) => {
  console.log('✓ User showing improvement:', update);
  // Continue support
  await detector.sendEncouragement(update.userId);
});
// Performance stats
// Synchronous accessor (no await in this example) returning aggregate
// counters — see the sample output below.
const stats = detector.getStats();
console.log(stats);
/* Output:
{
totalAnalyses: 100000,
crisisDetected: 1250,
bySeverity: {
low: 400,
moderate: 550,
high: 250,
critical: 50
},
interventions: 1250,
resourcesProvided: 1250,
emergencyContacts: 75,
positiveOutcomes: 980,
averageResponseTime: '45s'
}
*/
Problem: Too many non-crisis messages flagged
Solution: Raise the thresholds by switching detectionMode from "sensitive" to "moderate" or "conservative", and keep contextAware text analysis enabled so figurative language is less likely to be flagged literally.
Problem: Not detecting users in genuine crisis
Solution: Use the "sensitive" detectionMode (lowest thresholds) and enable behaviorAnalysis and historicalContext so gradual warning signs are caught even when individual messages score below the action threshold.
Problem: Users concerned about monitoring
Solution: Keep the privacy settings (informedConsent, dataMinimization, confidentialLogging) enabled, disclose the monitoring clearly in your terms of service, and restrict access to crisis logs to the safety team.
Problem: Crisis resources not available in user's location
Solution: Enable resources.crisisHotlines.international and call getResources() with the user's country and language so localized hotline, text, and chat services are returned.
Problem: Unsure if interventions are helping
Solution: Track outcomes over time with getUserStatus() (trend, outreachAttempts) and getStats() (interventions, positiveOutcomes), and adjust the detection mode and outreach strategy based on the measured results.
// Complete platform integration
const express = require('express');
const CrisisDetector = require('@raghulpasupathi/crisis-detector');
const app = express();
const detector = new CrisisDetector({ detectionMode: 'sensitive' });
// At-risk user tracking
// In-memory map: userId -> { lastDetection, severity, analysis }.
// State is lost on restart — persist it if continuity of care matters.
const atRiskUsers = new Map();
// Monitor all user-generated content
// POST /api/posts/create — screen every new post for crisis signals before
// publishing. The post is ALWAYS created (crisis content is never censored);
// detection only adds support and escalation side effects. The escalation
// steps run sequentially on purpose: log first, then user-facing response,
// then team alert, then tracking.
app.post('/api/posts/create', async (req, res) => {
  try {
    const { userId, content } = req.body;
    // Analyze for crisis signals
    const analysis = await detector.analyze(content);
    if (analysis.isCrisis) {
      console.warn(`⚠️ Crisis detected for user ${userId}`);
      // Log for safety team (confidential)
      await logCrisisEvent(userId, analysis);
      // Immediate response based on severity
      // (critical severity OR an imminent-intent flag both take the top path)
      if (analysis.severity === 'critical' || analysis.intent.imminent) {
        // CRITICAL: Imminent risk
        console.error(`🚨 IMMINENT RISK: User ${userId}`);
        // Show resources immediately
        await showEmergencyResources(userId, analysis.resources);
        // Alert safety team for immediate outreach
        await alertSafetyTeam({
          userId: userId,
          severity: 'critical',
          analysis: analysis,
          urgent: true
        });
        // Consider emergency services
        // Only riskLevel === 'critical' triggers this extra human-reviewed step.
        if (analysis.riskLevel === 'critical') {
          await considerEmergencyServices(userId, analysis);
        }
        // Track intensively
        await detector.trackUser(userId, {
          riskLevel: 'critical',
          monitoringIntensity: 'maximum'
        });
      } else if (analysis.severity === 'high') {
        // High risk: Immediate support
        await sendCaringMessage(userId, {
          message: analysis.suggestedMessage,
          resources: analysis.resources
        });
        await alertSafetyTeam({
          userId: userId,
          severity: 'high',
          analysis: analysis,
          urgent: false
        });
        await detector.trackUser(userId, {
          riskLevel: 'high',
          monitoringIntensity: 'enhanced'
        });
      } else {
        // Moderate risk (any other severity): Show resources
        await showSupportResources(userId, analysis.resources);
        await detector.trackUser(userId, {
          riskLevel: 'moderate',
          monitoringIntensity: 'standard'
        });
      }
      // Update tracking
      // Feeds the 6-hourly background monitor elsewhere in this file.
      atRiskUsers.set(userId, {
        lastDetection: new Date(),
        severity: analysis.severity,
        analysis: analysis
      });
    }
    // Allow post (don't censor crisis content)
    const post = await createPost(userId, content);
    res.json({
      success: true,
      postId: post.id
    });
  } catch (error) {
    // NOTE(review): a failure in the detector or any helper above also blocks
    // post creation — confirm that is intended, or scope the try/catch to the
    // analysis step only.
    res.status(500).json({ error: error.message });
  }
});
// Background monitoring of at-risk users
// Background monitoring: every 6 hours, re-assess each tracked user over the
// last 24h and adjust outreach accordingly. Users are checked sequentially.
const SIX_HOURS_MS = 6 * 60 * 60 * 1000;
setInterval(async () => {
  // Only the key is needed here; the stored tracking record is unused.
  for (const userId of atRiskUsers.keys()) {
    const behavior = await detector.analyzeUserBehavior(userId, {
      timeRange: '24h'
    });
    if (behavior.concernLevel === 'elevated') {
      // Concern rising: send a check-in with resources attached.
      await sendFollowUp(userId, {
        type: 'check_in',
        resources: true
      });
      continue;
    }
    if (behavior.concernLevel === 'improved') {
      // Positive trend: encourage, and step monitoring down.
      await sendEncouragement(userId);
      await detector.trackUser(userId, {
        riskLevel: 'low',
        monitoringIntensity: 'minimal'
      });
    }
  }
}, SIX_HOURS_MS);
// User dashboard - show resources
// GET /api/mental-health/resources — localized help resources for the
// signed-in user (assumes auth middleware has populated req.user).
// FIXES: removed the unused `userId` local; added a try/catch so a detector
// failure returns a 500 JSON body (consistent with the post-creation route)
// instead of an unhandled promise rejection.
app.get('/api/mental-health/resources', async (req, res) => {
  try {
    const { country, language } = req.user;
    const resources = await detector.getResources({ country, language });
    res.json({
      success: true,
      resources: resources,
      message: 'You\'re not alone. Help is available 24/7.'
    });
  } catch (error) {
    res.status(500).json({ error: error.message });
  }
});
// Safety team dashboard
// GET /admin/safety/crisis-monitor — safety-team-only dashboard listing every
// active crisis alert annotated with the user's live monitoring status.
app.get('/admin/safety/crisis-monitor', requireSafetyTeam, async (req, res) => {
  const activeAlerts = await getActiveCrisisAlerts();
  // Kick off all per-user status lookups in parallel, then await them together.
  const statusLookups = activeAlerts.map(async (alert) => ({
    ...alert,
    status: await detector.getUserStatus(alert.userId),
  }));
  const enriched = await Promise.all(statusLookups);
  res.json({
    success: true,
    alerts: enriched,
    count: enriched.length
  });
});
// Helper functions
/**
 * Deliver a high-priority supportive notification to a user, then record the
 * outreach attempt for the safety team's audit trail.
 *
 * @param {string} userId - Recipient of the notification.
 * @param {{message: string, resources: object}} payload - `message` becomes
 *   the notification body; `resources` is accepted for interface parity but
 *   not used here (the action link points at the resources page instead).
 */
async function sendCaringMessage(userId, { message, resources }) {
  const notification = {
    title: 'We\'re here for you',
    body: message,
    action: {
      text: 'Get help now',
      url: '/mental-health/resources'
    },
    priority: 'high'
  };
  await sendNotification(userId, notification);
  // Record that outreach happened, regardless of whether the user engages.
  await logOutreach(userId, 'caring_message');
}
/**
 * Escalate a possible wellness check to the human safety team.
 *
 * Deliberately human-in-the-loop: this never dispatches emergency services
 * itself, it only raises a flag that requires a team member's decision.
 *
 * @param {string} userId - User the decision concerns.
 * @param {object} analysis - The crisis analysis that triggered escalation.
 */
async function considerEmergencyServices(userId, analysis) {
  const escalation = {
    userId,
    message: 'CRITICAL: Consider wellness check',
    analysis,
    requiresDecision: true
  };
  await alertSafetyTeam(escalation);
}
Working with crisis detection is emotionally demanding: make sure safety-team members have their own mental-health support, rotate on-call duties, and debrief after critical incidents.
Generated Mar 1, 2026
Integrate the Crisis Detector into mental health support chatbots to monitor user conversations in real-time. It can identify high-risk statements and automatically provide crisis resources or escalate to human counselors, ensuring timely intervention for users expressing suicidal thoughts.
Deploy the skill on social media platforms to scan posts and direct messages for self-harm or suicide ideation signals. It can trigger alerts to platform moderators, offer support resources to at-risk users, and help comply with safety regulations by proactively addressing mental health emergencies.
Use the Crisis Detector in corporate wellness apps to analyze employee feedback or chat logs for signs of distress. It can connect employees to mental health resources, notify HR for follow-up, and track patterns to improve workplace mental health initiatives.
Implement the skill in university or school counseling systems to monitor student communications and online forums. It can detect crises early, provide immediate hotline information, and alert campus safety teams for urgent cases, enhancing student mental health services.
Integrate the detector into telehealth platforms to assess patient messages or session transcripts for suicide risk. It can supplement clinician evaluations by flagging high-severity cases, suggesting interventions, and automating resource sharing during virtual consultations.
Offer the Crisis Detector as a cloud-based service with tiered subscription plans based on usage volume and features. Revenue comes from monthly or annual fees charged to businesses like healthcare providers or social platforms, with add-ons for custom integrations or premium support.
Monetize the skill through an API that charges per analysis request or data processed. This model suits developers and companies with variable usage, allowing them to scale costs based on demand while generating revenue from high-volume users in industries like tech or education.
Sell enterprise licenses for on-premise or custom deployments to large organizations such as corporations or government agencies. Revenue includes upfront licensing fees, implementation support, and ongoing maintenance contracts, targeting clients needing dedicated, secure crisis detection systems.
💬 Integration Tip
Start by configuring the detectionMode to 'moderate' for balanced sensitivity, and ensure privacy settings like informedConsent are enabled to comply with data protection regulations.
iMessage/SMS CLI for listing chats, history, watch, and sending.
Use when you need to control Discord from Clawdbot via the discord tool: send messages, react, post or upload stickers, upload emojis, run polls, manage threads/pins/search, fetch permissions or member/role/channel info, or handle moderation actions in Discord DMs or channels.
Use when you need to control Slack from Clawdbot via the slack tool, including reacting to messages or pinning/unpinning items in Slack channels or DMs.
Send WhatsApp messages to other people or search/sync WhatsApp history via the wacli CLI (not for normal user chats).
Build or update the BlueBubbles external channel plugin for Clawdbot (extension package, REST send/probe, webhook inbound).
OpenClaw skill for designing Telegram Bot API workflows and command-driven conversations using direct HTTPS requests (no SDKs).