Real-World Examples
Discover how leading companies use NCS-API for clustering solutions across industries. From customer segmentation to fraud detection, explore practical implementations with code examples.
Customer Segmentation for Retail
Segment customers based on purchase behavior, demographics, and engagement patterns to create targeted marketing campaigns with 40% higher conversion rates.
Real-time Fraud Detection
Detect fraudulent transactions in real-time by clustering normal vs. anomalous behavior patterns, reducing false positives by 65%.
Market Research & Analysis
Analyze consumer preferences and market trends by clustering survey responses and behavioral data to identify emerging market opportunities.
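Below is a minimal sketch of how this scenario might look with the same Python client used in the implementation examples further down. The survey_responses.csv file and its feature columns are illustrative assumptions; only the client.cluster call mirrors the API shown in those examples.

# Market research clustering with NCS-API (illustrative sketch)
import pandas as pd
from ncs_api import NCSClient

client = NCSClient(api_key='your-api-key')

# Hypothetical survey export: one row per respondent, numeric answers
surveys = pd.read_csv('survey_responses.csv')
features = [
    'price_sensitivity', 'brand_loyalty',
    'purchase_frequency', 'feature_importance_score'
]

# Cluster respondents into candidate market segments
result = client.cluster(
    algorithm='ncs',
    data=surveys[features].to_dict('records'),
    parameters={'neural_depth': 4, 'learning_rate': 0.03, 'max_clusters': 6}
)

# Profile each segment to spot emerging opportunities
surveys['segment'] = result['clusters']
print(surveys.groupby('segment')[features].mean().round(2))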
Influencer & Content Analysis
Cluster social media users by engagement patterns and content preferences to identify micro-influencers and optimize content strategy.
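A minimal sketch of this use case, assuming a hypothetical engagement export (social_engagement.csv) with the columns shown; the clustering call follows the same pattern as the examples below, and the micro-influencer thresholds are illustrative.

# Influencer discovery with NCS-API (illustrative sketch)
import pandas as pd
from ncs_api import NCSClient

client = NCSClient(api_key='your-api-key')

# Hypothetical per-account engagement metrics
accounts = pd.read_csv('social_engagement.csv')
features = [
    'follower_count', 'avg_engagement_rate',
    'post_frequency', 'audience_overlap_score'
]

# Group accounts by engagement behavior
result = client.cluster(
    algorithm='ncs',
    data=accounts[features].to_dict('records'),
    parameters={'neural_depth': 4, 'learning_rate': 0.03, 'max_clusters': 10}
)
accounts['cluster'] = result['clusters']

# Candidate micro-influencers: smaller audiences with high engagement
micro = accounts[
    (accounts['follower_count'] < 50000) &
    (accounts['avg_engagement_rate'] > 0.05)
]
print(micro.groupby('cluster').size())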
Patient Risk Stratification
Group patients by risk factors and treatment responses to enable personalized care plans and early intervention strategies.
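The sketch below illustrates how patient cohorts could be grouped with the same client; the patients.csv file, its feature columns, and the cluster count are placeholders for your own de-identified data, not clinical guidance.

# Patient risk stratification with NCS-API (illustrative sketch)
import pandas as pd
from ncs_api import NCSClient

client = NCSClient(api_key='your-api-key')

# Hypothetical de-identified patient features
patients = pd.read_csv('patients.csv')
features = [
    'age', 'bmi', 'num_chronic_conditions',
    'readmission_count', 'medication_adherence'
]

# Cluster patients into risk strata
result = client.cluster(
    algorithm='ncs',
    data=patients[features].to_dict('records'),
    parameters={'neural_depth': 4, 'learning_rate': 0.03, 'max_clusters': 5}
)
patients['risk_group'] = result['clusters']

# Summarize each group to prioritize early-intervention outreach
print(patients.groupby('risk_group')[features].mean().round(2))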
Product Recommendation Engine
Create product clusters based on purchase patterns and user preferences to power recommendation systems with 180% higher click-through rates.
Success Story: TechCorp's Customer Journey Revolution
How TechCorp used NCS-API to increase customer lifetime value by 45% through intelligent segmentation
Implementation Examples
Ready-to-use code examples for common clustering scenarios. Copy, paste, and customize for your needs.
# Customer Segmentation with NCS-API
import pandas as pd
from ncs_api import NCSClient
# Initialize client
client = NCSClient(api_key='your-api-key')
# Load customer data
customers = pd.read_csv('customer_data.csv')
# Prepare features for clustering
features = [
    'annual_income', 'spending_score', 'age',
    'years_customer', 'num_purchases'
]
# Run NCS clustering
result = client.cluster(
    algorithm='ncs',
    data=customers[features].to_dict('records'),
    parameters={
        'neural_depth': 4,
        'learning_rate': 0.03,
        'max_clusters': 8
    }
)
# Add cluster labels to dataframe
customers['cluster'] = result['clusters']
# Analyze segments
segment_analysis = customers.groupby('cluster').agg({
    'annual_income': 'mean',
    'spending_score': 'mean',
    'age': 'mean',
    'num_purchases': 'sum'
}).round(2)
print("Customer Segments:")
print(segment_analysis)
# Identify high-value segments
high_value_segments = segment_analysis[
    (segment_analysis['annual_income'] > 50000) &
    (segment_analysis['spending_score'] > 70)
].index.tolist()
print(f"High-value segments: {high_value_segments}")
print(f"Silhouette score: {result['metrics']['silhouette_score']}")
// Real-time Fraud Detection with NCS-API
const { NCSClient } = require('@ncs-api/client');
const client = new NCSClient({
  apiKey: process.env.NCS_API_KEY,
  baseURL: 'https://api.ncs-clustering.com/v1'
});
// Real-time transaction analysis
async function analyzeTransaction(transaction) {
  try {
    // Extract features
    const features = {
      amount: transaction.amount,
      merchant_category: transaction.merchant_category,
      hour_of_day: new Date(transaction.timestamp).getHours(),
      is_weekend: isWeekend(transaction.timestamp),
      location_risk_score: await getLocationRisk(transaction.location),
      velocity_score: await getVelocityScore(transaction.user_id),
      device_risk_score: await getDeviceRisk(transaction.device_id)
    };

    // Run clustering analysis
    const result = await client.cluster({
      algorithm: 'ncs',
      data: [features],
      parameters: {
        neural_depth: 3,
        learning_rate: 0.05
      },
      reference_clusters: await getReferenceClusters() // Historical patterns
    });

    // Determine if transaction is anomalous
    const cluster = result.clusters[0];
    const anomaly_score = result.anomaly_scores[0];

    if (anomaly_score > 0.85) {
      await flagAsFraud(transaction, {
        cluster,
        anomaly_score,
        processing_time: result.processing_time
      });
      return { status: 'fraud', confidence: anomaly_score };
    }

    return { status: 'normal', confidence: 1 - anomaly_score };
  } catch (error) {
    console.error('Fraud detection error:', error);
    return { status: 'error', message: error.message };
  }
}
// Helper functions
function isWeekend(timestamp) {
  const day = new Date(timestamp).getDay();
  return day === 0 || day === 6;
}

async function getLocationRisk(location) {
  // Implementation for location risk scoring
  return Math.random(); // Placeholder
}

async function getVelocityScore(userId) {
  // Implementation for transaction velocity analysis
  return Math.random(); // Placeholder
}

async function getDeviceRisk(deviceId) {
  // Implementation for device fingerprint risk scoring
  return Math.random(); // Placeholder
}

async function getReferenceClusters() {
  // Load historical cluster patterns (e.g. from a cache or database)
  return []; // Placeholder
}

async function flagAsFraud(transaction, details) {
  // Route the flagged transaction to your review/alerting pipeline
  console.warn('Flagged as fraud:', transaction.id, details); // Placeholder
}
// Example usage
const transaction = {
  id: 'txn_123456',
  user_id: 'user_789',
  amount: 2500.00,
  merchant_category: 'electronics',
  timestamp: new Date().toISOString(),
  location: { lat: 40.7128, lng: -74.0060 },
  device_id: 'device_abc'
};

analyzeTransaction(transaction)
  .then(result => console.log('Analysis result:', result));
# Product Recommendation System with NCS-API
import numpy as np
import pandas as pd
from ncs_api import NCSClient
from sklearn.preprocessing import StandardScaler
class RecommendationEngine:
    def __init__(self, api_key):
        self.client = NCSClient(api_key=api_key)
        self.scaler = StandardScaler()
        self.product_clusters = None
        self.user_profiles = None

    def train_product_clusters(self, product_data):
        """Cluster products based on features and user interactions"""
        # Prepare product features
        features = [
            'price', 'rating', 'num_reviews', 'category_id',
            'brand_popularity', 'seasonal_score', 'view_count',
            'purchase_count', 'cart_add_count'
        ]

        # Normalize features
        normalized_data = self.scaler.fit_transform(product_data[features])
        product_features = pd.DataFrame(
            normalized_data,
            columns=features,
            index=product_data.index
        )

        # Run clustering
        result = self.client.cluster(
            algorithm='ncs',
            data=product_features.to_dict('records'),
            parameters={
                'neural_depth': 5,
                'learning_rate': 0.02,
                'max_clusters': 25
            }
        )

        # Store results
        self.product_clusters = pd.DataFrame({
            'product_id': product_data['product_id'],
            'cluster': result['clusters'],
            'cluster_strength': result['cluster_probabilities']
        })

        return result['metrics']

    def build_user_profiles(self, user_interactions):
        """Build user preference profiles based on interaction history"""
        # Calculate user preferences by cluster
        user_cluster_prefs = []

        for user_id in user_interactions['user_id'].unique():
            user_data = user_interactions[
                user_interactions['user_id'] == user_id
            ]

            # Merge with product clusters
            user_products = user_data.merge(
                self.product_clusters,
                on='product_id'
            )

            # Calculate cluster preferences
            cluster_weights = user_products.groupby('cluster').agg({
                'rating': 'mean',
                'purchase_count': 'sum',
                'view_time': 'mean'
            }).fillna(0)

            # Normalize preferences
            cluster_prefs = (
                cluster_weights['rating'] * 0.4 +
                np.log1p(cluster_weights['purchase_count']) * 0.4 +
                np.log1p(cluster_weights['view_time']) * 0.2
            )

            user_cluster_prefs.append({
                'user_id': user_id,
                'preferences': cluster_prefs.to_dict()
            })

        self.user_profiles = pd.DataFrame(user_cluster_prefs)

    def get_recommendations(self, user_id, num_recommendations=10):
        """Generate product recommendations for a user"""
        # Get user preferences
        user_prefs = self.user_profiles[
            self.user_profiles['user_id'] == user_id
        ]['preferences'].iloc[0]

        # Score products based on cluster preferences
        scored_products = []
        for _, product in self.product_clusters.iterrows():
            cluster = product['cluster']
            cluster_strength = product['cluster_strength']
            user_cluster_pref = user_prefs.get(cluster, 0)

            score = user_cluster_pref * cluster_strength
            scored_products.append({
                'product_id': product['product_id'],
                'score': score,
                'cluster': cluster
            })

        # Sort by score and return top recommendations
        recommendations = sorted(
            scored_products,
            key=lambda x: x['score'],
            reverse=True
        )[:num_recommendations]

        return recommendations
# Example usage
engine = RecommendationEngine(api_key='your-api-key')
# Load data
products = pd.read_csv('products.csv')
interactions = pd.read_csv('user_interactions.csv')
# Train the system
metrics = engine.train_product_clusters(products)
print(f"Clustering quality: {metrics['silhouette_score']}")
engine.build_user_profiles(interactions)
# Get recommendations
recommendations = engine.get_recommendations('user_123', num_recommendations=5)
print("Recommendations:", recommendations)