Examples

Real-world implementations showing how to use the MCP Proxy Wrapper in different scenarios.

Basic AI Service

A simple AI analysis service enhanced with summarization and chat-memory plugins:

import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js';
import { wrapWithProxy, LLMSummarizationPlugin, ChatMemoryPlugin } from 'mcp-proxy-wrapper';
import { z } from 'zod';
 
// Create base server
const server = new McpServer({
  name: 'ai-analysis-service',
  version: '1.0.0'
});
 
// Configure enhancement plugins
const summaryPlugin = new LLMSummarizationPlugin();
summaryPlugin.updateConfig({
  options: {
    provider: 'mock', // Use 'openai' for production
    summarizeTools: ['sentiment-analysis', 'text-summary'],
    minContentLength: 50
  }
});
 
const memoryPlugin = new ChatMemoryPlugin();
memoryPlugin.updateConfig({
  options: {
    saveResponses: true,
    maxEntries: 100
  }
});
 
// Wrap with proxy
const proxiedServer = await wrapWithProxy(server, {
  plugins: [summaryPlugin, memoryPlugin]
});
 
// Register analysis tools
proxiedServer.tool('sentiment-analysis', {
  text: z.string().min(1, 'Text is required'),
  language: z.string().optional()
}, async (args) => {
  const sentiment = await analyzeSentiment(args.text, args.language);
  
  return {
    content: [{
      type: 'text',
      text: JSON.stringify({
        sentiment: sentiment.label,
        confidence: sentiment.confidence,
        text: args.text
      }, null, 2)
    }]
  };
});
 
proxiedServer.tool('text-summary', {
  text: z.string().min(10, 'Text must be at least 10 characters'),
  maxLength: z.number().optional().default(100)
}, async (args) => {
  const summary = await generateSummary(args.text, args.maxLength);
  
  return {
    content: [{
      type: 'text', 
      text: summary
    }]
  };
});
 
// Start server
const transport = new StdioServerTransport();
await proxiedServer.connect(transport);
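
The example references analyzeSentiment and generateSummary helpers that the proxy wrapper does not provide. A minimal mock sketch is shown below; replace these hypothetical stand-ins with calls to your real analysis backend:

// Hypothetical stand-ins for your real AI provider
async function analyzeSentiment(text: string, language?: string) {
  // Replace with a call to your sentiment model or API
  return { label: 'positive', confidence: 0.95 };
}

async function generateSummary(text: string, maxLength = 100) {
  // Naive truncation standing in for a real summarizer
  return text.length <= maxLength ? text : `${text.slice(0, maxLength)}...`;
}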

Multi-Tenant SaaS Platform

A business-intelligence SaaS platform with AI summarization of analysis results and long-lived conversation memory:

// Enhanced SaaS platform with AI summarization and memory
const summaryPlugin = new LLMSummarizationPlugin();
summaryPlugin.updateConfig({
  options: {
    provider: 'openai',
    openaiApiKey: process.env.OPENAI_API_KEY,
    model: 'gpt-4o-mini',
    summarizeTools: ['market-analysis', 'competitor-research'],
    minContentLength: 500, // Longer threshold for business data
    saveOriginal: true
  }
});
 
const memoryPlugin = new ChatMemoryPlugin();
memoryPlugin.updateConfig({
  options: {
    saveResponses: true,
    enableChat: true,
    maxEntries: 5000, // Higher limit for business use
    sessionTimeout: 7 * 24 * 60 * 60 * 1000 // 1 week
  }
});
 
const proxiedServer = await wrapWithProxy(server, {
  plugins: [memoryPlugin, summaryPlugin] // Memory first, then summarization
});
 
// Business intelligence tools with AI enhancement
proxiedServer.tool('market-analysis', {
  company: z.string(),
  metrics: z.array(z.string()),
  timeframe: z.enum(['1M', '3M', '6M', '1Y']),
  userId: z.string().optional()
}, async (args) => {
  const analysis = await performMarketAnalysis(args);
  // Plugin automatically summarizes complex analysis data
  return { content: [{ type: 'text', text: JSON.stringify(analysis, null, 2) }] };
});
 
proxiedServer.tool('competitor-research', {
  industry: z.string(),
  region: z.string().optional(),
  userId: z.string().optional()
}, async (args) => {
  const research = await conductCompetitorResearch(args);
  // Plugin saves research to memory for future reference
  return { content: [{ type: 'text', text: JSON.stringify(research, null, 2) }] };
});
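
Because the memory plugin stores each response, earlier analyses can be pulled back later. A hedged sketch using the getConversationHistory method that appears in the integration tests further down this page (the user ID and entry shape are assumptions):

// Retrieve what a given user has already analyzed in this session
const recentAnalyses = memoryPlugin.getConversationHistory('user-42', 20);
recentAnalyses.forEach((entry) => {
  console.log(entry.response.content); // stored tool output
});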

Gaming Platform with Usage Tracking

A gaming service that stores player sessions in memory and summarizes long AI dungeon-master narratives:

// Gaming platform with memory and AI summarization
const memoryPlugin = new ChatMemoryPlugin();
memoryPlugin.updateConfig({
  options: {
    saveResponses: true,
    enableChat: true,
    maxEntries: 2000, // Store lots of game sessions
    maxSessions: 500, // Support many concurrent players
    excludeTools: [] // Save all gaming tools
  }
});
 
const summaryPlugin = new LLMSummarizationPlugin();
summaryPlugin.updateConfig({
  options: {
    provider: 'openai',
    openaiApiKey: process.env.OPENAI_API_KEY,
    summarizeTools: ['ai-dungeon-master'], // Summarize long narrative responses
    minContentLength: 200,
    saveOriginal: true
  }
});
 
const proxiedServer = await wrapWithProxy(server, {
  plugins: [memoryPlugin, summaryPlugin]
});
 
// Gaming tools with user tracking
proxiedServer.tool('generate-character', {
  class: z.enum(['warrior', 'mage', 'rogue', 'cleric']),
  level: z.number().min(1).max(20),
  background: z.string().optional(),
  userId: z.string()
}, async (args) => {
  const character = await generateCharacter(args);
  return { content: [{ type: 'text', text: JSON.stringify(character) }] };
});
 
proxiedServer.tool('ai-dungeon-master', {
  scenario: z.string(),
  playerAction: z.string(),
  context: z.string().optional(),
  userId: z.string()
}, async (args) => {
  const response = await generateDMResponse(args);
  return { content: [{ type: 'text', text: response }] };
});

Development Tools API

A developer-focused API where long code reviews and security scans are summarized and saved to memory for later reference:

// Developer tools with AI summarization and memory
const summaryPlugin = new LLMSummarizationPlugin();
summaryPlugin.updateConfig({
  options: {
    provider: 'openai',
    openaiApiKey: process.env.OPENAI_API_KEY,
    summarizeTools: ['code-review', 'security-scan'],
    minContentLength: 300, // Code reviews can be long
    saveOriginal: true
  }
});
 
const memoryPlugin = new ChatMemoryPlugin();
memoryPlugin.updateConfig({
  options: {
    saveResponses: true,
    enableChat: true,
    maxEntries: 1000,
    sessionTimeout: 2 * 24 * 60 * 60 * 1000 // 2 days for dev work
  }
});
 
const proxiedServer = await wrapWithProxy(server, {
  plugins: [memoryPlugin, summaryPlugin]
});
 
// Development tools with AI enhancement
proxiedServer.tool('code-review', {
  code: z.string(),
  language: z.string(),
  focusAreas: z.array(z.enum(['security', 'performance', 'maintainability', 'style'])).optional(),
  userId: z.string().optional()
}, async (args) => {
  const review = await performCodeReview(args);
  // Plugin automatically summarizes detailed code review results
  return { content: [{ type: 'text', text: JSON.stringify(review, null, 2) }] };
});
 
proxiedServer.tool('security-scan', {
  code: z.string(),
  language: z.string(),
  scanType: z.enum(['static', 'dependency', 'comprehensive']).default('comprehensive'),
  userId: z.string().optional()
}, async (args) => {
  const vulnerabilities = await scanForVulnerabilities(args);
  // Plugin saves scan results to memory for future reference
  return { content: [{ type: 'text', text: JSON.stringify(vulnerabilities, null, 2) }] };
});

Blockchain Analytics Server

Enhancing a Web3 analytics server with AI summarization and memory capabilities:

import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js';
import { wrapWithProxy, LLMSummarizationPlugin, ChatMemoryPlugin } from 'mcp-proxy-wrapper';
import { z } from 'zod';
 
// Create blockchain analytics server
const server = new McpServer({
  name: 'web3-stats-server-enhanced',
  version: '2.0.0'
}, {
  capabilities: { tools: {} }
});
 
// Configure AI enhancement plugins
const summaryPlugin = new LLMSummarizationPlugin();
summaryPlugin.updateConfig({
  options: {
    provider: process.env.NODE_ENV === 'production' ? 'openai' : 'mock',
    openaiApiKey: process.env.OPENAI_API_KEY, // Set via environment
    model: 'gpt-4o-mini',
    summarizeTools: [
      'get_evm_balances', 
      'get_evm_transactions',
      'get_evm_collectibles',
      'get_token_holders'
    ],
    minContentLength: 200, // Blockchain data can be verbose
    saveOriginal: true
  },
  enabled: true,
  priority: 10
});
 
const memoryPlugin = new ChatMemoryPlugin();
memoryPlugin.updateConfig({
  options: {
    provider: process.env.NODE_ENV === 'production' ? 'openai' : 'mock',
    openaiApiKey: process.env.OPENAI_API_KEY, // Set via environment
    saveResponses: true,
    enableChat: true,
    maxEntries: 2000, // Store lots of blockchain queries
    sessionTimeout: 24 * 60 * 60 * 1000, // 24 hours for analysis sessions
    excludeTools: ['ping_dune_server', 'ping_blockscout'] // Skip health checks
  },
  enabled: true,
  priority: 20
});
 
// Wrap server with AI enhancement
const proxiedServer = await wrapWithProxy(server, {
  plugins: [
    { plugin: memoryPlugin, config: { priority: 20 } },
    { plugin: summaryPlugin, config: { priority: 10 } }
  ],
  debug: process.env.NODE_ENV !== 'production'
});
 
// Blockchain analysis tools (now AI-enhanced)
proxiedServer.tool('get_evm_balances', 'Get wallet token balances with AI insights', {
  walletAddress: z.string().describe('EVM wallet address (0x...)'),
  chainId: z.string().optional().describe('Chain ID (1 for Ethereum)'),
  limit: z.number().optional().describe('Max results to return')
}, async ({ walletAddress, chainId = '1', limit = 50 }) => {
  // Call external blockchain API (Dune, Alchemy, etc.)
  const balances = await fetchWalletBalances(walletAddress, chainId, limit);
  
  return {
    content: [{
      type: 'text',
      text: JSON.stringify(balances, null, 2)
    }]
  };
  // AI plugin will automatically:
  // 1. Generate summary: "This wallet holds $X.XX across Y tokens..."
  // 2. Save to memory for chat: "Tell me about wallets with high ETH balances"
});
 
proxiedServer.tool('get_evm_transactions', 'Get transaction history with AI analysis', {
  walletAddress: z.string().describe('EVM wallet address'),
  limit: z.number().optional().describe('Number of transactions'),
  includeTokenTransfers: z.boolean().optional().describe('Include token transfers')
}, async ({ walletAddress, limit = 25, includeTokenTransfers = true }) => {
  const transactions = await fetchTransactionHistory(
    walletAddress, 
    limit, 
    includeTokenTransfers
  );
  
  return {
    content: [{
      type: 'text',
      text: JSON.stringify(transactions, null, 2)
    }]
  };
  // AI enhancement provides:
  // - Summary of transaction patterns
  // - Memory storage for behavioral analysis
  // - Chat interface: "What DeFi protocols does this wallet use?"
});
 
proxiedServer.tool('get_token_holders', 'Analyze token distribution with insights', {
  contractAddress: z.string().describe('Token contract address'),
  chainId: z.string().optional().describe('Chain ID'),
  limit: z.number().optional().describe('Number of holders to analyze')
}, async ({ contractAddress, chainId = '1', limit = 100 }) => {
  const holders = await analyzeTokenHolders(contractAddress, chainId, limit);
  
  return {
    content: [{
      type: 'text',
      text: JSON.stringify(holders, null, 2)
    }]
  };
  // AI creates executive summaries of:
  // - Holder concentration analysis
  // - Whale identification
  // - Distribution patterns
});
 
// Helper functions (implement with your preferred blockchain data provider)
async function fetchWalletBalances(address: string, chainId: string, limit: number) {
  // Example using environment-based API configuration
  const apiKey = process.env.BLOCKCHAIN_API_KEY; // Not committed to git!
  const response = await fetch(`https://api.example.com/v1/balances/${address}?chain=${chainId}&limit=${limit}`, {
    headers: { 'Authorization': `Bearer ${apiKey}` }
  });
  return response.json();
}
 
async function fetchTransactionHistory(address: string, limit: number, includeTokens: boolean) {
  const apiKey = process.env.BLOCKCHAIN_API_KEY;
  const response = await fetch(`https://api.example.com/v1/transactions/${address}?limit=${limit}&tokens=${includeTokens}`, {
    headers: { 'Authorization': `Bearer ${apiKey}` }
  });
  return response.json();
}
 
async function analyzeTokenHolders(contract: string, chainId: string, limit: number) {
  const apiKey = process.env.BLOCKCHAIN_API_KEY;
  const response = await fetch(`https://api.example.com/v1/tokens/${contract}/holders?chain=${chainId}&limit=${limit}`, {
    headers: { 'Authorization': `Bearer ${apiKey}` }
  });
  return response.json();
}
 
// Start the enhanced blockchain server
const transport = new StdioServerTransport();
await proxiedServer.connect(transport);
console.log('Enhanced Web3 Analytics Server started with AI capabilities');
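
A client can then connect over stdio and call the enhanced tools. A minimal sketch using the MCP TypeScript client SDK; the command path and client name are assumptions, so adjust them to your build output:

import { Client } from '@modelcontextprotocol/sdk/client/index.js';
import { StdioClientTransport } from '@modelcontextprotocol/sdk/client/stdio.js';

// Spawn the server process and connect over stdio
const clientTransport = new StdioClientTransport({
  command: 'node',
  args: ['dist/index.js'] // assumed entry point
});

const client = new Client({ name: 'web3-analytics-client', version: '1.0.0' });
await client.connect(clientTransport);

// Call the AI-enhanced balance tool
const result = await client.callTool({
  name: 'get_evm_balances',
  arguments: { walletAddress: '0x1234567890123456789012345678901234567890' }
});
console.log(result.content);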

Environment Configuration for Blockchain Server

# .env file (never commit this!)
NODE_ENV=production
BLOCKCHAIN_API_KEY=your_blockchain_api_key_here
OPENAI_API_KEY=your_openai_api_key_here
 
# For local development, use this instead:
NODE_ENV=development
# (mock providers are used automatically, so no API keys are required)
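
These variables can be loaded with dotenv during local development (dotenv is an assumption here, not a dependency of the proxy wrapper); a minimal sketch:

// Load .env before any module reads process.env
import 'dotenv/config';

if (process.env.NODE_ENV === 'production' && !process.env.OPENAI_API_KEY) {
  throw new Error('OPENAI_API_KEY must be set when provider is "openai"');
}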

Docker Configuration for Web3 Server

# Dockerfile
FROM node:18-alpine
 
WORKDIR /app
COPY package*.json ./
RUN npm ci --only=production
 
COPY dist/ ./dist/
 
# Never include API keys in Docker images!
# Use environment variables or secrets management
EXPOSE 3000
 
CMD ["node", "dist/index.js"]

# docker-compose.yml
version: '3.8'
services:
  web3-analytics:
    build: .
    environment:
      - NODE_ENV=production
      # Use Docker secrets or external config for API keys
      - BLOCKCHAIN_API_KEY_FILE=/run/secrets/blockchain_api_key
      - OPENAI_API_KEY_FILE=/run/secrets/openai_api_key
    secrets:
      - blockchain_api_key
      - openai_api_key
    ports:
      - "3000:3000"
 
secrets:
  blockchain_api_key:
    external: true
  openai_api_key:
    external: true
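
The compose file injects *_FILE paths rather than raw values, so the server needs to resolve them at startup. A hedged helper sketch (the readSecret function is illustrative, not part of the wrapper):

import { readFileSync } from 'node:fs';

// Resolve a secret from a Docker secrets file (*_FILE) or fall back to a plain env var
function readSecret(name: string): string | undefined {
  const filePath = process.env[`${name}_FILE`];
  if (filePath) {
    return readFileSync(filePath, 'utf-8').trim();
  }
  return process.env[name];
}

const blockchainApiKey = readSecret('BLOCKCHAIN_API_KEY');
const openaiApiKey = readSecret('OPENAI_API_KEY');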

Testing Blockchain AI Enhancement

// tests/blockchain.test.ts
import { describe, test, expect } from '@jest/globals';
import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
import { wrapWithProxy, LLMSummarizationPlugin, ChatMemoryPlugin } from 'mcp-proxy-wrapper';
 
describe('Blockchain AI Enhancement', () => {
  test('enhances wallet balance analysis', async () => {
    const server = new McpServer({ name: 'test-web3-server', version: '1.0.0' });
    
    // Mock blockchain tool
    server.tool('get_evm_balances', {}, async () => ({
      content: [{
        type: 'text',
        text: JSON.stringify({
          address: '0x1234...5678',
          totalValue: 150000.50,
          tokens: [
            { symbol: 'ETH', balance: '50.5', value: 100000 },
            { symbol: 'USDC', balance: '50000', value: 50000 }
          ]
        })
      }]
    }));
    
    const summaryPlugin = new LLMSummarizationPlugin();
    summaryPlugin.updateConfig({
      options: {
        provider: 'mock',
        summarizeTools: ['get_evm_balances'],
        minContentLength: 50
      }
    });
    
    const proxiedServer = await wrapWithProxy(server, { 
      plugins: [summaryPlugin] 
    });
    
    const result = await proxiedServer.callTool('get_evm_balances', {
      walletAddress: '0x1234567890123456789012345678901234567890'
    });
    
    // Verify AI enhancement
    expect(result._meta?.summarized).toBe(true);
    expect(result.content[0].text).toContain('Summary:');
    expect(result._meta?.originalStorageKey).toBeDefined();
  });
});

Content Platform with Usage Limits

A content creation platform with tiered tools, AI summarization of premium output, and long-lived content memory:

// Content platform with AI summarization and memory
const summaryPlugin = new LLMSummarizationPlugin();
summaryPlugin.updateConfig({
  options: {
    provider: 'openai',
    openaiApiKey: process.env.OPENAI_API_KEY,
    summarizeTools: ['advanced-article'], // Only summarize premium content
    minContentLength: 500,
    saveOriginal: true
  }
});
 
const memoryPlugin = new ChatMemoryPlugin();
memoryPlugin.updateConfig({
  options: {
    saveResponses: true,
    enableChat: true,
    maxEntries: 3000, // Store lots of content
    sessionTimeout: 30 * 24 * 60 * 60 * 1000, // 30 days for content work
    excludeTools: [] // Save all content creation
  }
});
 
const proxiedServer = await wrapWithProxy(server, {
  plugins: [memoryPlugin, summaryPlugin]
});
 
// Content creation tools with user tiers
proxiedServer.tool('simple-blog-post', {
  topic: z.string(),
  tone: z.enum(['professional', 'casual', 'humorous']).default('professional'),
  length: z.enum(['short', 'medium', 'long']).default('medium'),
  userId: z.string()
}, async (args) => {
  const post = await generateBlogPost(args);
  return { content: [{ type: 'text', text: post }] };
});
 
proxiedServer.tool('advanced-article', {  // Premium only
  topic: z.string(),
  sources: z.array(z.string()),
  seoKeywords: z.array(z.string()),
  targetAudience: z.string(),
  userId: z.string()
}, async (args) => {
  const article = await generateAdvancedArticle(args);
  return { content: [{ type: 'text', text: article }] };
});

Production Configuration Examples

Environment-Based Setup

// config/index.ts
interface Config {
  database: string;
  logLevel: string;
  rateLimits: Record<string, number>;
  cacheSettings: {
    ttl: number;
    maxSize: number;
  };
}
 
const configs: Record<string, Config> = {
  development: {
    database: 'sqlite:./dev.db',
    logLevel: 'debug',
    rateLimits: {
      'free': 10,
      'premium': 1000
    },
    cacheSettings: {
      ttl: 60000,  // 1 minute for testing
      maxSize: 100
    }
  },
  production: {
    database: process.env.DATABASE_URL!,
    logLevel: 'info',
    rateLimits: {
      'free': 100,
      'premium': 10000,
      'enterprise': 100000
    },
    cacheSettings: {
      ttl: 300000,  // 5 minutes
      maxSize: 10000
    }
  }
};
 
export const config = configs[process.env.NODE_ENV || 'development'];
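
The exported config can then drive the wrapper setup. In the illustrative sketch below, only the debug option is part of the wrapper API shown in these examples; rateLimits and cacheSettings would be consumed by whatever plugins you write:

import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
import { wrapWithProxy } from 'mcp-proxy-wrapper';
import { config } from './config/index.js';

const server = new McpServer({ name: 'production-service', version: '1.0.0' });

const proxiedServer = await wrapWithProxy(server, {
  // Wire your own plugins here, configured from config.rateLimits / config.cacheSettings
  plugins: [],
  debug: config.logLevel === 'debug'
});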

Docker Deployment

# Dockerfile
FROM node:18-alpine
 
WORKDIR /app
 
COPY package*.json ./
RUN npm ci --only=production
 
COPY dist/ ./dist/
 
EXPOSE 3000
 
CMD ["node", "dist/index.js"]

# docker-compose.yml
version: '3.8'
services:
  mcp-server:
    build: .
    ports:
      - "3000:3000"
    environment:
      - NODE_ENV=production
      - DATABASE_URL=postgresql://user:pass@db:5432/mcpserver
      - LOG_LEVEL=info
      - CACHE_TTL=300000
    depends_on:
      - db
  
  db:
    image: postgres:15
    environment:
      POSTGRES_DB: mcpserver
      POSTGRES_USER: user
      POSTGRES_PASSWORD: pass
    volumes:
      - postgres_data:/var/lib/postgresql/data
 
volumes:
  postgres_data:

Testing Examples

Integration Testing

// tests/integration.test.ts
import { describe, test, expect, beforeEach } from '@jest/globals';
import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
import { wrapWithProxy, LLMSummarizationPlugin, ChatMemoryPlugin } from 'mcp-proxy-wrapper';
import { z } from 'zod';
 
describe('Plugin Integration Tests', () => {
  let proxiedServer: any;
  
  beforeEach(async () => {
    // Create test server with sample tool
    const server = new McpServer({ name: 'test-server', version: '1.0.0' });
    
    server.tool('test-tool', {
      text: z.string()
    }, async (args) => {
      return {
        content: [{ 
          type: 'text', 
          text: `This is a long response that should be summarized because it exceeds the minimum length: ${args.text}` 
        }]
      };
    });
    
    const summaryPlugin = new LLMSummarizationPlugin();
    summaryPlugin.updateConfig({
      options: {
        provider: 'mock', // Use mock for testing
        minContentLength: 50,
        summarizeTools: ['test-tool']
      }
    });
    
    const memoryPlugin = new ChatMemoryPlugin();
    memoryPlugin.updateConfig({
      options: {
        saveResponses: true,
        maxEntries: 10
      }
    });
    
    proxiedServer = await wrapWithProxy(server, { 
      plugins: [memoryPlugin, summaryPlugin] 
    });
  });
 
  test('summarizes long responses', async () => {
    const result = await proxiedServer.callTool('test-tool', {
      text: 'This is a long response that should be summarized by the plugin because it exceeds the minimum length threshold for summarization.'
    });
    expect(result.result._meta?.summarized).toBe(true);
    expect(result.result.content[0].text).toContain('Summary:');
  });
 
  test('saves responses to memory', async () => {
    await proxiedServer.callTool('test-tool', { text: 'Test content', userId: 'user123' });
    
    const memoryPlugin = proxiedServer.plugins.find(p => p.name === 'chat-memory-plugin');
    const history = memoryPlugin.getConversationHistory('user123', 10);
    expect(history.length).toBe(1);
    expect(history[0].response.content).toContain('Test content');
  });
});

Load Testing

// tests/load.test.ts
import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
import { wrapWithProxy } from 'mcp-proxy-wrapper';
import { z } from 'zod';
 
async function loadTest() {
  // Create test server
  const server = new McpServer({ name: 'load-test-server', version: '1.0.0' });
  
  server.tool('test-tool', {
    data: z.string()
  }, async (args) => {
    return {
      content: [{ type: 'text', text: `Processed: ${args.data}` }]
    };
  });
  
  const proxiedServer = await wrapWithProxy(server, { plugins: [] });
  
  const promises = [];
  const startTime = Date.now();
  
  // Simulate 100 concurrent calls
  for (let i = 0; i < 100; i++) {
    promises.push(proxiedServer.callTool('test-tool', { data: `test-${i}` }));
  }
  
  await Promise.all(promises);
  const duration = Date.now() - startTime;
  
  console.log(`Processed 100 calls in ${duration}ms`);
  console.log(`Average: ${duration / 100}ms per call`);
}
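
When the file is executed directly (for example with tsx or ts-node), invoke the function at module scope:

loadTest().catch((error) => {
  console.error('Load test failed:', error);
  process.exit(1);
});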

Common Use Cases

Ready to implement? These examples show real production patterns that you can adapt for your specific use case.

Next Steps