Real-World AI Code Examples

Learn with Practical Code

Explore real-world AI code examples with full implementations. Copy, download, and learn from production-ready code snippets in multiple programming languages.

AI Code Completion Assistant

Build an intelligent code completion system using OpenAI Codex API

Code Generation · TypeScript · Intermediate
import { Configuration, OpenAIApi } from 'openai'

// Shared OpenAI API client, configured from the OPENAI_API_KEY environment
// variable. Reused by getCodeCompletion below; create it once per process.
const configuration = new Configuration({
  apiKey: process.env.OPENAI_API_KEY,
})
const openai = new OpenAIApi(configuration)

/**
 * Asks the Codex completion endpoint to continue `prompt`, hinting the
 * target language via a leading comment. Returns the trimmed suggestion,
 * or an empty string when the API yields no text.
 * @throws Error when the API call fails (the raw error is logged first).
 */
export async function getCodeCompletion(prompt: string, language: string) {
  const request = {
    model: 'code-davinci-002',
    prompt: `// ${language} code completion\n${prompt}`,
    max_tokens: 150,
    temperature: 0.2,
    stop: ['\n\n']
  }

  try {
    const result = await openai.createCompletion(request)
    const suggestion = result.data.choices[0]?.text
    return suggestion ? suggestion.trim() : ''
  } catch (error) {
    console.error('Code completion error:', error)
    throw new Error('Failed to generate code completion')
  }
}

// Usage example: complete a partial JavaScript function definition.
async function example() {
  const snippet = 'function calculateFibonacci(n) {'
  const suggestion = await getCodeCompletion(snippet, 'JavaScript')
  console.log('Completed code:', snippet + suggestion)
}

Key Features

OpenAI Integration · Error Handling · TypeScript Support · Async/Await

Use Cases

IDE Extensions, Code Editors, Developer Tools

Automated Code Review System

Create an AI-powered code review system that analyzes code quality and suggests improvements

Code Analysis · Python · Advanced
import openai
import ast
import re
from typing import List, Dict, Any

class AICodeReviewer:
    """Hybrid code reviewer: a local syntax pass plus an LLM-backed review."""

    def __init__(self, api_key: str):
        # Register the caller's credentials with the global OpenAI client.
        openai.api_key = api_key

    def analyze_code(self, code: str, language: str = 'python') -> Dict[str, Any]:
        """Analyze code and provide AI-powered review"""
        found_issues = self._check_syntax(code, language)
        review = self._get_ai_review(code, language)
        return {
            'syntax_issues': found_issues,
            'ai_suggestions': review,
            'overall_score': self._calculate_score(found_issues, review),
        }

    def _check_syntax(self, code: str, language: str) -> List[str]:
        """Check for basic syntax issues"""
        # Only Python can be parsed locally; other languages report nothing.
        if language.lower() != 'python':
            return []
        try:
            ast.parse(code)
        except SyntaxError as e:
            return [f"Syntax Error: {e.msg} at line {e.lineno}"]
        return []

    def _get_ai_review(self, code: str, language: str) -> Dict[str, Any]:
        """Get AI-powered code review"""
        prompt = f"""
        Review this {language} code and provide suggestions for:
        1. Code quality and best practices
        2. Performance optimizations
        3. Security considerations
        4. Readability improvements

        Code:
        {code}

        Provide specific, actionable feedback:
        """

        try:
            reply = openai.ChatCompletion.create(
                model="gpt-4",
                messages=[{"role": "user", "content": prompt}],
                max_tokens=500,
                temperature=0.3,
            )
        except Exception as e:
            # Degrade gracefully: a failed review carries zero confidence.
            return {
                'suggestions': f"AI review failed: {str(e)}",
                'confidence': 0.0,
            }
        return {
            'suggestions': reply.choices[0].message.content,
            'confidence': 0.85,
        }

    def _calculate_score(self, syntax_issues: List[str], ai_review: Dict[str, Any]) -> float:
        """Calculate overall code quality score"""
        # Start at 100, subtract 20 per syntax issue, then scale by the AI
        # review's confidence (0.5 when absent); clamp to [0, 100].
        raw = (100.0 - 20 * len(syntax_issues)) * ai_review.get('confidence', 0.5)
        return max(0.0, min(100.0, raw))

# Usage example: review a naive recursive Fibonacci implementation.
reviewer = AICodeReviewer('your-openai-api-key')

sample = '''
def fibonacci(n):
    if n <= 1:
        return n
    return fibonacci(n-1) + fibonacci(n-2)
'''
result = reviewer.analyze_code(sample, 'python')

print(f"Code Score: {result['overall_score']:.1f}/100")

Key Features

AST Parsing · AI Analysis · Score Calculation · Multi-language Support

Use Cases

CI/CD Pipelines, Code Quality Tools, Development Workflows

Smart Documentation Generator

Generate comprehensive documentation from code using AI analysis

Documentation · JavaScript · Beginner
const { Configuration, OpenAIApi } = require('openai')
const fs = require('fs').promises

class DocumentationGenerator {
  constructor(apiKey) {
    const configuration = new Configuration({ apiKey })
    this.openai = new OpenAIApi(configuration)
  }

  async generateDocumentation(code, options = {}) {
    const {
      language = 'javascript',
      style = 'comprehensive',
      includeExamples = true
    } = options

    try {
      const prompt = this.buildPrompt(code, language, style, includeExamples)
      
      const response = await this.openai.createChatCompletion({
        model: 'gpt-3.5-turbo',
        messages: [{ role: 'user', content: prompt }],
        max_tokens: 1000,
        temperature: 0.3
      })

      const documentation = response.data.choices[0].message.content
      return this.formatDocumentation(documentation, language)
    } catch (error) {
      throw new Error(`Documentation generation failed: ${error.message}`)
    }
  }

  buildPrompt(code, language, style, includeExamples) {
    return `Generate ${style} documentation for this ${language} code:

${code}

Please include:
- Function/class descriptions
- Parameter explanations
- Return value descriptions
- Usage examples: ${includeExamples ? 'Yes' : 'No'}
- Best practices and notes

Format as clean, readable documentation.`
  }

  formatDocumentation(doc, language) {
    return {
      content: doc,
      language,
      generatedAt: new Date().toISOString(),
      sections: this.extractSections(doc)
    }
  }

  extractSections(doc) {
    const sections = []
    const lines = doc.split('\n')
    
    let currentSection = null
    for (const line of lines) {
      if (line.startsWith('#')) {
        if (currentSection) sections.push(currentSection)
        currentSection = { title: line.replace(/^#+\s*/, ''), content: [] }
      } else if (currentSection && line.trim()) {
        currentSection.content.push(line)
      }
    }
    
    if (currentSection) sections.push(currentSection)
    return sections
  }

  async saveDocumentation(documentation, filename) {
    const content = `# Generated Documentation

Generated on: ${documentation.generatedAt}
Language: ${documentation.language}

${documentation.content}
`
    await fs.writeFile(filename, content, 'utf8')
    return filename
  }
}

// Usage example: document a compound-interest helper and save the result.
async function example() {
  const docTool = new DocumentationGenerator('your-openai-api-key')

  const snippet = `
  function calculateCompoundInterest(principal, rate, time, compound) {
    const amount = principal * Math.pow((1 + rate / compound), compound * time)
    return amount - principal
  }
  `

  const generated = await docTool.generateDocumentation(snippet, {
    language: 'javascript',
    style: 'comprehensive',
    includeExamples: true
  })

  await docTool.saveDocumentation(generated, 'api-docs.md')
  console.log('Documentation generated successfully!')
}

Key Features

AI Documentation · Multiple Formats · Section Extraction · File Export

Use Cases

API Documentation, Code Comments, Technical Writing

Intelligent Test Generator

Automatically generate comprehensive unit tests using AI analysis

Testing · TypeScript · Intermediate
import { Configuration, OpenAIApi } from 'openai'
import * as fs from 'fs/promises'

/**
 * One generated test case: the input to feed the function under test and
 * the value the assertion expects back.
 */
interface TestCase {
  name: string
  input: any
  expected: any
  description: string
}

/**
 * A suite of test cases for a single function. Optional setup/teardown
 * snippets are spliced into beforeEach/afterEach hooks by formatTestCode.
 */
interface TestSuite {
  functionName: string
  testCases: TestCase[]
  setupCode?: string
  teardownCode?: string
}

/**
 * Generates unit-test source for a code snippet by asking an OpenAI chat
 * model to analyze the code and propose test cases, then rendering those
 * cases as jest/mocha/vitest code.
 */
class AITestGenerator {
  private openai: OpenAIApi

  constructor(apiKey: string) {
    const configuration = new Configuration({ apiKey })
    this.openai = new OpenAIApi(configuration)
  }

  /**
   * Produce test source for `code` targeting the given framework.
   * @param code - source of the function under test
   * @param framework - output dialect; defaults to 'jest'
   * @returns the rendered test file contents
   * @throws Error wrapping any analysis/generation/parse failure
   */
  async generateTests(
    code: string, 
    framework: 'jest' | 'mocha' | 'vitest' = 'jest'
  ): Promise<string> {
    try {
      const analysis = await this.analyzeCode(code)
      const testSuite = await this.generateTestSuite(code, analysis, framework)
      return this.formatTestCode(testSuite, framework)
    } catch (error) {
      // `error` is `unknown` under strict TS — narrow before reading .message.
      const message = error instanceof Error ? error.message : String(error)
      throw new Error(`Test generation failed: ${message}`)
    }
  }

  /** Ask the model for a structured analysis of the code under test. */
  private async analyzeCode(code: string): Promise<any> {
    const prompt = `Analyze this code and identify:
1. Function name and parameters
2. Expected behavior and edge cases
3. Input/output types
4. Error conditions
5. Dependencies and side effects

Code:
${code}

Provide a structured analysis for test generation.`

    const response = await this.openai.createChatCompletion({
      model: 'gpt-4',
      messages: [{ role: 'user', content: prompt }],
      max_tokens: 800,
      temperature: 0.2
    })

    return this.parseAnalysis(response.data.choices[0].message.content)
  }

  /** Ask the model for concrete test cases based on the prior analysis. */
  private async generateTestSuite(
    code: string, 
    analysis: any, 
    framework: string
  ): Promise<TestSuite> {
    const prompt = `Generate comprehensive ${framework} test cases for this code:

${code}

Based on this analysis:
${JSON.stringify(analysis, null, 2)}

Include:
- Happy path tests
- Edge cases
- Error conditions
- Boundary value tests
- Mock requirements

Return as structured JSON with test cases.`

    const response = await this.openai.createChatCompletion({
      model: 'gpt-4',
      messages: [{ role: 'user', content: prompt }],
      max_tokens: 1200,
      temperature: 0.3
    })

    // NOTE(review): trusts the model to return valid JSON matching
    // TestSuite; a parse failure surfaces via generateTests' catch.
    return JSON.parse(response.data.choices[0].message.content)
  }

  /** Render a TestSuite as test source text (describe/test blocks). */
  private formatTestCode(testSuite: TestSuite, framework: string): string {
    const { functionName, testCases, setupCode, teardownCode } = testSuite

    let testCode = `import { ${functionName} } from './${functionName}'

describe('${functionName}', () => {`

    if (setupCode) {
      testCode += `
  beforeEach(() => {
    ${setupCode}
  })`
    }

    testCases.forEach(testCase => {
      testCode += `
  
  test('${testCase.name}', () => {
    // ${testCase.description}
    const result = ${functionName}(${this.formatInput(testCase.input)})
    expect(result).toEqual(${JSON.stringify(testCase.expected)})
  })`
    })

    if (teardownCode) {
      testCode += `
  
  afterEach(() => {
    ${teardownCode}
  })`
    }

    testCode += `
})
`

    return testCode
  }

  /** Render a test input as a source-code expression. */
  private formatInput(input: any): string {
    if (Array.isArray(input)) {
      return JSON.stringify(input)
    }
    if (typeof input === 'string') {
      // Escape backslashes and single quotes so the emitted literal stays
      // syntactically valid for inputs like "it's" or "a\b".
      return `'${input.replace(/\\/g, '\\\\').replace(/'/g, "\\'")}'`
    }
    return String(input)
  }

  /** Best-effort JSON parse; falls back to wrapping the raw text. */
  private parseAnalysis(content: string): any {
    // Simple parsing - in production, use more robust parsing
    try {
      return JSON.parse(content)
    } catch {
      return { analysis: content }
    }
  }

  /** Write the generated test source to `filename`. */
  async saveTests(testCode: string, filename: string): Promise<void> {
    await fs.writeFile(filename, testCode, 'utf8')
  }
}

// Usage example: generate jest tests for an email validator and save them.
async function example() {
  const testGen = new AITestGenerator('your-openai-api-key')

  const targetCode = `
  function validateEmail(email: string): boolean {
    const emailRegex = /^[^\s@]+@[^\s@]+\.[^\s@]+$/
    return emailRegex.test(email)
  }
  `

  const suite = await testGen.generateTests(targetCode, 'jest')
  await testGen.saveTests(suite, 'validateEmail.test.ts')

  console.log('Tests generated successfully!')
  console.log(suite)
}

Key Features

Code Analysis · Multiple Frameworks · Edge Case Detection · Test Export

Use Cases

Unit Testing, TDD, Quality Assurance, CI/CD