Compare commits

...

2 Commits

Author SHA1 Message Date
Harun CAN
294690949b main
Some checks failed
CI / build (push) Has been cancelled
2026-03-30 14:24:44 +03:00
Harun CAN
fd2580b311 main 2026-03-23 03:15:08 +03:00
34 changed files with 4266 additions and 388 deletions

1542
package-lock.json generated

File diff suppressed because it is too large Load Diff

View File

@@ -19,7 +19,8 @@
"test:e2e": "jest --config ./test/jest-e2e.json"
},
"dependencies": {
"@aws-sdk/client-s3": "^3.964.0",
"@aws-sdk/client-s3": "^3.1014.0",
"@aws-sdk/lib-storage": "^3.1014.0",
"@google/genai": "^1.35.0",
"@nestjs/bullmq": "^11.0.4",
"@nestjs/cache-manager": "^3.1.0",

View File

@@ -3,6 +3,7 @@ import { ConfigModule, ConfigService } from '@nestjs/config';
import { APP_FILTER, APP_GUARD, APP_INTERCEPTOR } from '@nestjs/core';
import { ThrottlerModule, ThrottlerGuard } from '@nestjs/throttler';
import { CacheModule } from '@nestjs/cache-manager';
import { BullModule } from '@nestjs/bullmq';
import { redisStore } from 'cache-manager-redis-yet';
import { LoggerModule } from 'nestjs-pino';
import {
@@ -22,6 +23,7 @@ import {
i18nConfig,
featuresConfig,
throttleConfig,
storageConfig,
} from './config/configuration';
import { geminiConfig } from './modules/gemini/gemini.config';
import { validateEnv } from './config/env.validation';
@@ -40,6 +42,7 @@ import { AdminModule } from './modules/admin/admin.module';
import { HealthModule } from './modules/health/health.module';
import { GeminiModule } from './modules/gemini/gemini.module';
import { SkriptaiModule } from './modules/skriptai/skriptai.module';
import { StorageModule } from './modules/storage/storage.module';
// Guards
import {
@@ -63,9 +66,23 @@ import {
featuresConfig,
throttleConfig,
geminiConfig,
storageConfig,
],
}),
// BullMQ (Queue System)
BullModule.forRootAsync({
imports: [ConfigModule],
inject: [ConfigService],
useFactory: (configService: ConfigService) => ({
connection: {
host: configService.get('redis.host', 'localhost'),
port: configService.get('redis.port', 6379),
password: configService.get('redis.password', undefined),
},
}),
}),
// Logger (Structured Logging with Pino)
LoggerModule.forRootAsync({
imports: [ConfigModule],
@@ -161,6 +178,7 @@ import {
// Optional Modules (controlled by env variables)
GeminiModule,
SkriptaiModule,
StorageModule,
HealthModule,
],
providers: [

View File

@@ -0,0 +1,96 @@
import { Injectable } from '@nestjs/common';
import { PrismaService } from '../../database/prisma.service';
/**
 * Pagination & Search Helpers
 *
 * Standardized pagination support and full-text search for projects.
 */

/**
 * Query parameters accepted by paginated list endpoints.
 */
export interface PaginationParams {
  // 1-based page number; buildPaginationOptions clamps values < 1 up to 1.
  page?: number;
  // Page size; buildPaginationOptions clamps to the 1..100 range (default 20).
  limit?: number;
  // Field name to order by; defaults to 'updatedAt' when omitted.
  sortBy?: string;
  // Sort direction; defaults to 'desc'.
  sortOrder?: 'asc' | 'desc';
}

/**
 * Standard envelope returned by paginated endpoints: one page of rows
 * plus metadata describing the overall result set.
 */
export interface PaginatedResult<T> {
  data: T[];
  meta: {
    total: number; // total matching rows across all pages
    page: number; // current (1-based) page
    limit: number; // page size used
    totalPages: number; // ceil(total / limit)
    hasNext: boolean; // true when page < totalPages
    hasPrev: boolean; // true when page > 1
  };
}

/**
 * Pagination parameters extended with free-text and field filters.
 */
export interface SearchParams extends PaginationParams {
  query?: string; // free-text search (see buildSearchCondition)
  status?: string; // optional status filter
  contentType?: string; // optional content-type filter
}
/**
 * Build standard pagination options for Prisma.
 *
 * Clamps `page` to >= 1 and `limit` to the 1..100 range (default 20),
 * derives `skip`/`take`, and builds the `orderBy` clause. When no sort
 * field is supplied, results are ordered by `updatedAt` descending.
 */
export function buildPaginationOptions(params: PaginationParams) {
  const page = Math.max(params.page || 1, 1);
  const limit = Math.max(Math.min(params.limit || 20, 100), 1);

  // Fall back to updatedAt desc when the caller gave no sort field.
  const sortField = params.sortBy || 'updatedAt';
  const direction: 'asc' | 'desc' = params.sortBy
    ? params.sortOrder || 'desc'
    : 'desc';
  const orderBy: Record<string, 'asc' | 'desc'> = { [sortField]: direction };

  return {
    skip: (page - 1) * limit,
    take: limit,
    orderBy,
    page,
    limit,
  };
}
/**
 * Assemble a PaginatedResult envelope from one page of rows plus the
 * total row count reported by the datastore.
 *
 * `totalPages` is ceil(total / limit); `hasNext`/`hasPrev` are derived
 * from the current page's position within that range.
 */
export function buildPaginatedResult<T>(
  data: T[],
  total: number,
  page: number,
  limit: number,
): PaginatedResult<T> {
  const totalPages = Math.ceil(total / limit);
  const hasPrev = page > 1;
  const hasNext = page < totalPages;
  return {
    data,
    meta: { total, page, limit, totalPages, hasNext, hasPrev },
  };
}
/**
 * Build a case-insensitive text-search condition for Prisma.
 *
 * Matches the query (trimmed) against topic, logline, seoTitle and
 * seoDescription via `contains`/`insensitive`. Returns an empty object
 * when the query is missing or whitespace-only, so it can be spread
 * into a `where` clause unconditionally.
 *
 * For production-scale search, consider PostgreSQL tsvector for true FTS.
 */
export function buildSearchCondition(query?: string) {
  const search = query?.trim();
  if (!search) {
    return {};
  }
  const insensitiveContains = { contains: search, mode: 'insensitive' as const };
  return {
    OR: [
      { topic: insensitiveContains },
      { logline: insensitiveContains },
      { seoTitle: insensitiveContains },
      { seoDescription: insensitiveContains },
    ],
  };
}

View File

@@ -0,0 +1,99 @@
import { Injectable, NestMiddleware, Logger } from '@nestjs/common';
import { Request, Response, NextFunction } from 'express';
import { randomUUID } from 'crypto';
/**
 * Correlation ID Middleware
 *
 * Assigns a unique correlation ID to every incoming request.
 * The ID is:
 * 1. Read from the `x-correlation-id` header (if provided by client/gateway)
 * 2. Or auto-generated as a UUID
 * 3. Set on the response header
 * 4. Attached to the request object for downstream logging
 */
@Injectable()
export class CorrelationIdMiddleware implements NestMiddleware {
  use(req: Request, res: Response, next: NextFunction) {
    // Bug fix: Node exposes repeated headers as string[]; the previous
    // `as string` cast would have attached an array. Take the first value.
    const incoming = req.headers['x-correlation-id'];
    const headerValue = Array.isArray(incoming) ? incoming[0] : incoming;
    const correlationId = headerValue || randomUUID();
    // Attach to request for downstream use (loggers, handlers).
    (req as any).correlationId = correlationId;
    // Echo back so clients can correlate their logs with ours.
    res.setHeader('x-correlation-id', correlationId);
    next();
  }
}
/**
 * AI Metrics Logger
 *
 * Structured logging helper for AI operations.
 * Logs operation type, model, token usage, duration, success/failure,
 * and optional project/correlation identifiers.
 */

/** One record of an AI call's outcome, passed to logAIMetrics. */
export interface AIMetrics {
  operation: string; // e.g. 'generateJSON', 'generateText'
  model: string; // model identifier used for the call
  inputTokens?: number; // prompt tokens, when reported by the provider
  outputTokens?: number; // completion tokens, when reported by the provider
  durationMs: number; // wall-clock duration of the call
  success: boolean; // whether the call completed without error
  error?: string; // error message, logged when success is false
  projectId?: string; // owning project, when applicable
  correlationId?: string; // request correlation id for log tracing
}
/**
 * Emit a one-line structured log entry for an AI operation.
 *
 * Logs status, operation, model, duration, token usage (when known),
 * and optional project/correlation identifiers. Failures are additionally
 * reported via logger.error.
 */
export function logAIMetrics(logger: Logger, metrics: AIMetrics): void {
  const { operation, model, inputTokens, outputTokens, durationMs, success } =
    metrics;
  // Bug fix: input/output counts were concatenated with no separator
  // ("tokens: 100200"), and `|| '?'` hid a legitimate 0 output count.
  const tokenInfo =
    inputTokens !== undefined
      ? ` | tokens: ${inputTokens}/${outputTokens ?? '?'}`
      : '';
  const status = success ? '✅' : '❌';
  const projectInfo = metrics.projectId ? ` | project: ${metrics.projectId}` : '';
  const cidInfo = metrics.correlationId ? ` | cid: ${metrics.correlationId}` : '';
  logger.log(
    `${status} AI ${operation} | model: ${model} | ${durationMs}ms${tokenInfo}${projectInfo}${cidInfo}`,
  );
  if (!success && metrics.error) {
    logger.error(`AI ${operation} error: ${metrics.error}`);
  }
}
/**
 * Log levels used across the application
 *
 * - DEBUG: Development details, verbose data
 * - INFO: Normal operations, startup, connections
 * - WARN: Recoverable issues, fallbacks, deprecations
 * - ERROR: Failures that need attention
 * - FATAL: Critical failures, shutdown required
 */
// Recommended log level per event category; values are lowercase level
// names as used by pino/Nest loggers.
export const LOG_LEVELS = {
  AI_CALL: 'info',
  CACHE_HIT: 'debug',
  CACHE_MISS: 'debug',
  QUEUE_JOB: 'info',
  WEBSOCKET_EVENT: 'debug',
  STORAGE_UPLOAD: 'info',
  AUTH_EVENT: 'info',
} as const;

View File

@@ -0,0 +1,157 @@
import { Injectable, Logger, Inject } from '@nestjs/common';
import { CACHE_MANAGER } from '@nestjs/cache-manager';
import type { Cache } from 'cache-manager';
/**
 * CacheStrategyService
 *
 * Centralized cache management for SkriptAI with tagged invalidation.
 *
 * Strategies:
 * - AI Response Cache: Cache expensive AI calls (keyed by prompt hash)
 * - Project Data Cache: Cache project details with smart invalidation
 * - Rate Limiting: Track API call counts per user
 */
@Injectable()
export class CacheStrategyService {
  private readonly logger = new Logger(CacheStrategyService.name);

  constructor(@Inject(CACHE_MANAGER) private readonly cache: Cache) {}

  // ========== AI RESPONSE CACHE ==========

  /**
   * Cache an AI response with a prompt-based key.
   *
   * @param promptHash - Hash of the prompt (see hashPrompt)
   * @param data - AI response data (must be JSON-serializable)
   * @param ttlMs - Time to live in ms (default: 30 min)
   */
  async cacheAIResponse(
    promptHash: string,
    data: any,
    ttlMs: number = 30 * 60 * 1000,
  ): Promise<void> {
    const key = `ai:${promptHash}`;
    try {
      await this.cache.set(key, JSON.stringify(data), ttlMs);
      this.logger.debug(`AI response cached: ${key}`);
    } catch (error) {
      // Cache failures are non-fatal; the caller simply recomputes.
      this.logger.warn(`Cache set failed: ${key}`, error);
    }
  }

  /**
   * Get a cached AI response, or null on miss / cache error.
   */
  async getCachedAIResponse<T = any>(promptHash: string): Promise<T | null> {
    const key = `ai:${promptHash}`;
    try {
      const cached = await this.cache.get<string>(key);
      if (cached) {
        this.logger.debug(`AI cache hit: ${key}`);
        return JSON.parse(cached);
      }
    } catch (error) {
      this.logger.warn(`Cache get failed: ${key}`, error);
    }
    return null;
  }

  // ========== PROJECT DATA CACHE ==========

  /**
   * Cache project data (short TTL; mutations must call invalidateProject).
   */
  async cacheProject(
    projectId: string,
    data: any,
    ttlMs: number = 5 * 60 * 1000,
  ): Promise<void> {
    const key = `project:${projectId}`;
    try {
      await this.cache.set(key, JSON.stringify(data), ttlMs);
    } catch {
      /* silent — caching is best-effort */
    }
  }

  /**
   * Get cached project data, or null on miss / cache error.
   */
  async getCachedProject<T = any>(projectId: string): Promise<T | null> {
    const key = `project:${projectId}`;
    try {
      const cached = await this.cache.get<string>(key);
      return cached ? JSON.parse(cached) : null;
    } catch {
      return null;
    }
  }

  /**
   * Invalidate project cache (call after any project mutation).
   */
  async invalidateProject(projectId: string): Promise<void> {
    try {
      await this.cache.del(`project:${projectId}`);
      this.logger.debug(`Project cache invalidated: ${projectId}`);
    } catch {
      /* silent — stale entry expires via TTL anyway */
    }
  }

  // ========== RATE LIMITING ==========

  /**
   * Check and increment a fixed-window rate limit counter.
   *
   * Bug fix: the previous implementation stored a bare counter and reset the
   * TTL to `windowMs` on every increment, which extended the window
   * indefinitely for a steadily-active user. The window end (`resetAt`) is
   * now stored alongside the count so the window expires on schedule.
   *
   * NOTE(review): per-instance cache semantics assumed; with a shared Redis
   * store this check is not atomic under concurrency — confirm acceptable.
   *
   * @param userId - User identifier
   * @param action - Action name (e.g. 'ai-call')
   * @param maxPerWindow - Max calls per window
   * @param windowMs - Window duration in ms (default: 1 min)
   * @returns { allowed, remaining, resetIn }
   */
  async checkRateLimit(
    userId: string,
    action: string,
    maxPerWindow: number = 10,
    windowMs: number = 60 * 1000,
  ): Promise<{ allowed: boolean; remaining: number; resetIn: number }> {
    const key = `rate:${userId}:${action}`;
    try {
      const now = Date.now();
      const raw = await this.cache.get<string>(key);
      let count = 0;
      let resetAt = now + windowMs;
      if (raw) {
        const parsed: unknown = JSON.parse(raw);
        if (typeof parsed === 'number') {
          // Legacy bare-counter entry written by the old implementation.
          count = parsed;
        } else if (parsed && typeof parsed === 'object') {
          const entry = parsed as { count?: number; resetAt?: number };
          if (typeof entry.resetAt === 'number' && entry.resetAt > now) {
            count = entry.count ?? 0;
            resetAt = entry.resetAt;
          }
        }
      }
      if (count >= maxPerWindow) {
        return { allowed: false, remaining: 0, resetIn: resetAt - now };
      }
      await this.cache.set(
        key,
        JSON.stringify({ count: count + 1, resetAt }),
        resetAt - now,
      );
      return {
        allowed: true,
        remaining: maxPerWindow - count - 1,
        resetIn: resetAt - now,
      };
    } catch {
      // Fail open: a cache outage must not block users.
      return { allowed: true, remaining: maxPerWindow, resetIn: windowMs };
    }
  }

  // ========== UTILITY ==========

  /**
   * Generate a simple deterministic hash from prompt text.
   * Non-cryptographic (32-bit) — suitable only as a cache key.
   */
  hashPrompt(prompt: string): string {
    let hash = 0;
    for (let i = 0; i < prompt.length; i++) {
      const char = prompt.charCodeAt(i);
      hash = (hash << 5) - hash + char;
      hash |= 0; // Convert to 32bit integer
    }
    return Math.abs(hash).toString(36);
  }
}

View File

@@ -55,3 +55,12 @@ export const throttleConfig = registerAs('throttle', () => ({
ttl: parseInt(process.env.THROTTLE_TTL || '60000', 10),
limit: parseInt(process.env.THROTTLE_LIMIT || '100', 10),
}));
// Object storage (MinIO / S3-compatible) configuration namespace.
// NOTE(review): the fallbacks below are the MinIO default credentials and a
// LAN endpoint — acceptable for local development only. Ensure real
// deployments always set the STORAGE_* env vars; consider failing fast in
// production when credentials are left at these defaults.
export const storageConfig = registerAs('storage', () => ({
  enabled: process.env.STORAGE_ENABLED === 'true',
  endpoint: process.env.STORAGE_ENDPOINT || 'http://192.168.1.199:9000',
  accessKey: process.env.STORAGE_ACCESS_KEY || 'minioadmin',
  secretKey: process.env.STORAGE_SECRET_KEY || 'minioadmin',
  bucket: process.env.STORAGE_BUCKET || 'skriptai-assets',
  publicUrl: process.env.STORAGE_PUBLIC_URL || 'http://192.168.1.199:9000',
}));

67
src/config/languages.ts Normal file
View File

@@ -0,0 +1,67 @@
/**
 * Supported Languages Configuration
 *
 * Phase 5.1 — multi-language expansion infrastructure.
 * Add entries to this file to introduce new languages.
 */

/** Static configuration for one UI/AI output language. */
export interface LanguageConfig {
  code: string; // ISO 639-1 code, e.g. 'tr', 'en'
  name: string; // English display name
  nativeName: string; // name in the language itself
  flag: string; // emoji flag shown in the UI
  rtl: boolean; // true for right-to-left scripts
  enabled: boolean; // whether users can currently select it
}
/**
 * Master list of languages the product knows about.
 * Only entries with `enabled: true` are offered to users today.
 */
export const SUPPORTED_LANGUAGES: LanguageConfig[] = [
  { code: 'tr', name: 'Turkish', nativeName: 'Türkçe', flag: '🇹🇷', rtl: false, enabled: true },
  { code: 'en', name: 'English', nativeName: 'English', flag: '🇬🇧', rtl: false, enabled: true },
  { code: 'ar', name: 'Arabic', nativeName: 'العربية', flag: '🇸🇦', rtl: true, enabled: false },
  { code: 'es', name: 'Spanish', nativeName: 'Español', flag: '🇪🇸', rtl: false, enabled: false },
  { code: 'de', name: 'German', nativeName: 'Deutsch', flag: '🇩🇪', rtl: false, enabled: false },
  { code: 'fr', name: 'French', nativeName: 'Français', flag: '🇫🇷', rtl: false, enabled: false },
  { code: 'ja', name: 'Japanese', nativeName: '日本語', flag: '🇯🇵', rtl: false, enabled: false },
  { code: 'ko', name: 'Korean', nativeName: '한국어', flag: '🇰🇷', rtl: false, enabled: false },
  { code: 'zh', name: 'Chinese', nativeName: '中文', flag: '🇨🇳', rtl: false, enabled: false },
  { code: 'pt', name: 'Portuguese', nativeName: 'Português', flag: '🇧🇷', rtl: false, enabled: false },
  { code: 'ru', name: 'Russian', nativeName: 'Русский', flag: '🇷🇺', rtl: false, enabled: false },
  { code: 'hi', name: 'Hindi', nativeName: 'हिन्दी', flag: '🇮🇳', rtl: false, enabled: false },
];

/** Languages currently switched on for users. */
export function getEnabledLanguages(): LanguageConfig[] {
  const enabled: LanguageConfig[] = [];
  for (const lang of SUPPORTED_LANGUAGES) {
    if (lang.enabled) {
      enabled.push(lang);
    }
  }
  return enabled;
}

/** Whether the given language code is written right-to-left. */
export function isRTL(code: string): boolean {
  return getLanguageConfig(code)?.rtl ?? false;
}

/** Look up the configuration entry for a language code, if any. */
export function getLanguageConfig(code: string): LanguageConfig | undefined {
  return SUPPORTED_LANGUAGES.find((entry) => entry.code === code);
}
/**
 * AI prompt language-instruction map.
 *
 * Used to instruct the AI about output-language characteristics.
 * NOTE(review): only a subset of SUPPORTED_LANGUAGES has an entry here
 * (ko/zh/pt/ru/hi are missing) — callers must tolerate an undefined lookup.
 */
export const LANGUAGE_INSTRUCTIONS: Record<string, string> = {
  tr: 'Doğal, akıcı Türkçe kullan. Argo ve günlük dil kullanımına dikkat et.',
  en: 'Use natural, fluent English. Match the requested tone and style.',
  ar: 'استخدم اللغة العربية الفصحى الحديثة مع مراعاة الأسلوب المطلوب',
  es: 'Utiliza español natural y fluido. Adapta el tono según lo solicitado.',
  de: 'Verwende natürliches, flüssiges Deutsch. Passe den Ton an den gewünschten Stil an.',
  fr: 'Utilise un français naturel et fluide. Adapte le ton au style demandé.',
  ja: '自然で流暢な日本語を使用してください。要求されたトーンとスタイルに合わせてください。',
};

View File

@@ -0,0 +1,292 @@
import { Injectable, Logger } from '@nestjs/common';
import {
estimateTokens,
estimateTokensForSegments,
getModelLimits,
analyzeTokenUsage,
TokenUsageReport,
} from './token-counter';
/**
 * Context Priority Levels
 * Higher priority = kept during trimming, lower = removed first
 */
export enum ContextPriority {
  CRITICAL = 100, // System instructions, schema
  HIGH = 80, // Topic, logline, key brief items
  MEDIUM = 60, // Sources, characters
  LOW = 40, // Extended notes, enrichment data
  OPTIONAL = 20, // Visual descriptions, editor notes
}

/** One labeled piece of prompt context, ready for budget-aware assembly. */
export interface ContextBlock {
  id: string; // stable identifier, reported in include/exclude lists
  content: string; // the text to inject into the prompt
  priority: ContextPriority; // trimming order (higher survives longer)
  estimatedTokens: number; // precomputed token estimate for budgeting
  label: string; // human-readable name for logs/UI
  truncatable: boolean; // may be partially included when budget is tight
}
/**
 * ContextManagerService
 *
 * Manages the context window for AI prompts. Intelligently assembles
 * context blocks within token limits, trimming low-priority content first.
 *
 * Strategy:
 * 1. Each piece of context is tagged with a priority level
 * 2. Blocks are sorted by priority (highest first)
 * 3. Blocks are added until the budget is reached
 * 4. Truncatable blocks can be partially included
 */
@Injectable()
export class ContextManagerService {
  private readonly logger = new Logger(ContextManagerService.name);

  /**
   * Build an optimized context string from blocks within the token budget.
   *
   * @param blocks - Array of context blocks
   * @param model - Model name for limit lookup
   * @param language - Language for token estimation
   * @param reserveForOutput - Tokens reserved for AI output (default: 8000)
   * @returns Assembled context, included/excluded block ids, and usage report
   */
  assembleContext(
    blocks: ContextBlock[],
    model: string,
    language: string = 'en',
    reserveForOutput: number = 8000,
  ): {
    context: string;
    includedBlocks: string[];
    excludedBlocks: string[];
    report: TokenUsageReport;
  } {
    const limits = getModelLimits(model);
    const budget = limits.safeInput - reserveForOutput;
    // Highest priority first so critical blocks are never trimmed out.
    const sorted = [...blocks].sort((a, b) => b.priority - a.priority);
    let currentTokens = 0;
    const includedParts: string[] = [];
    const includedIds: string[] = [];
    const excludedIds: string[] = [];
    for (const block of sorted) {
      if (currentTokens + block.estimatedTokens <= budget) {
        // Block fits entirely.
        includedParts.push(block.content);
        includedIds.push(block.id);
        currentTokens += block.estimatedTokens;
      } else if (block.truncatable && currentTokens < budget) {
        // Partial include — truncate (sentence-aligned) to fit what remains.
        const remainingBudget = budget - currentTokens;
        const truncated = this.truncateToTokens(
          block.content,
          remainingBudget,
          language,
        );
        if (truncated.length > 0) {
          includedParts.push(truncated + '\n[... içerik kırpıldı ...]');
          includedIds.push(`${block.id} (kırpılmış)`);
          currentTokens += estimateTokens(truncated, language);
        } else {
          excludedIds.push(block.id);
        }
      } else {
        excludedIds.push(block.id);
      }
    }
    const assembledContext = includedParts.join('\n\n');
    const report = analyzeTokenUsage(assembledContext, model, language);
    if (excludedIds.length > 0) {
      this.logger.warn(
        `Context trimmed: excluded ${excludedIds.length} blocks — ${excludedIds.join(', ')}`,
      );
    }
    return {
      context: assembledContext,
      includedBlocks: includedIds,
      excludedBlocks: excludedIds,
      report,
    };
  }

  /**
   * Create context blocks from project data.
   * Standardized way to build context for any AI operation.
   */
  buildProjectContextBlocks(project: {
    topic: string;
    logline?: string | null;
    contentType: string;
    targetAudience: string[];
    speechStyle: string[];
    language: string;
    userNotes?: string | null;
    sources?: { title: string; snippet: string; type: string }[];
    briefItems?: { question: string; answer: string }[];
    characters?: { name: string; role: string; values: string; traits: string; mannerisms: string }[];
    segments?: { narratorScript?: string | null; visualDescription?: string | null; segmentType: string }[];
  }): ContextBlock[] {
    const lang = project.language || 'en';
    const blocks: ContextBlock[] = [];

    // CRITICAL: Topic & core project info — never trimmed.
    const coreInfo = [
      `Konu: ${project.topic}`,
      project.logline ? `Logline: ${project.logline}` : '',
      `İçerik Tipi: ${project.contentType}`,
      `Hedef Kitle: ${project.targetAudience.join(', ')}`,
      `Konuşma Stili: ${project.speechStyle.join(', ')}`,
      `Dil: ${project.language}`,
    ]
      .filter(Boolean)
      .join('\n');
    blocks.push({
      id: 'core-info',
      content: coreInfo,
      priority: ContextPriority.CRITICAL,
      estimatedTokens: estimateTokens(coreInfo, lang),
      label: 'Core Project Info',
      truncatable: false,
    });

    // HIGH: Brief Q&A items.
    if (project.briefItems?.length) {
      const briefText = project.briefItems
        .map((b) => `S: ${b.question}\nC: ${b.answer}`)
        .join('\n\n');
      blocks.push({
        id: 'brief-items',
        content: briefText,
        priority: ContextPriority.HIGH,
        estimatedTokens: estimateTokens(briefText, lang),
        label: 'Brief Items',
        truncatable: true,
      });
    }

    // MEDIUM: Characters.
    if (project.characters?.length) {
      const charText = project.characters
        .map(
          (c) =>
            `${c.name} (${c.role}): Değerler[${c.values}] Özellikler[${c.traits}] Tavırlar[${c.mannerisms}]`,
        )
        .join('\n');
      blocks.push({
        id: 'characters',
        content: charText,
        priority: ContextPriority.MEDIUM,
        estimatedTokens: estimateTokens(charText, lang),
        label: 'Characters',
        truncatable: true,
      });
    }

    // MEDIUM: Research sources (capped at 5).
    if (project.sources?.length) {
      const srcText = project.sources
        .slice(0, 5)
        .map(
          (s, i) =>
            // Bug fix: title and snippet were concatenated with no
            // separator, garbling the prompt text.
            `[Kaynak ${i + 1}] (${s.type}): ${s.title} — ${s.snippet}`,
        )
        .join('\n');
      blocks.push({
        id: 'sources',
        content: srcText,
        priority: ContextPriority.MEDIUM,
        estimatedTokens: estimateTokens(srcText, lang),
        label: 'Research Sources',
        truncatable: true,
      });
    }

    // LOW: Free-form user notes.
    if (project.userNotes) {
      blocks.push({
        id: 'user-notes',
        content: project.userNotes,
        priority: ContextPriority.LOW,
        estimatedTokens: estimateTokens(project.userNotes, lang),
        label: 'User Notes',
        truncatable: true,
      });
    }

    // OPTIONAL: Existing segments (context for regeneration).
    if (project.segments?.length) {
      const segText = project.segments
        .map(
          (s, i) =>
            // Bug fix: index and segmentType were concatenated without a
            // separator (e.g. "[Segment 3HOOK]"); add a delimiter.
            `[Segment ${i + 1} | ${s.segmentType}]: ${s.narratorScript || ''}`,
        )
        .join('\n');
      blocks.push({
        id: 'existing-segments',
        content: segText,
        priority: ContextPriority.OPTIONAL,
        estimatedTokens: estimateTokens(segText, lang),
        label: 'Existing Segments',
        truncatable: true,
      });
    }
    return blocks;
  }

  /**
   * Get a token usage report for a text.
   */
  getUsageReport(
    text: string,
    model: string,
    language: string = 'en',
  ): TokenUsageReport {
    return analyzeTokenUsage(text, model, language);
  }

  /**
   * Estimate total tokens across segments (script + visual description).
   */
  estimateSegmentTokens(
    segments: { narratorScript?: string; visualDescription?: string }[],
    language: string = 'en',
  ): number {
    return estimateTokensForSegments(segments, language);
  }

  // ========== HELPERS ==========

  /**
   * Truncate text to roughly maxTokens, breaking on sentence boundaries
   * so we never cut mid-sentence.
   */
  private truncateToTokens(
    text: string,
    maxTokens: number,
    language: string,
  ): string {
    const sentences = text.split(/(?<=[.!?。?!])\s+/);
    let result = '';
    let currentTokens = 0;
    for (const sentence of sentences) {
      const sentenceTokens = estimateTokens(sentence, language);
      if (currentTokens + sentenceTokens > maxTokens) break;
      result += (result ? ' ' : '') + sentence;
      currentTokens += sentenceTokens;
    }
    return result;
  }
}

View File

@@ -1,6 +1,8 @@
import { Module, Global } from '@nestjs/common';
import { ConfigModule } from '@nestjs/config';
import { GeminiService } from './gemini.service';
import { ContextManagerService } from './context-manager.service';
import { MapReduceService } from './map-reduce.service';
import { geminiConfig } from './gemini.config';
/**
@@ -8,11 +10,16 @@ import { geminiConfig } from './gemini.config';
*
* Optional module for AI-powered features using Google Gemini API.
* Enable by setting ENABLE_GEMINI=true in your .env file.
*
* Includes:
* - GeminiService: Core AI text/JSON/image generation
* - ContextManagerService: Token-aware context window management
* - MapReduceService: Large content analysis via chunking
*/
// NOTE(review): this snippet comes from a unified-diff view — the duplicated
// `providers:`/`exports:` pairs below are the removed (old) and added (new)
// lines of the same change; the committed file keeps only the second pair
// (GeminiService, ContextManagerService, MapReduceService).
@Global()
@Module({
  imports: [ConfigModule.forFeature(geminiConfig)],
  providers: [GeminiService],
  exports: [GeminiService],
  providers: [GeminiService, ContextManagerService, MapReduceService],
  exports: [GeminiService, ContextManagerService, MapReduceService],
})
export class GeminiModule {}

View File

@@ -0,0 +1,169 @@
import { Injectable, Logger } from '@nestjs/common';
import { GeminiService } from './gemini.service';
import { estimateTokens, getModelLimits } from './token-counter';
/**
 * MapReduceService
 *
 * Handles analysis of content that exceeds the context window by:
 * 1. MAP: Splitting content into digestible chunks and analyzing each
 * 2. REDUCE: Combining individual analyses into a final summary
 *
 * Use cases:
 * - Consistency check on very long scripts (50+ segments)
 * - Deep analysis when total script tokens exceed safe limits
 * - Aggregated quality scoring across large content sets
 */
@Injectable()
export class MapReduceService {
  private readonly logger = new Logger(MapReduceService.name);

  constructor(private readonly gemini: GeminiService) {}

  /**
   * Map-Reduce text analysis.
   *
   * @param chunks - Array of text chunks to analyze
   * @param mapPrompt - Prompt template for each chunk (use {{CHUNK}} placeholder)
   * @param reducePrompt - Prompt template for combining results (use {{RESULTS}} placeholder)
   * @param schema - JSON schema string for expected output
   * @param options - Optional config (model, language, temperature, maxChunkTokens)
   * @returns Combined analysis result plus per-chunk map results
   */
  async analyze<T = any>(
    chunks: string[],
    mapPrompt: string,
    reducePrompt: string,
    schema: string,
    options: {
      model?: string;
      language?: string;
      temperature?: number;
      maxChunkTokens?: number;
    } = {},
  ): Promise<{ data: T; mapResults: any[]; chunkCount: number }> {
    const {
      model,
      language = 'en',
      temperature = 0.3,
      maxChunkTokens = 15000,
    } = options;
    this.logger.log(
      `Map-Reduce: ${chunks.length} chunks, maxChunkTokens: ${maxChunkTokens}`,
    );

    // ===== MAP PHASE =====
    // Chunks run sequentially (not Promise.all) to stay friendly to provider
    // rate limits; a failed chunk is recorded and skipped rather than
    // aborting the whole analysis.
    const mapResults: any[] = [];
    for (let i = 0; i < chunks.length; i++) {
      const chunk = chunks[i];
      const prompt = mapPrompt.replace('{{CHUNK}}', chunk);
      this.logger.debug(
        `MAP phase: chunk ${i + 1}/${chunks.length} (${estimateTokens(chunk, language)} tokens)`,
      );
      try {
        const resp = await this.gemini.generateJSON<any>(prompt, schema, {
          model,
          temperature,
        });
        mapResults.push(resp.data);
      } catch (error) {
        this.logger.warn(`MAP failed for chunk ${i + 1}: ${error}`);
        mapResults.push({ error: `Chunk ${i + 1} failed`, skipped: true });
      }
    }

    // ===== REDUCE PHASE =====
    // A single map result needs no combination step.
    if (mapResults.length === 1) {
      return { data: mapResults[0], mapResults, chunkCount: chunks.length };
    }
    const resultsJson = JSON.stringify(mapResults, null, 2);
    const finalPrompt = reducePrompt.replace('{{RESULTS}}', resultsJson);
    this.logger.debug(`REDUCE phase: combining ${mapResults.length} results`);
    const reduceResp = await this.gemini.generateJSON<T>(finalPrompt, schema, {
      model,
      temperature,
    });
    return {
      data: reduceResp.data,
      mapResults,
      chunkCount: chunks.length,
    };
  }

  /**
   * Split segments into token-limited chunks.
   *
   * Groups segments so each chunk stays within the token budget while
   * preserving order; each entry carries its 1-based index.
   * Note: a single oversized segment still becomes its own chunk.
   */
  chunkSegments(
    segments: {
      narratorScript?: string | null;
      visualDescription?: string | null;
      segmentType: string;
    }[],
    maxTokensPerChunk: number = 15000,
    language: string = 'en',
  ): string[] {
    const chunks: string[] = [];
    let currentChunk: string[] = [];
    let currentTokens = 0;
    for (let i = 0; i < segments.length; i++) {
      const seg = segments[i];
      // Bug fix: index and segmentType were concatenated without a separator
      // (e.g. "[Segment 3HOOK]"); add an explicit delimiter.
      const segText = `[Segment ${i + 1} | ${seg.segmentType}]\n${seg.narratorScript || ''}\nVisual: ${seg.visualDescription || 'N/A'}`;
      const segTokens = estimateTokens(segText, language);
      if (
        currentTokens + segTokens > maxTokensPerChunk &&
        currentChunk.length > 0
      ) {
        // Current chunk is full — seal it and start a new one.
        chunks.push(currentChunk.join('\n\n'));
        currentChunk = [];
        currentTokens = 0;
      }
      currentChunk.push(segText);
      currentTokens += segTokens;
    }
    if (currentChunk.length > 0) {
      chunks.push(currentChunk.join('\n\n'));
    }
    this.logger.log(
      `Chunked ${segments.length} segments into ${chunks.length} chunks`,
    );
    return chunks;
  }

  /**
   * Check if content needs map-reduce (exceeds safe context).
   * Returns true when the combined script text uses more than 60% of the
   * model's safe input budget.
   */
  needsMapReduce(
    segments: { narratorScript?: string | null }[],
    model: string = 'gemini-2.5-flash',
    language: string = 'en',
  ): boolean {
    const totalText = segments.map((s) => s.narratorScript || '').join('\n');
    const tokens = estimateTokens(totalText, language);
    const limits = getModelLimits(model);
    // If content takes more than 60% of safe input, use map-reduce
    return tokens > limits.safeInput * 0.6;
  }
}

View File

@@ -0,0 +1,189 @@
/**
* Model Selector
*
* Task-based model selection strategy for Gemini AI operations.
*
* Strategy:
* - Flash models: Fast, cost-effective — ideal for drafts, summaries, simple tasks
* - Pro models: Higher quality — ideal for final scripts, analysis, critique
*
* Users can override with a quality preference:
* - 'fast': Always use flash
* - 'balanced': Task-based auto-selection (default)
* - 'quality': Always use pro
*
* TR: Görev bazında model seçim stratejisi. Hız/kalite tercihi ile otomatik model seçimi.
*/
/** User-facing speed/quality trade-off for AI calls. */
export type QualityPreference = 'fast' | 'balanced' | 'quality';

/**
 * Task categories that map to model selection.
 */
export enum TaskCategory {
  // Quick/Draft tasks → Flash
  TOPIC_ENRICHMENT = 'TOPIC_ENRICHMENT',
  DISCOVERY_QUESTIONS = 'DISCOVERY_QUESTIONS',
  SEARCH_QUERY = 'SEARCH_QUERY',
  CHARACTER_GENERATION = 'CHARACTER_GENERATION',
  LOGLINE_GENERATION = 'LOGLINE_GENERATION',
  OUTLINE_GENERATION = 'OUTLINE_GENERATION',
  SEGMENT_IMAGE_PROMPT = 'SEGMENT_IMAGE_PROMPT',
  // Core generation → Balanced (Pro in quality mode)
  CHAPTER_GENERATION = 'CHAPTER_GENERATION',
  SEGMENT_REWRITE = 'SEGMENT_REWRITE',
  DEEP_RESEARCH = 'DEEP_RESEARCH',
  VISUAL_ASSETS = 'VISUAL_ASSETS',
  // Analysis/Critique → Pro preferred
  NEURO_ANALYSIS = 'NEURO_ANALYSIS',
  YOUTUBE_AUDIT = 'YOUTUBE_AUDIT',
  COMMERCIAL_BRIEF = 'COMMERCIAL_BRIEF',
  CONSISTENCY_CHECK = 'CONSISTENCY_CHECK',
  SELF_CRITIQUE = 'SELF_CRITIQUE',
}

const FLASH_MODEL = 'gemini-2.5-flash';
const PRO_MODEL = 'gemini-2.5-pro';

// Tasks whose "pro" tier is still the Flash model — they never get Pro,
// even when the user asks for quality.
const FLASH_ONLY_TASKS: ReadonlySet<TaskCategory> = new Set([
  TaskCategory.DISCOVERY_QUESTIONS,
  TaskCategory.SEARCH_QUERY,
  TaskCategory.SEGMENT_IMAGE_PROMPT,
  TaskCategory.VISUAL_ASSETS,
]);

// Analysis/critique tasks promoted to Pro even in 'balanced' mode.
// NOTE(review): COMMERCIAL_BRIEF sits in the "Pro preferred" enum section but
// was not pro-promoted in balanced mode by the original code — behavior
// preserved as-is; confirm whether the omission is intentional.
const BALANCED_PRO_TASKS: ReadonlySet<TaskCategory> = new Set([
  TaskCategory.NEURO_ANALYSIS,
  TaskCategory.YOUTUBE_AUDIT,
  TaskCategory.CONSISTENCY_CHECK,
  TaskCategory.SELF_CRITIQUE,
]);

/** Resolve the flash/pro model pair available for a task. */
function tierModelsFor(task: TaskCategory): { flash: string; pro: string } {
  return {
    flash: FLASH_MODEL,
    pro: FLASH_ONLY_TASKS.has(task) ? FLASH_MODEL : PRO_MODEL,
  };
}

/**
 * Select the best model for a given task and quality preference.
 *
 * @param task - The task category
 * @param preference - User quality preference
 * @returns Model identifier string
 */
export function selectModel(
  task: TaskCategory,
  preference: QualityPreference = 'balanced',
): string {
  const { flash, pro } = tierModelsFor(task);
  if (preference === 'fast') {
    return flash;
  }
  if (preference === 'quality') {
    return pro;
  }
  // 'balanced': analysis/critique tasks get Pro, everything else Flash.
  return BALANCED_PRO_TASKS.has(task) ? pro : flash;
}

/**
 * Get model recommendation info: chosen model, tier flag, and a
 * human-readable (Turkish) explanation for the UI.
 */
export function getModelInfo(
  task: TaskCategory,
  preference: QualityPreference = 'balanced',
): {
  model: string;
  isFlash: boolean;
  reason: string;
} {
  const model = selectModel(task, preference);
  const isFlash = model.includes('flash');
  const reason =
    preference === 'fast'
      ? 'Hızlı mod seçildi — Flash model kullanılıyor'
      : preference === 'quality'
        ? 'Kaliteli mod seçildi — Pro model kullanılıyor'
        : isFlash
          ? 'Bu görev için Flash yeterli — hız optimizasyonu'
          : 'Bu görev yüksek kalite gerektiriyor — Pro model seçildi';
  return { model, isFlash, reason };
}

/**
 * Estimate relative cost multiplier for a model.
 * Flash ≈ 1x, Pro ≈ 4x
 */
export function getModelCostMultiplier(model: string): number {
  return model.includes('pro') ? 4.0 : 1.0;
}

View File

@@ -0,0 +1,152 @@
/**
* Token Counter Utility
*
* Estimates token counts for text content. Uses a heuristic-based approach
* that is reasonably accurate for Gemini models without requiring
* an external tokenizer dependency.
*
* Gemini tokenization rules of thumb:
* - English: ~4 characters per token (≈ 0.75 words per token)
* - Turkish: ~3.5 characters per token (morphologically richer)
* - Code/JSON: ~3 characters per token
* - Punctuation: usually 1 token each
*
* TR: Token sayımı için yardımcı araç. Harici tokenizer gerektirmeden
* sezgisel yaklaşımla makul doğrulukta tahmin yapar.
*/
// Model context window limits (input + output)
// Per-model token budgets. `safeInput` leaves headroom below `maxInput`
// because estimateTokens is a heuristic and may undercount.
export const MODEL_LIMITS = {
  'gemini-2.5-flash': {
    maxInput: 1_048_576, // 1M tokens
    maxOutput: 65_536, // 65K tokens
    safeInput: 800_000, // Safe limit with margin
  },
  'gemini-2.5-pro': {
    maxInput: 1_048_576,
    maxOutput: 65_536,
    safeInput: 800_000,
  },
  'gemini-2.0-flash': {
    maxInput: 1_048_576,
    maxOutput: 8_192,
    safeInput: 900_000,
  },
  // Fallback for unknown models
  default: {
    maxInput: 128_000,
    maxOutput: 8_192,
    safeInput: 100_000,
  },
} as const;
// Keys of MODEL_LIMITS — note this union includes the literal 'default'.
export type ModelName = keyof typeof MODEL_LIMITS;
/**
* Estimate token count for a given text.
*
* @param text - The text to estimate tokens for
* @param language - Language hint ('tr', 'en', etc.)
* @returns Estimated token count
*/
/**
 * Heuristic token estimate for `text`.
 *
 * Base rate is characters-per-token (3.5 for Turkish, 4.0 otherwise);
 * JSON-ish structural characters add 0.3 tokens each and newlines 0.5,
 * since punctuation tends to tokenize separately. Empty input → 0.
 */
export function estimateTokens(text: string, language: string = 'en'): number {
  if (!text) return 0;
  const TURKISH_CHARS_PER_TOKEN = 3.5;
  const DEFAULT_CHARS_PER_TOKEN = 4.0;
  const charsPerToken =
    language === 'tr' ? TURKISH_CHARS_PER_TOKEN : DEFAULT_CHARS_PER_TOKEN;
  // Single pass instead of two regex scans.
  let structuralChars = 0;
  let newlineCount = 0;
  for (const ch of text) {
    if ('{}[]:,"'.includes(ch)) structuralChars += 1;
    if (ch === '\n') newlineCount += 1;
  }
  const baseTokens = text.length / charsPerToken;
  return Math.ceil(baseTokens + structuralChars * 0.3 + newlineCount * 0.5);
}
/**
* Estimate tokens for an array of text segments
*/
/**
 * Sum the token estimates for each segment's narrator script and visual
 * description. Missing fields count as empty strings (0 tokens).
 */
export function estimateTokensForSegments(
  segments: { narratorScript?: string; visualDescription?: string }[],
  language: string = 'en',
): number {
  let total = 0;
  for (const segment of segments) {
    total += estimateTokens(segment.narratorScript ?? '', language);
    total += estimateTokens(segment.visualDescription ?? '', language);
  }
  return total;
}
/**
* Get model limits for a given model name
*/
export function getModelLimits(model: string) {
return (MODEL_LIMITS as any)[model] || MODEL_LIMITS.default;
}
/**
* Calculate remaining token budget for output
*/
/**
 * Compute how much input budget remains for a model after `inputTokens`
 * have been consumed. `isOverBudget` is true once the safe input limit
 * is exceeded; `maxOutput` is passed through from the model's limits.
 */
export function getRemainingBudget(
  model: string,
  inputTokens: number,
): { remainingInput: number; maxOutput: number; isOverBudget: boolean } {
  const { safeInput, maxOutput } = getModelLimits(model);
  const remainingInput = safeInput - inputTokens;
  const isOverBudget = remainingInput < 0;
  return { remainingInput, maxOutput, isOverBudget };
}
/**
* Token usage report
*/
export interface TokenUsageReport {
  estimatedInputTokens: number; // heuristic estimate (see estimateTokens)
  modelLimit: number; // absolute context window (maxInput)
  safeLimit: number; // maxInput minus safety margin (safeInput)
  usagePercentage: number; // estimate vs safeLimit, rounded to 1 decimal
  isOverBudget: boolean; // true when the estimate exceeds safeLimit
  recommendation: 'ok' | 'trim' | 'map-reduce'; // 'trim' above 70%, 'map-reduce' above 90%
}
/**
* Analyze token usage and provide recommendations
*/
/**
 * Estimate token usage of `inputText` against a model's safe input limit
 * and recommend a strategy: 'ok' (fits), 'trim' (>70% of safe limit) or
 * 'map-reduce' (>90%, content should be chunked).
 */
export function analyzeTokenUsage(
  inputText: string,
  model: string,
  language: string = 'en',
): TokenUsageReport {
  const estimatedInputTokens = estimateTokens(inputText, language);
  const limits = getModelLimits(model);
  const usagePercentage = (estimatedInputTokens / limits.safeInput) * 100;
  let recommendation: TokenUsageReport['recommendation'];
  if (usagePercentage > 90) {
    recommendation = 'map-reduce';
  } else if (usagePercentage > 70) {
    recommendation = 'trim';
  } else {
    recommendation = 'ok';
  }
  return {
    estimatedInputTokens,
    modelLimit: limits.maxInput,
    safeLimit: limits.safeInput,
    usagePercentage: Math.round(usagePercentage * 10) / 10, // 1 decimal place
    isOverBudget: estimatedInputTokens > limits.safeInput,
    recommendation,
  };
}

View File

@@ -3,3 +3,4 @@ export * from './scripts.controller';
export * from './research.controller';
export * from './analysis.controller';
export * from './versions.controller';
export * from './jobs.controller';

View File

@@ -0,0 +1,208 @@
import {
Controller,
Get,
Post,
Param,
Body,
Logger,
NotFoundException,
} from '@nestjs/common';
import { ApiTags, ApiOperation, ApiBearerAuth } from '@nestjs/swagger';
import { InjectQueue } from '@nestjs/bullmq';
import { Queue } from 'bullmq';
import {
QUEUES,
JobType,
JobStatus,
} from '../queue/queue.constants';
/**
* JobsController
*
* REST API for managing async AI jobs.
*
* Endpoints:
* - POST /jobs/submit — Submit a new async job
* - GET /jobs/:id/status — Check job status & progress
* - GET /jobs/:id/result — Get job result
*
* TR: Asenkron AI işlerini yönetmek için REST API.
*/
@ApiTags('SkriptAI - Jobs')
@ApiBearerAuth()
@Controller('skriptai/jobs')
export class JobsController {
  private readonly logger = new Logger(JobsController.name);

  constructor(
    @InjectQueue(QUEUES.SCRIPT_GENERATION)
    private readonly scriptQueue: Queue,
    @InjectQueue(QUEUES.DEEP_RESEARCH)
    private readonly researchQueue: Queue,
    @InjectQueue(QUEUES.ANALYSIS)
    private readonly analysisQueue: Queue,
    @InjectQueue(QUEUES.IMAGE_GENERATION)
    private readonly imageQueue: Queue,
  ) {}

  /**
   * Enqueue an async job on the queue matching its type and return its id.
   * Failed jobs retry once with exponential backoff; completed jobs are
   * pruned after 1h, failed ones after 24h.
   */
  @Post('submit')
  @ApiOperation({ summary: 'Submit an async AI job' })
  async submitJob(
    @Body()
    body: {
      type: JobType;
      payload: Record<string, any>;
    },
  ) {
    const { type, payload } = body;
    const targetQueue = this.getQueueForJobType(type);
    const job = await targetQueue.add(type, payload, {
      attempts: 2,
      backoff: { type: 'exponential', delay: 5000 },
      removeOnComplete: { age: 3600 }, // 1 hour
      removeOnFail: { age: 86400 }, // 24 hours
    });
    this.logger.log(
      `Job submitted: ${job.id} (${type}) — payload: ${JSON.stringify(payload)}`,
    );
    return {
      jobId: job.id,
      type,
      status: JobStatus.QUEUED,
    };
  }

  /** Report a job's lifecycle state, progress payload and timing. */
  @Get(':id/status')
  @ApiOperation({ summary: 'Check job status & progress' })
  async getJobStatus(@Param('id') jobId: string) {
    const job = await this.findJobById(jobId);
    if (!job) {
      throw new NotFoundException(`Job ${jobId} not found`);
    }
    const state = await job.getState();
    return {
      jobId: job.id,
      type: job.name,
      status: this.mapBullState(state),
      progress: job.progress || null,
      createdAt: new Date(job.timestamp).toISOString(),
      processedOn: this.toIsoOrNull(job.processedOn),
      finishedOn: this.toIsoOrNull(job.finishedOn),
      failedReason: job.failedReason || null,
    };
  }

  /** Return the job's return value once it has completed. */
  @Get(':id/result')
  @ApiOperation({ summary: 'Get completed job result' })
  async getJobResult(@Param('id') jobId: string) {
    const job = await this.findJobById(jobId);
    if (!job) {
      throw new NotFoundException(`Job ${jobId} not found`);
    }
    const state = await job.getState();
    if (state !== 'completed') {
      return {
        jobId: job.id,
        status: this.mapBullState(state),
        result: null,
        message: 'Job has not completed yet',
      };
    }
    return {
      jobId: job.id,
      status: JobStatus.COMPLETED,
      result: job.returnvalue,
    };
  }

  // ========== HELPERS ==========

  /** Route a job type to its owning queue; unknown types throw. */
  private getQueueForJobType(type: JobType): Queue {
    switch (type) {
      case JobType.GENERATE_SCRIPT:
      case JobType.REGENERATE_SEGMENT:
      case JobType.REGENERATE_PARTIAL:
      case JobType.REWRITE_SEGMENT:
        return this.scriptQueue;
      case JobType.DEEP_RESEARCH:
      case JobType.DISCOVER_QUESTIONS:
        return this.researchQueue;
      case JobType.NEURO_ANALYSIS:
      case JobType.YOUTUBE_AUDIT:
      case JobType.COMMERCIAL_BRIEF:
      case JobType.GENERATE_VISUAL_ASSETS:
        return this.analysisQueue;
      case JobType.GENERATE_SEGMENT_IMAGE:
      case JobType.GENERATE_THUMBNAIL:
        return this.imageQueue;
      default:
        throw new Error(`Unknown job type: ${type}`);
    }
  }

  /** Job ids are not namespaced per queue, so probe each queue in turn. */
  private async findJobById(jobId: string) {
    const allQueues = [
      this.scriptQueue,
      this.researchQueue,
      this.analysisQueue,
      this.imageQueue,
    ];
    for (const candidate of allQueues) {
      const found = await candidate.getJob(jobId);
      if (found) return found;
    }
    return null;
  }

  /** Translate a BullMQ state string into our public JobStatus enum. */
  private mapBullState(state: string): JobStatus {
    if (state === 'completed') return JobStatus.COMPLETED;
    if (state === 'failed') return JobStatus.FAILED;
    if (state === 'active') return JobStatus.PROCESSING;
    return JobStatus.QUEUED;
  }

  /** Convert an epoch-ms timestamp to ISO-8601, or null when absent. */
  private toIsoOrNull(timestamp?: number | null): string | null {
    return timestamp ? new Date(timestamp).toISOString() : null;
  }
}

View File

@@ -1,5 +1,6 @@
import {
Controller,
Get,
Post,
Put,
Delete,
@@ -120,4 +121,44 @@ export class ScriptsController {
body.segmentIds,
);
}
// ========== ENHANCED PIPELINE (Faz 2.2) ==========
/**
 * Phase 0: ask the AI to enrich/expand the project's topic before any
 * outline is generated. Delegates entirely to ScriptsService.enrichTopic.
 */
@Post(':projectId/enrich-topic')
@UseGuards(JwtAuthGuard)
@ApiBearerAuth()
@ApiOperation({ summary: 'Phase 0: Enrich and expand topic with AI' })
async enrichTopic(@Param('projectId') projectId: string) {
  return this.scriptsService.enrichTopic(projectId);
}
/**
 * Generate an outline for the user to review; per the summary, no
 * segments are persisted at this stage.
 */
@Get(':projectId/outline-review')
@UseGuards(JwtAuthGuard)
@ApiBearerAuth()
@ApiOperation({ summary: 'Generate outline for user review (no segments created)' })
async getOutlineForReview(@Param('projectId') projectId: string) {
  return this.scriptsService.generateOutlineForReview(projectId);
}
/**
 * Phase 3: run the AI consistency & quality review over the project's
 * full script. Delegates to ScriptsService.checkConsistency.
 */
@Post(':projectId/consistency-check')
@UseGuards(JwtAuthGuard)
@ApiBearerAuth()
@ApiOperation({ summary: 'Phase 3: AI consistency & quality review' })
async checkConsistency(@Param('projectId') projectId: string) {
  return this.scriptsService.checkConsistency(projectId);
}
/**
 * Phase 4: AI self-critique with automatic rewrite of low-scoring segments.
 * `body.threshold` optionally overrides the score cutoff; when omitted the
 * service's default applies (presumably — confirm in
 * ScriptsService.selfCritiqueAndRewrite).
 */
@Post(':projectId/self-critique')
@UseGuards(JwtAuthGuard)
@ApiBearerAuth()
@ApiOperation({ summary: 'Phase 4: AI self-critique and auto-rewrite' })
async selfCritique(
  @Param('projectId') projectId: string,
  @Body() body?: { threshold?: number },
) {
  return this.scriptsService.selfCritiqueAndRewrite(
    projectId,
    body?.threshold,
  );
}
}

View File

@@ -0,0 +1,3 @@
export * from './ws-events';
export * from './skriptai.gateway';
export * from './queue-event-bridge';

View File

@@ -0,0 +1,91 @@
import { Injectable, OnModuleInit, Logger } from '@nestjs/common';
import { InjectQueue } from '@nestjs/bullmq';
import { Queue, QueueEvents } from 'bullmq';
import { SkriptaiGateway } from './skriptai.gateway';
import { QUEUES } from '../queue/queue.constants';
/**
* BullMQ → WebSocket Event Bridge
*
* Listens to BullMQ queue events and forwards them to the WebSocket gateway.
* This enables real-time progress notifications for all async jobs.
*
* TR: BullMQ kuyruk eventlerini dinler ve WebSocket gateway'e yönlendirir.
* Böylece tüm asenkron işler için gerçek zamanlı ilerleme bildirimleri sağlanır.
*/
@Injectable()
export class QueueEventBridge implements OnModuleInit {
  private readonly logger = new Logger(QueueEventBridge.name);

  /**
   * QueueEvents handles created in attachListeners. Each one holds its own
   * Redis connection; the original code dropped the references, leaking the
   * connections on shutdown. We keep them so they can be closed.
   */
  private readonly queueEventHandles: QueueEvents[] = [];

  constructor(
    private readonly gateway: SkriptaiGateway,
    @InjectQueue(QUEUES.SCRIPT_GENERATION)
    private readonly scriptQueue: Queue,
    @InjectQueue(QUEUES.DEEP_RESEARCH)
    private readonly researchQueue: Queue,
    @InjectQueue(QUEUES.ANALYSIS)
    private readonly analysisQueue: Queue,
    @InjectQueue(QUEUES.IMAGE_GENERATION)
    private readonly imageQueue: Queue,
  ) {}

  /** Wire progress/completed/failed listeners for every queue at startup. */
  onModuleInit() {
    this.attachListeners(this.scriptQueue);
    this.attachListeners(this.researchQueue);
    this.attachListeners(this.analysisQueue);
    this.attachListeners(this.imageQueue);
    this.logger.log('✅ BullMQ → WebSocket event bridge initialized');
  }

  /**
   * Close every QueueEvents Redis connection. Nest invokes lifecycle hooks
   * by method name, so this runs on shutdown even without declaring the
   * OnModuleDestroy interface in the implements clause.
   */
  async onModuleDestroy(): Promise<void> {
    await Promise.all(
      this.queueEventHandles.map((handle) =>
        handle.close().catch(() => undefined), // best-effort cleanup
      ),
    );
  }

  /** Create a QueueEvents listener for `queue` and forward events to WS. */
  private attachListeners(queue: Queue) {
    const events = new QueueEvents(queue.name, {
      connection: queue.opts?.connection as any,
    });
    this.queueEventHandles.push(events);

    // Progress payloads are produced by our processors (JobProgress shape);
    // only forward when a projectId is present to route the WS room.
    events.on('progress', ({ jobId, data }) => {
      const progress = data as any;
      if (progress && progress.projectId) {
        this.gateway.emitJobProgress({
          jobId,
          jobType: '', // job name is not part of the progress event payload
          projectId: progress.projectId,
          step: progress.step || 0,
          totalSteps: progress.totalSteps || 0,
          message: progress.message || '',
          percentage: progress.percentage || 0,
        });
      }
    });

    events.on('completed', async ({ jobId }) => {
      try {
        const job = await queue.getJob(jobId);
        if (job) {
          this.gateway.emitJobCompleted({
            jobId,
            jobType: job.name,
            projectId: job.data.projectId || '',
          });
        }
      } catch {
        // Job may have been removed (removeOnComplete) before we read it.
      }
    });

    events.on('failed', async ({ jobId, failedReason }) => {
      try {
        const job = await queue.getJob(jobId);
        if (job) {
          this.gateway.emitJobFailed({
            jobId,
            jobType: job.name,
            projectId: job.data.projectId || '',
            reason: failedReason || 'Unknown error',
          });
        }
      } catch {
        // Job may have been removed before we read it.
      }
    });
  }
}

View File

@@ -0,0 +1,123 @@
import {
WebSocketGateway,
WebSocketServer,
OnGatewayConnection,
OnGatewayDisconnect,
SubscribeMessage,
} from '@nestjs/websockets';
import { Logger } from '@nestjs/common';
import { Server, Socket } from 'socket.io';
import {
WS_EVENTS,
JobProgressEvent,
JobCompletedEvent,
JobFailedEvent,
SegmentEvent,
VersionEvent,
ProjectStatusEvent,
} from './ws-events';
/**
* SkriptAI WebSocket Gateway
*
* Socket.IO gateway for real-time notifications.
* Clients join project-specific rooms to receive updates.
*
* TR: Gerçek zamanlı bildirimler için Socket.IO gateway.
* İstemciler proje odalarına katılarak güncellemeler alır.
*/
@WebSocketGateway({
  namespace: '/skriptai',
  cors: {
    // NOTE(review): browsers reject credentialed CORS with a wildcard
    // origin — confirm whether credentials are actually required here.
    origin: '*',
    credentials: true,
  },
})
export class SkriptaiGateway
  implements OnGatewayConnection, OnGatewayDisconnect
{
  @WebSocketServer()
  server: Server;

  private readonly logger = new Logger(SkriptaiGateway.name);

  /** Socket.IO room name that groups a project's subscribers. */
  private roomFor(projectId: string): string {
    return `project:${projectId}`;
  }

  handleConnection(client: Socket) {
    this.logger.log(`Client connected: ${client.id}`);
  }

  handleDisconnect(client: Socket) {
    this.logger.log(`Client disconnected: ${client.id}`);
  }

  /** Subscribe the calling socket to a project's event room. */
  @SubscribeMessage('join:project')
  handleJoinProject(client: Socket, projectId: string) {
    const room = this.roomFor(projectId);
    client.join(room);
    this.logger.debug(`Client ${client.id} joined room ${room}`);
    return { status: 'ok', room };
  }

  /** Unsubscribe the calling socket from a project's event room. */
  @SubscribeMessage('leave:project')
  handleLeaveProject(client: Socket, projectId: string) {
    const room = this.roomFor(projectId);
    client.leave(room);
    this.logger.debug(`Client ${client.id} left room ${room}`);
    return { status: 'ok' };
  }

  // ========== EMIT METHODS (called by processors/services) ==========

  /** Broadcast job progress to the project's room. */
  emitJobProgress(event: JobProgressEvent) {
    this.server
      .to(this.roomFor(event.projectId))
      .emit(WS_EVENTS.JOB_PROGRESS, event);
  }

  /** Broadcast job completion to the project's room. */
  emitJobCompleted(event: JobCompletedEvent) {
    this.server
      .to(this.roomFor(event.projectId))
      .emit(WS_EVENTS.JOB_COMPLETED, event);
  }

  /** Broadcast job failure to the project's room. */
  emitJobFailed(event: JobFailedEvent) {
    this.server
      .to(this.roomFor(event.projectId))
      .emit(WS_EVENTS.JOB_FAILED, event);
  }

  /** Broadcast a segment generated/updated event (caller picks the name). */
  emitSegmentEvent(eventName: string, event: SegmentEvent) {
    this.server.to(this.roomFor(event.projectId)).emit(eventName, event);
  }

  /** Broadcast a version created/restored event (caller picks the name). */
  emitVersionEvent(eventName: string, event: VersionEvent) {
    this.server.to(this.roomFor(event.projectId)).emit(eventName, event);
  }

  /** Broadcast a project status transition to the project's room. */
  emitProjectStatusChanged(event: ProjectStatusEvent) {
    this.server
      .to(this.roomFor(event.projectId))
      .emit(WS_EVENTS.PROJECT_STATUS_CHANGED, event);
  }
}

View File

@@ -0,0 +1,66 @@
/**
* WebSocket Event Constants
*
* All WebSocket event names used across the system.
*
* TR: Sistemde kullanılan tüm WebSocket event isimleri.
*/
// Event names emitted over the /skriptai Socket.IO namespace; payload
// shapes for each are defined by the interfaces below.
export const WS_EVENTS = {
  // Job lifecycle events
  JOB_PROGRESS: 'job:progress',
  JOB_COMPLETED: 'job:completed',
  JOB_FAILED: 'job:failed',
  // Content events
  SEGMENT_GENERATED: 'segment:generated',
  SEGMENT_UPDATED: 'segment:updated',
  VERSION_CREATED: 'version:created',
  VERSION_RESTORED: 'version:restored',
  // Project events
  PROJECT_STATUS_CHANGED: 'project:status-changed',
} as const;
// Payload types
/** Progress tick for a running job (payload of JOB_PROGRESS). */
export interface JobProgressEvent {
  jobId: string;
  jobType: string; // may be '' when emitted from the queue-event bridge
  projectId: string; // used to route to the `project:<id>` room
  step: number;
  totalSteps: number;
  message: string; // human-readable progress message
  percentage: number; // 0-100
}
/** Payload of JOB_COMPLETED. */
export interface JobCompletedEvent {
  jobId: string;
  jobType: string;
  projectId: string;
  result?: any; // optional job return value
}
/** Payload of JOB_FAILED. */
export interface JobFailedEvent {
  jobId: string;
  jobType: string;
  projectId: string;
  reason: string; // failure reason from BullMQ
}
/** Payload of SEGMENT_GENERATED / SEGMENT_UPDATED. */
export interface SegmentEvent {
  segmentId: string;
  projectId: string;
  segmentType?: string;
}
/** Payload of VERSION_CREATED / VERSION_RESTORED. */
export interface VersionEvent {
  versionId: string;
  projectId: string;
  versionNumber: number;
  label?: string;
}
/** Payload of PROJECT_STATUS_CHANGED. */
export interface ProjectStatusEvent {
  projectId: string;
  status: string;
  previousStatus?: string;
}

View File

@@ -0,0 +1,103 @@
/**
* Consistency Check Prompt Builder
*
* Phase 3: After all segments are generated, AI reviews the entire
* script for tone consistency, flow, pacing, and logical connections.
*
* TR: Tutarlılık kontrolü — tüm segmentler üretildikten sonra ton, akış ve mantık kontrolü.
*/
/** Input for the Phase-3 whole-script consistency review prompt. */
export interface ConsistencyCheckInput {
  segments: {
    type: string;
    narratorScript: string;
    visualDescription?: string;
  }[];
  speechStyles: string[];
  targetAudience: string[];
  topic: string;
  language: string;
}

/**
 * Build the consistency-check prompt plus the JSON response schema.
 * Returns { prompt, temperature, schema } where schema is a stringified
 * JSON-schema-like object for structured model output.
 */
export function buildConsistencyCheckPrompt(input: ConsistencyCheckInput) {
  // FIX: the original label template was `[Segment ${i + 1}${s.type}]`,
  // which ran the number and type together (e.g. "[Segment 1hook]");
  // a separator keeps the labels readable for the model.
  const segmentText = input.segments
    .map(
      (s, i) =>
        `[Segment ${i + 1} — ${s.type}]\n${s.narratorScript}\nVisual: ${s.visualDescription || 'N/A'}`,
    )
    .join('\n\n');
  const prompt = `You are a senior script editor and quality assurance specialist.
TASK: Review the entire script below for consistency, quality, and flow.
TOPIC: "${input.topic}"
SPEECH STYLE: ${input.speechStyles.join(', ')}
TARGET AUDIENCE: ${input.targetAudience.join(', ')}
LANGUAGE: ${input.language}
FULL SCRIPT:
${segmentText}
EVALUATE AND PROVIDE:
1. "overallScore" — Quality score 1-100
2. "toneConsistency" — Score 1-10 for consistent tone/voice throughout
3. "flowScore" — Score 1-10 for smooth transitions and logical progression
4. "pacingScore" — Score 1-10 for good pacing (not too fast/slow)
5. "engagementScore" — Score 1-10 for how engaging the content is
6. "issues" — Array of specific issues found:
   - "segmentIndex": which segment (0-based)
   - "type": "tone_mismatch" | "flow_break" | "pacing_issue" | "repetition" | "logic_gap" | "weak_content"
   - "description": human-readable explanation
   - "severity": "low" | "medium" | "high"
   - "suggestedFix": how to fix this issue
7. "segmentsToRewrite" — Array of segment indexes (0-based) that should be rewritten
8. "generalSuggestions" — Overall improvement suggestions (max 5)
Be rigorous but fair. Only flag genuine issues that would impact the audience experience.
Respond in ${input.language}.`;
  const schema = {
    type: 'object' as const,
    properties: {
      overallScore: { type: 'number' as const },
      toneConsistency: { type: 'number' as const },
      flowScore: { type: 'number' as const },
      pacingScore: { type: 'number' as const },
      engagementScore: { type: 'number' as const },
      issues: {
        type: 'array' as const,
        items: {
          type: 'object' as const,
          properties: {
            segmentIndex: { type: 'number' as const },
            type: { type: 'string' as const },
            description: { type: 'string' as const },
            severity: { type: 'string' as const },
            suggestedFix: { type: 'string' as const },
          },
        },
      },
      segmentsToRewrite: {
        type: 'array' as const,
        items: { type: 'number' as const },
      },
      generalSuggestions: {
        type: 'array' as const,
        items: { type: 'string' as const },
      },
    },
    required: [
      'overallScore',
      'toneConsistency',
      'flowScore',
      'pacingScore',
      'engagementScore',
      'issues',
      'segmentsToRewrite',
      'generalSuggestions',
    ],
  };
  // Low temperature: review/scoring should be deterministic, not creative.
  return { prompt, temperature: 0.3, schema: JSON.stringify(schema) };
}

View File

@@ -52,3 +52,19 @@ export {
type CommercialBriefInput,
type VisualAssetKeywordsInput,
} from './analysis.prompt';
// Pipeline Enhancements (Faz 2.2)
export {
buildTopicEnrichmentPrompt,
type TopicEnrichmentInput,
} from './topic-enrichment.prompt';
export {
buildConsistencyCheckPrompt,
type ConsistencyCheckInput,
} from './consistency-check.prompt';
export {
buildSelfCritiquePrompt,
type SelfCritiqueInput,
} from './self-critique.prompt';

View File

@@ -0,0 +1,91 @@
/**
* Self-Critique Prompt Builder
*
* Phase 4: AI critiques individual segments, scoring them on multiple
* dimensions. Low-scoring segments are automatically flagged for rewrite.
*
* TR: Öz-eleştiri — AI her segmenti birden fazla boyutta puanlar, düşük puanlıları yeniden yazmak üzere işaretler.
*/
/** Input for the Phase-4 per-segment self-critique prompt. */
export interface SelfCritiqueInput {
  segment: {
    type: string;
    narratorScript: string;
    visualDescription?: string;
    onScreenText?: string;
  };
  segmentIndex: number;
  topic: string;
  speechStyles: string[];
  targetAudience: string[];
  language: string;
}

/**
 * Build the self-critique prompt plus JSON response schema. The schema
 * scores six quality dimensions; `rewriteInstructions` stays optional
 * (only expected when shouldRewrite is true).
 */
export function buildSelfCritiquePrompt(input: SelfCritiqueInput) {
  const prompt = `You are a ruthless but fair content critic and quality scorer.
TASK: Score the following script segment in multiple dimensions and provide rewrite recommendations if quality is low.
TOPIC: "${input.topic}"
SEGMENT INDEX: ${input.segmentIndex}
SEGMENT TYPE: ${input.segment.type}
SPEECH STYLE: ${input.speechStyles.join(', ')}
TARGET AUDIENCE: ${input.targetAudience.join(', ')}
LANGUAGE: ${input.language}
SEGMENT CONTENT:
---
NARRATOR: ${input.segment.narratorScript}
VISUAL: ${input.segment.visualDescription || 'Not specified'}
ON-SCREEN TEXT: ${input.segment.onScreenText || 'None'}
---
SCORE EACH DIMENSION (1-10):
1. "clarity" — Is the message clear and easy to understand?
2. "engagement" — Does it hook and maintain attention?
3. "originality" — Is it fresh and not generic?
4. "audienceMatch" — Does it match the target audience tone?
5. "visualAlignment" — Do script and visual description complement each other?
6. "emotionalImpact" — Does it evoke the intended emotion?
ALSO PROVIDE:
7. "averageScore" — Average of all scores
8. "shouldRewrite" — true if averageScore < 6.5
9. "weaknesses" — Array of specific weaknesses (max 3)
10. "rewriteInstructions" — If shouldRewrite is true, specific instructions for improvement
Be honest and critical. Don't inflate scores.
Respond in ${input.language}.`;
  // Shared schema nodes — identical JSON output to spelling each one out.
  const scoreProp = { type: 'number' as const };
  const schema = {
    type: 'object' as const,
    properties: {
      clarity: scoreProp,
      engagement: scoreProp,
      originality: scoreProp,
      audienceMatch: scoreProp,
      visualAlignment: scoreProp,
      emotionalImpact: scoreProp,
      averageScore: scoreProp,
      shouldRewrite: { type: 'boolean' as const },
      weaknesses: {
        type: 'array' as const,
        items: { type: 'string' as const },
      },
      rewriteInstructions: { type: 'string' as const },
    },
    required: [
      'clarity',
      'engagement',
      'originality',
      'audienceMatch',
      'visualAlignment',
      'emotionalImpact',
      'averageScore',
      'shouldRewrite',
      'weaknesses',
    ],
  };
  // Very low temperature: scoring must be consistent run-to-run.
  return { prompt, temperature: 0.2, schema: JSON.stringify(schema) };
}

View File

@@ -0,0 +1,80 @@
/**
* Topic Enrichment Prompt Builder
*
* Phase 0: Before outline generation, AI expands and refines the topic.
* Provides additional angles, sub-topics, and creative directions.
*
* TR: Konu zenginleştirme — outline üretilmeden önce konuyu AI ile genişletir.
*/
/** Input for the Phase-0 topic enrichment prompt. */
export interface TopicEnrichmentInput {
  topic: string;
  contentType: string;
  targetAudience: string[];
  language: string;
  userNotes?: string;
}

/**
 * Build the topic-enrichment prompt plus JSON response schema.
 * `controversialTakes` is intentionally not required (optional output).
 */
export function buildTopicEnrichmentPrompt(input: TopicEnrichmentInput) {
  const prompt = `You are a world-class content strategist and creative director.
TASK: Enrich and expand the following topic into a comprehensive content brief.
TOPIC: "${input.topic}"
CONTENT TYPE: ${input.contentType}
TARGET AUDIENCE: ${input.targetAudience.join(', ')}
LANGUAGE: ${input.language}
${input.userNotes ? `USER NOTES: ${input.userNotes}` : ''}
REQUIREMENTS:
1. "enrichedTopic" — A refined, more compelling version of the topic (catchy, SEO-friendly)
2. "angles" — 3-5 unique angles/perspectives to approach this topic
3. "subTopics" — 5-8 key sub-topics that should be covered
4. "hookIdeas" — 3 powerful hook ideas to start the content
5. "emotionalCore" — The primary emotional journey the audience should feel
6. "uniqueValue" — What makes this content different from existing content on this topic
7. "keyQuestions" — 5-7 questions the audience would want answered
8. "controversialTakes" — 2-3 thought-provoking or controversial perspectives (optional, if relevant)
Respond in ${input.language}. Be creative and think beyond the obvious.`;
  // Shared schema nodes — identical JSON output to spelling each one out.
  const stringProp = { type: 'string' as const };
  const stringListProp = {
    type: 'array' as const,
    items: { type: 'string' as const },
  };
  const schema = {
    type: 'object' as const,
    properties: {
      enrichedTopic: stringProp,
      angles: stringListProp,
      subTopics: stringListProp,
      hookIdeas: stringListProp,
      emotionalCore: stringProp,
      uniqueValue: stringProp,
      keyQuestions: stringListProp,
      controversialTakes: stringListProp,
    },
    required: [
      'enrichedTopic',
      'angles',
      'subTopics',
      'hookIdeas',
      'emotionalCore',
      'uniqueValue',
      'keyQuestions',
    ],
  };
  // High temperature: this phase is deliberately creative/divergent.
  return { prompt, temperature: 0.9, schema: JSON.stringify(schema) };
}

View File

@@ -0,0 +1,80 @@
import { Processor, WorkerHost } from '@nestjs/bullmq';
import { Logger } from '@nestjs/common';
import { Job } from 'bullmq';
import { AnalysisService } from '../services/analysis.service';
import {
QUEUES,
JobType,
AnalysisPayload,
JobResult,
} from './queue.constants';
/**
* Analysis Queue Processor
*
* Handles async analysis jobs: neuro, youtube audit, commercial brief, visual assets.
*
* TR: Asenkron analiz işlerini yönetir.
*/
@Processor(QUEUES.ANALYSIS)
export class AnalysisProcessor extends WorkerHost {
  private readonly logger = new Logger(AnalysisProcessor.name);

  constructor(private readonly analysisService: AnalysisService) {
    super();
  }

  /**
   * Dispatch an analysis job by its BullMQ job name.
   *
   * FIX: errors are logged and re-thrown instead of being swallowed into
   * `{ success: false }`. Swallowing made BullMQ mark failed jobs as
   * completed, which disabled the configured retry/backoff and prevented
   * the 'failed' queue event (relied on by the WebSocket bridge and the
   * jobs status endpoint's failedReason) from ever firing.
   */
  async process(job: Job<any, JobResult>): Promise<JobResult> {
    this.logger.log(`Processing analysis job ${job.id} — type: ${job.name}`);
    try {
      switch (job.name) {
        case JobType.NEURO_ANALYSIS:
          return await this.handleNeuro(job as Job<AnalysisPayload>);
        case JobType.YOUTUBE_AUDIT:
          return await this.handleYoutube(job as Job<AnalysisPayload>);
        case JobType.COMMERCIAL_BRIEF:
          return await this.handleCommercial(job as Job<AnalysisPayload>);
        case JobType.GENERATE_VISUAL_ASSETS:
          return await this.handleVisualAssets(job as Job<AnalysisPayload>);
        default:
          throw new Error(`Unknown analysis job type: ${job.name}`);
      }
    } catch (error) {
      const message = error instanceof Error ? error.message : String(error);
      this.logger.error(`Analysis job ${job.id} failed: ${message}`);
      throw error; // let BullMQ handle retries and emit 'failed'
    }
  }

  /** Neuro-marketing analysis of the project's script. */
  private async handleNeuro(job: Job<AnalysisPayload>): Promise<JobResult> {
    await job.updateProgress({ step: 1, totalSteps: 2, message: 'Nöro-pazarlama analizi yapılıyor...', percentage: 30 });
    const result = await this.analysisService.analyzeNeuroMarketing(job.data.projectId);
    await job.updateProgress({ step: 2, totalSteps: 2, message: 'Analiz tamamlandı!', percentage: 100 });
    return { success: true, data: result };
  }

  /** YouTube audit of the project. */
  private async handleYoutube(job: Job<AnalysisPayload>): Promise<JobResult> {
    await job.updateProgress({ step: 1, totalSteps: 2, message: 'YouTube audit yapılıyor...', percentage: 30 });
    const result = await this.analysisService.performYoutubeAudit(job.data.projectId);
    await job.updateProgress({ step: 2, totalSteps: 2, message: 'Audit tamamlandı!', percentage: 100 });
    return { success: true, data: result };
  }

  /** Commercial brief generation for the project. */
  private async handleCommercial(job: Job<AnalysisPayload>): Promise<JobResult> {
    await job.updateProgress({ step: 1, totalSteps: 2, message: 'Ticari brief oluşturuluyor...', percentage: 30 });
    const result = await this.analysisService.generateCommercialBrief(job.data.projectId);
    await job.updateProgress({ step: 2, totalSteps: 2, message: 'Brief tamamlandı!', percentage: 100 });
    return { success: true, data: result };
  }

  /** Visual-asset keyword/asset generation for the project. */
  private async handleVisualAssets(job: Job<AnalysisPayload>): Promise<JobResult> {
    await job.updateProgress({ step: 1, totalSteps: 2, message: 'Görsel varlıklar üretiliyor...', percentage: 30 });
    const result = await this.analysisService.generateVisualAssets(job.data.projectId);
    await job.updateProgress({ step: 2, totalSteps: 2, message: 'Tamamlandı!', percentage: 100 });
    return { success: true, data: result };
  }
}

View File

@@ -0,0 +1,4 @@
export * from './queue.constants';
export * from './script.processor';
export * from './research.processor';
export * from './analysis.processor';

View File

@@ -0,0 +1,96 @@
/**
* Queue Constants
*
* Central definition of all BullMQ queue names and job types.
*
* TR: Tüm BullMQ kuyruk adları ve iş tipleri merkezi tanımı.
*/
// Queue names — shared by processors (@Processor), producers (@InjectQueue)
// and the WebSocket event bridge.
export const QUEUES = {
  SCRIPT_GENERATION: 'script-generation',
  DEEP_RESEARCH: 'deep-research',
  ANALYSIS: 'analysis',
  IMAGE_GENERATION: 'image-generation',
} as const;
// Job type discriminators — used both as BullMQ job names and to route a
// submission to the right queue (see JobsController.getQueueForJobType).
export enum JobType {
  // Script
  GENERATE_SCRIPT = 'generate-script',
  REGENERATE_SEGMENT = 'regenerate-segment',
  REGENERATE_PARTIAL = 'regenerate-partial',
  REWRITE_SEGMENT = 'rewrite-segment',
  // Research
  DEEP_RESEARCH = 'deep-research',
  DISCOVER_QUESTIONS = 'discover-questions',
  // Analysis
  NEURO_ANALYSIS = 'neuro-analysis',
  YOUTUBE_AUDIT = 'youtube-audit',
  COMMERCIAL_BRIEF = 'commercial-brief',
  GENERATE_VISUAL_ASSETS = 'generate-visual-assets',
  // Image
  GENERATE_SEGMENT_IMAGE = 'generate-segment-image',
  GENERATE_THUMBNAIL = 'generate-thumbnail',
}
// Public job status exposed by the jobs API (mapped from BullMQ states).
export enum JobStatus {
  QUEUED = 'QUEUED',
  PROCESSING = 'PROCESSING',
  COMPLETED = 'COMPLETED',
  FAILED = 'FAILED',
}
// Job payload interfaces — every payload carries projectId so the
// queue-event bridge can route WebSocket events to the project room.
export interface ScriptGenerationPayload {
  projectId: string;
  userId?: string;
}
export interface SegmentRegeneratePayload {
  segmentId: string;
  projectId: string;
}
export interface PartialRegeneratePayload {
  projectId: string;
  segmentIds: string[];
}
export interface RewriteSegmentPayload {
  segmentId: string;
  newStyle: string; // target speech/writing style for the rewrite
  projectId: string;
}
export interface DeepResearchPayload {
  projectId: string;
}
export interface AnalysisPayload {
  projectId: string;
}
export interface ImageGenerationPayload {
  segmentId: string;
  projectId: string;
}
// Job result — stored as the BullMQ return value and surfaced by
// GET /skriptai/jobs/:id/result.
export interface JobResult {
  success: boolean;
  data?: any;
  error?: string;
}
// Job progress detail — passed to job.updateProgress and forwarded over
// WebSocket as JobProgressEvent.
export interface JobProgress {
  step: number;
  totalSteps: number;
  message: string;
  percentage: number; // 0-100
}

View File

@@ -0,0 +1,78 @@
import { Processor, WorkerHost } from '@nestjs/bullmq';
import { Logger } from '@nestjs/common';
import { Job } from 'bullmq';
import { ResearchService } from '../services/research.service';
import {
QUEUES,
JobType,
DeepResearchPayload,
JobResult,
} from './queue.constants';
/**
* Research Queue Processor
*
* Handles async research jobs: deep research, discovery questions.
*
* TR: Asenkron araştırma işlerini yönetir.
*/
@Processor(QUEUES.DEEP_RESEARCH)
export class ResearchProcessor extends WorkerHost {
  private readonly logger = new Logger(ResearchProcessor.name);

  constructor(private readonly researchService: ResearchService) {
    super();
  }

  /**
   * Dispatch a research job by its BullMQ job name.
   *
   * FIX: errors are logged and re-thrown instead of being swallowed into
   * `{ success: false }`. Swallowing made BullMQ mark failed jobs as
   * completed, disabling retry/backoff and starving the 'failed' queue
   * event that the WebSocket bridge listens for.
   */
  async process(job: Job<any, JobResult>): Promise<JobResult> {
    this.logger.log(`Processing research job ${job.id} — type: ${job.name}`);
    try {
      switch (job.name) {
        case JobType.DEEP_RESEARCH:
          return await this.handleDeepResearch(
            job as Job<DeepResearchPayload>,
          );
        case JobType.DISCOVER_QUESTIONS:
          return await this.handleDiscoverQuestions(
            job as Job<DeepResearchPayload>,
          );
        default:
          throw new Error(`Unknown research job type: ${job.name}`);
      }
    } catch (error) {
      const message = error instanceof Error ? error.message : String(error);
      this.logger.error(`Research job ${job.id} failed: ${message}`);
      throw error; // let BullMQ handle retries and emit 'failed'
    }
  }

  /** Run the full deep-research flow, reporting coarse start/end progress. */
  private async handleDeepResearch(
    job: Job<DeepResearchPayload>,
  ): Promise<JobResult> {
    const { projectId } = job.data;
    await job.updateProgress({ step: 1, totalSteps: 3, message: 'Araştırma başlatılıyor...', percentage: 10 });
    const result = await this.researchService.performDeepResearch(projectId);
    await job.updateProgress({ step: 3, totalSteps: 3, message: 'Araştırma tamamlandı!', percentage: 100 });
    return { success: true, data: result };
  }

  /** Generate discovery questions for a project. */
  private async handleDiscoverQuestions(
    job: Job<DeepResearchPayload>,
  ): Promise<JobResult> {
    const { projectId } = job.data;
    await job.updateProgress({ step: 1, totalSteps: 2, message: 'Keşif soruları üretiliyor...', percentage: 30 });
    // NOTE(review): second argument is a hard-coded empty string — confirm
    // what context generateDiscoveryQuestions expects here.
    const result = await this.researchService.generateDiscoveryQuestions(projectId, '');
    await job.updateProgress({ step: 2, totalSteps: 2, message: 'Tamamlandı!', percentage: 100 });
    return { success: true, data: result };
  }
}

View File

@@ -0,0 +1,144 @@
import { Processor, WorkerHost } from '@nestjs/bullmq';
import { Logger } from '@nestjs/common';
import { Job } from 'bullmq';
import { ScriptsService } from '../services/scripts.service';
import { VersionsService } from '../services/versions.service';
import {
QUEUES,
JobType,
ScriptGenerationPayload,
SegmentRegeneratePayload,
PartialRegeneratePayload,
RewriteSegmentPayload,
JobResult,
} from './queue.constants';
/**
 * Script Queue Processor
 *
 * Handles all script-related async jobs:
 * - Full script generation
 * - Single segment regeneration
 * - Partial regeneration
 * - Segment rewrite
 *
 * Each handler reports coarse progress via job.updateProgress and returns a
 * JobResult envelope; process() converts any thrown error into
 * { success: false, error } so BullMQ stores the failure as a result.
 *
 * TR: Script ile ilgili tüm asenkron işleri yönetir.
 */
@Processor(QUEUES.SCRIPT_GENERATION)
export class ScriptProcessor extends WorkerHost {
  private readonly logger = new Logger(ScriptProcessor.name);

  constructor(
    private readonly scriptsService: ScriptsService,
    // NOTE(review): versionsService is not used by any handler in this
    // processor — confirm it is needed, otherwise drop the injection.
    private readonly versionsService: VersionsService,
  ) {
    super();
  }

  /**
   * BullMQ entry point — dispatches the job to the handler matching job.name.
   *
   * @param job Incoming queue job; payload shape depends on job.name.
   * @returns JobResult with success flag and either data or an error message.
   */
  async process(job: Job<any, JobResult>): Promise<JobResult> {
    this.logger.log(`Processing job ${job.id} — type: ${job.name}`);
    try {
      switch (job.name) {
        case JobType.GENERATE_SCRIPT:
          return await this.handleGenerateScript(
            job as Job<ScriptGenerationPayload>,
          );
        case JobType.REGENERATE_SEGMENT:
          return await this.handleRegenerateSegment(
            job as Job<SegmentRegeneratePayload>,
          );
        case JobType.REGENERATE_PARTIAL:
          return await this.handleRegeneratePartial(
            job as Job<PartialRegeneratePayload>,
          );
        case JobType.REWRITE_SEGMENT:
          return await this.handleRewriteSegment(
            job as Job<RewriteSegmentPayload>,
          );
        default:
          throw new Error(`Unknown job type: ${job.name}`);
      }
    } catch (error: unknown) {
      // Fix: narrow before reading .message — with `error: any`, a non-Error
      // throw (string, plain object) produced `undefined` as the error text.
      const message = error instanceof Error ? error.message : String(error);
      this.logger.error(`Job ${job.id} failed: ${message}`);
      return { success: false, error: message };
    }
  }

  /** Generates the complete script for a project (3 coarse progress steps). */
  private async handleGenerateScript(
    job: Job<ScriptGenerationPayload>,
  ): Promise<JobResult> {
    const { projectId } = job.data;
    await job.updateProgress({ step: 1, totalSteps: 3, message: 'Script hazırlanıyor...', percentage: 10 });
    const result = await this.scriptsService.generateScript(projectId);
    await job.updateProgress({ step: 3, totalSteps: 3, message: 'Script tamamlandı!', percentage: 100 });
    return { success: true, data: result };
  }

  /** Regenerates a single segment from scratch. */
  private async handleRegenerateSegment(
    job: Job<SegmentRegeneratePayload>,
  ): Promise<JobResult> {
    const { segmentId } = job.data;
    await job.updateProgress({ step: 1, totalSteps: 2, message: 'Segment yeniden yazılıyor...', percentage: 30 });
    const result = await this.scriptsService.regenerateSegment(segmentId);
    await job.updateProgress({ step: 2, totalSteps: 2, message: 'Tamamlandı!', percentage: 100 });
    return { success: true, data: result };
  }

  /**
   * Regenerates a chosen subset of segments in one batch.
   * Progress granularity is the number of selected segments.
   */
  private async handleRegeneratePartial(
    job: Job<PartialRegeneratePayload>,
  ): Promise<JobResult> {
    const { projectId, segmentIds } = job.data;
    const total = segmentIds.length;
    await job.updateProgress({
      step: 0,
      totalSteps: total,
      message: `${total} segment yeniden yazılacak...`,
      percentage: 5,
    });
    const result = await this.scriptsService.regeneratePartial(
      projectId,
      segmentIds,
    );
    await job.updateProgress({
      step: total,
      totalSteps: total,
      message: 'Tüm segmentler tamamlandı!',
      percentage: 100,
    });
    return { success: true, data: result };
  }

  /** Rewrites one segment in the requested style (newStyle free-text). */
  private async handleRewriteSegment(
    job: Job<RewriteSegmentPayload>,
  ): Promise<JobResult> {
    const { segmentId, newStyle } = job.data;
    await job.updateProgress({ step: 1, totalSteps: 2, message: `"${newStyle}" stiliyle yeniden yazılıyor...`, percentage: 30 });
    const result = await this.scriptsService.rewriteSegment(
      segmentId,
      newStyle,
    );
    await job.updateProgress({ step: 2, totalSteps: 2, message: 'Tamamlandı!', percentage: 100 });
    return { success: true, data: result };
  }
}

View File

@@ -9,6 +9,9 @@ import {
buildChapterSegmentPrompt,
buildSegmentRewritePrompt,
buildSegmentImagePrompt,
buildTopicEnrichmentPrompt,
buildConsistencyCheckPrompt,
buildSelfCritiquePrompt,
calculateTargetWordCount,
calculateEstimatedChapters,
} from '../prompts';
@@ -479,4 +482,325 @@ export class ScriptsService {
return results;
}
// ========== ENHANCED PIPELINE (Faz 2.2) ==========
/**
* Phase 0: Enrich and expand the topic before script generation.
*
* TR: Konu zenginleştirme — AI ile konuyu genişletir ve derinleştirir.
*/
async enrichTopic(projectId: string) {
this.logger.log(`Enriching topic for project: ${projectId}`);
const project = await this.prisma.scriptProject.findUnique({
where: { id: projectId },
});
if (!project) {
throw new NotFoundException(`Project with ID ${projectId} not found`);
}
const promptData = buildTopicEnrichmentPrompt({
topic: project.topic,
contentType: project.contentType,
targetAudience: project.targetAudience,
language: project.language,
userNotes: project.userNotes || undefined,
});
const resp = await this.gemini.generateJSON<{
enrichedTopic: string;
angles: string[];
subTopics: string[];
hookIdeas: string[];
emotionalCore: string;
uniqueValue: string;
keyQuestions: string[];
controversialTakes?: string[];
}>(promptData.prompt, promptData.schema, {
temperature: promptData.temperature,
});
// Store enrichment data as user notes supplement
const enrichmentSummary = [
`🎯 Zenginleştirilmiş Konu: ${resp.data.enrichedTopic}`,
`\n📐 Açılar: ${resp.data.angles.join(' | ')}`,
`\n📚 Alt Konular: ${resp.data.subTopics.join(', ')}`,
`\n🎣 Hook Fikirleri: ${resp.data.hookIdeas.join(' | ')}`,
`\n💫 Duygusal Çekirdek: ${resp.data.emotionalCore}`,
`\n🔑 Ayırt Edici Değer: ${resp.data.uniqueValue}`,
].join('');
await this.prisma.scriptProject.update({
where: { id: projectId },
data: {
userNotes: project.userNotes
? `${project.userNotes}\n\n--- AI Zenginleştirme ---\n${enrichmentSummary}`
: enrichmentSummary,
},
});
return resp.data;
}
/**
* Get the current outline for user review/editing before generation.
* Returns the outline without generating any segments.
*
* TR: Outline'ı üretip kullanıcıya gönderir, henüz segment oluşturmaz.
*/
async generateOutlineForReview(projectId: string) {
this.logger.log(`Generating outline for review: ${projectId}`);
const project = await this.prisma.scriptProject.findUnique({
where: { id: projectId },
include: {
sources: { where: { selected: true } },
briefItems: { orderBy: { sortOrder: 'asc' } },
characters: true,
},
});
if (!project) {
throw new NotFoundException(`Project with ID ${projectId} not found`);
}
const sourceContext = project.sources
.slice(0, 5)
.map((s, i) => `[Source ${i + 1}] (${s.type}): ${s.title} - ${s.snippet}`)
.join('\n');
const briefContext = project.briefItems
.map((b) => `Q: ${b.question}\nA: ${b.answer}`)
.join('\n');
const characterContext = project.characters
.map((c) => `${c.name} (${c.role}): Values[${c.values}] Traits[${c.traits}]`)
.join('\n');
const targetWordCount = calculateTargetWordCount(project.targetDuration);
const estimatedChapters = calculateEstimatedChapters(targetWordCount);
const outlinePromptData = buildScriptOutlinePrompt({
topic: project.topic,
logline: project.logline || '',
characterContext,
speechStyles: project.speechStyle,
targetAudience: project.targetAudience,
contentType: project.contentType,
targetDuration: project.targetDuration,
targetWordCount,
estimatedChapters,
sourceContext,
briefContext,
});
const outlineResp = await this.gemini.generateJSON<{
title: string;
seoDescription: string;
tags: string[];
thumbnailIdeas: string[];
chapters: { title: string; focus: string; type: string }[];
}>(outlinePromptData.prompt, outlinePromptData.schema, {
temperature: outlinePromptData.temperature,
});
return {
outline: outlineResp.data,
targetWordCount,
estimatedChapters,
};
}
/**
* Phase 3: Consistency check — AI reviews the entire script.
*
* TR: Tutarlılık kontrolü — tüm scripti ton, akış ve mantık açısından inceler.
*/
async checkConsistency(projectId: string) {
this.logger.log(`Running consistency check for project: ${projectId}`);
const project = await this.prisma.scriptProject.findUnique({
where: { id: projectId },
include: {
segments: { orderBy: { sortOrder: 'asc' } },
},
});
if (!project || !project.segments.length) {
throw new NotFoundException('Project or segments not found');
}
const promptData = buildConsistencyCheckPrompt({
segments: project.segments.map((s) => ({
type: s.segmentType,
narratorScript: s.narratorScript || '',
visualDescription: s.visualDescription || undefined,
})),
speechStyles: project.speechStyle,
targetAudience: project.targetAudience,
topic: project.topic,
language: project.language,
});
const resp = await this.gemini.generateJSON<{
overallScore: number;
toneConsistency: number;
flowScore: number;
pacingScore: number;
engagementScore: number;
issues: {
segmentIndex: number;
type: string;
description: string;
severity: string;
suggestedFix: string;
}[];
segmentsToRewrite: number[];
generalSuggestions: string[];
}>(promptData.prompt, promptData.schema, {
temperature: promptData.temperature,
});
return resp.data;
}
  /**
   * Phase 4: Self-critique and auto-rewrite low-quality segments.
   *
   * AI scores each segment on six axes and automatically rewrites segments
   * with averageScore < threshold (default: 6.5) using the model's own
   * rewrite instructions. Segments are processed sequentially; a failed
   * critique or rewrite for one segment is logged and skipped without
   * aborting the run.
   *
   * Note: the scores recorded for a rewritten segment describe its
   * PRE-rewrite text — the rewrite happens after scoring.
   *
   * TR: Öz-eleştiri — her segmenti puanlar, düşük puanlıları otomatik yeniden yazar.
   *
   * @param projectId Project whose segments are critiqued.
   * @param threshold Segments scoring below this average are auto-rewritten.
   * @returns Aggregate stats plus the per-segment critique list.
   * @throws NotFoundException when the project or its segments are missing.
   */
  async selfCritiqueAndRewrite(
    projectId: string,
    threshold: number = 6.5,
  ) {
    this.logger.log(`Running self-critique for project: ${projectId} (threshold: ${threshold})`);
    const project = await this.prisma.scriptProject.findUnique({
      where: { id: projectId },
      include: {
        segments: { orderBy: { sortOrder: 'asc' } },
      },
    });
    if (!project || !project.segments.length) {
      throw new NotFoundException('Project or segments not found');
    }
    // Auto-snapshot before self-critique rewrites.
    // Best-effort on purpose: a snapshot failure must not block the critique.
    await this.versionsService.createSnapshot(
      projectId,
      'AUTO_SAVE',
      undefined,
      'Auto-save before self-critique',
    ).catch(() => {});
    // Accumulates one entry per successfully-critiqued segment; segments
    // whose critique call throws are omitted entirely.
    const critiqueResults: {
      segmentIndex: number;
      segmentId: string;
      scores: {
        clarity: number;
        engagement: number;
        originality: number;
        audienceMatch: number;
        visualAlignment: number;
        emotionalImpact: number;
        averageScore: number;
      };
      shouldRewrite: boolean;
      weaknesses: string[];
      wasRewritten: boolean;
    }[] = [];
    // Sequential on purpose — each iteration issues an AI call (and possibly
    // a rewrite); presumably this avoids hammering the Gemini API. TODO confirm.
    for (let i = 0; i < project.segments.length; i++) {
      const segment = project.segments[i];
      const promptData = buildSelfCritiquePrompt({
        segment: {
          type: segment.segmentType,
          narratorScript: segment.narratorScript || '',
          visualDescription: segment.visualDescription || undefined,
          onScreenText: segment.onScreenText || undefined,
        },
        segmentIndex: i,
        topic: project.topic,
        speechStyles: project.speechStyle,
        targetAudience: project.targetAudience,
        language: project.language,
      });
      try {
        const resp = await this.gemini.generateJSON<{
          clarity: number;
          engagement: number;
          originality: number;
          audienceMatch: number;
          visualAlignment: number;
          emotionalImpact: number;
          averageScore: number;
          shouldRewrite: boolean;
          weaknesses: string[];
          rewriteInstructions?: string;
        }>(promptData.prompt, promptData.schema, {
          temperature: promptData.temperature,
        });
        const critique = resp.data;
        let wasRewritten = false;
        // Auto-rewrite if below threshold. Both conditions are required:
        // a low score without rewriteInstructions leaves the segment as-is
        // (wasRewritten stays false even if the model set shouldRewrite).
        if (critique.averageScore < threshold && critique.rewriteInstructions) {
          try {
            await this.rewriteSegment(
              segment.id,
              critique.rewriteInstructions,
            );
            wasRewritten = true;
            this.logger.log(
              `Segment ${i + 1} rewritten (score: ${critique.averageScore})`,
            );
          } catch {
            // A failed rewrite is non-fatal; the critique entry is still kept.
            this.logger.warn(`Failed to rewrite segment ${i + 1}`);
          }
        }
        critiqueResults.push({
          segmentIndex: i,
          segmentId: segment.id,
          scores: {
            clarity: critique.clarity,
            engagement: critique.engagement,
            originality: critique.originality,
            audienceMatch: critique.audienceMatch,
            visualAlignment: critique.visualAlignment,
            emotionalImpact: critique.emotionalImpact,
            averageScore: critique.averageScore,
          },
          shouldRewrite: critique.shouldRewrite,
          weaknesses: critique.weaknesses,
          wasRewritten,
        });
      } catch (error) {
        this.logger.warn(`Self-critique failed for segment ${i + 1}: ${error}`);
      }
    }
    const rewrittenCount = critiqueResults.filter((r) => r.wasRewritten).length;
    // Overall average is over critiqued segments only (failures excluded),
    // rounded to one decimal place.
    const avgScore =
      critiqueResults.length > 0
        ? critiqueResults.reduce((sum, r) => sum + r.scores.averageScore, 0) /
          critiqueResults.length
        : 0;
    return {
      overallAverageScore: Math.round(avgScore * 10) / 10,
      totalSegments: project.segments.length,
      segmentsRewritten: rewrittenCount,
      critiques: critiqueResults,
    };
  }
}

View File

@@ -1,4 +1,5 @@
import { Module } from '@nestjs/common';
import { BullModule } from '@nestjs/bullmq';
import { DatabaseModule } from '../../database/database.module';
import { GeminiModule } from '../gemini/gemini.module';
@@ -10,6 +11,7 @@ import {
AnalysisController,
VersionsController,
} from './controllers';
import { JobsController } from './controllers/jobs.controller';
// Services
import {
@@ -20,6 +22,16 @@ import {
VersionsService,
} from './services';
// Queue
import { QUEUES } from './queue/queue.constants';
import { ScriptProcessor } from './queue/script.processor';
import { ResearchProcessor } from './queue/research.processor';
import { AnalysisProcessor } from './queue/analysis.processor';
// Gateway (WebSocket)
import { SkriptaiGateway } from './gateway/skriptai.gateway';
import { QueueEventBridge } from './gateway/queue-event-bridge';
/**
* SkriptAI Module
*
@@ -33,18 +45,30 @@ import {
* - YouTube audit
* - Commercial brief generation
* - Version history & content management
* - BullMQ async job processing
*
* TR: SkriptAI ana modülü - AI destekli video script üretimi.
* EN: Main module for the SkriptAI feature - AI-powered video script generation.
*/
@Module({
imports: [DatabaseModule, GeminiModule],
imports: [
DatabaseModule,
GeminiModule,
// BullMQ Queues
BullModule.registerQueue(
{ name: QUEUES.SCRIPT_GENERATION },
{ name: QUEUES.DEEP_RESEARCH },
{ name: QUEUES.ANALYSIS },
{ name: QUEUES.IMAGE_GENERATION },
),
],
controllers: [
ProjectsController,
ScriptsController,
ResearchController,
AnalysisController,
VersionsController,
JobsController,
],
providers: [
ProjectsService,
@@ -52,6 +76,15 @@ import {
ResearchService,
AnalysisService,
VersionsService,
// Queue Processors
ScriptProcessor,
ResearchProcessor,
AnalysisProcessor,
// WebSocket
SkriptaiGateway,
QueueEventBridge,
],
exports: [
ProjectsService,

View File

@@ -0,0 +1,2 @@
export * from './storage.service';
export * from './storage.module';

View File

@@ -0,0 +1,19 @@
import { Module, Global } from '@nestjs/common';
import { ConfigModule } from '@nestjs/config';
import { StorageService } from './storage.service';
/**
 * Storage Module — MinIO/S3 Object Storage
 *
 * Global module providing StorageService for file uploads.
 * Being @Global(), StorageService is injectable from any feature module
 * without importing StorageModule explicitly.
 * Configure via environment variables (see .env.example).
 *
 * TR: MinIO/S3 nesne depolama modülü.
 */
@Global()
@Module({
  imports: [ConfigModule], // StorageService reads its settings via ConfigService
  providers: [StorageService],
  exports: [StorageService],
})
export class StorageModule {}

View File

@@ -0,0 +1,243 @@
import { Injectable, Logger, OnModuleInit } from '@nestjs/common';
import { ConfigService } from '@nestjs/config';
import {
S3Client,
PutObjectCommand,
GetObjectCommand,
DeleteObjectCommand,
HeadBucketCommand,
CreateBucketCommand,
} from '@aws-sdk/client-s3';
import { randomUUID } from 'crypto';
/**
 * StorageService — S3-Compatible Object Storage (MinIO)
 *
 * Handles file uploads (images, documents) to MinIO/S3.
 * Key features:
 * - Base64 data URI → MinIO upload → public URL
 * - Buffer upload support
 * - Auto bucket creation on startup
 * - Organized folder structure: images/segments/, images/thumbnails/
 *
 * When storage is disabled or misconfigured the service degrades
 * gracefully: uploadBase64 returns the original data URI, delete is a
 * no-op, and uploadBuffer throws.
 *
 * TR: MinIO/S3 uyumlu nesne depolama servisi.
 * Base64 görselleri URL'ye dönüştürür.
 */
@Injectable()
export class StorageService implements OnModuleInit {
  /** Data-URI MIME type → file extension lookup (hoisted; built once). */
  private static readonly MIME_EXTENSIONS: Record<string, string> = {
    'image/png': 'png',
    'image/jpeg': 'jpg',
    'image/jpg': 'jpg',
    'image/webp': 'webp',
    'image/gif': 'gif',
    'image/svg+xml': 'svg',
    'application/pdf': 'pdf',
  };

  private readonly logger = new Logger(StorageService.name);
  private client: S3Client | null = null;
  private isEnabled = false;
  private readonly bucket: string;
  private readonly publicUrl: string;

  constructor(private readonly configService: ConfigService) {
    this.bucket = this.configService.get<string>(
      'storage.bucket',
      'skriptai-assets',
    );
    // Fix: strip trailing slashes so generated URLs never contain "//"
    // and delete() can reliably strip the public prefix from full URLs.
    this.publicUrl = this.configService
      .get<string>('storage.publicUrl', 'http://localhost:9000')
      .replace(/\/+$/, '');
  }

  /**
   * Connects to MinIO/S3 on startup when STORAGE_ENABLED and credentials
   * are present; otherwise leaves the service disabled. Connection errors
   * are logged, not rethrown, so app boot is never blocked by storage.
   */
  async onModuleInit() {
    const endpoint = this.configService.get<string>('storage.endpoint');
    const accessKey = this.configService.get<string>('storage.accessKey');
    const secretKey = this.configService.get<string>('storage.secretKey');
    const enabled = this.configService.get<boolean>('storage.enabled', false);
    if (!enabled || !endpoint || !accessKey || !secretKey) {
      this.logger.log(
        'Storage is disabled. Set STORAGE_ENABLED=true with MinIO credentials.',
      );
      return;
    }
    try {
      this.client = new S3Client({
        endpoint,
        region: 'us-east-1', // Required but unused for MinIO
        credentials: {
          accessKeyId: accessKey,
          secretAccessKey: secretKey,
        },
        forcePathStyle: true, // Required for MinIO
      });
      // Ensure bucket exists before declaring the service available.
      await this.ensureBucket();
      this.isEnabled = true;
      this.logger.log(`✅ Storage connected: ${endpoint}/${this.bucket}`);
    } catch (error) {
      this.logger.error('Failed to connect to storage', error);
    }
  }

  /**
   * Check if storage is available (enabled and connected).
   */
  isAvailable(): boolean {
    return this.isEnabled && this.client !== null;
  }

  /**
   * Upload a Base64 data URI to storage.
   *
   * Degrades gracefully: when storage is unavailable or the URI is not a
   * valid base64 data URI, the input is returned unchanged so callers can
   * keep persisting the raw data URI.
   *
   * @param base64DataUri - Data URI (e.g., "data:image/png;base64,iVBOR...")
   * @param folder - Target folder (e.g., "images/segments")
   * @param filename - Optional filename (auto-generated UUID if omitted)
   * @returns Public URL of the uploaded file (or the original data URI)
   */
  async uploadBase64(
    base64DataUri: string,
    folder: string = 'images',
    filename?: string,
  ): Promise<string> {
    if (!this.isAvailable()) {
      this.logger.warn('Storage not available, returning original data URI');
      return base64DataUri;
    }
    // Parse "data:<mime>;base64,<payload>"
    const matches = base64DataUri.match(
      /^data:([a-zA-Z0-9]+\/[a-zA-Z0-9-.+]+);base64,(.+)$/,
    );
    if (!matches) {
      this.logger.warn('Invalid data URI format, returning as-is');
      return base64DataUri;
    }
    const mimeType = matches[1];
    const base64Data = matches[2];
    const buffer = Buffer.from(base64Data, 'base64');
    // Derive the file extension from the MIME type ('bin' fallback).
    const ext = this.mimeToExtension(mimeType);
    const key = `${folder}/${filename || randomUUID()}.${ext}`;
    return this.uploadBuffer(buffer, key, mimeType);
  }

  /**
   * Upload a Buffer to storage.
   *
   * @param buffer - File content buffer
   * @param key - Storage key (path/filename)
   * @param contentType - MIME type
   * @returns Public URL
   * @throws Error when storage is unavailable, or rethrows the S3 error.
   */
  async uploadBuffer(
    buffer: Buffer,
    key: string,
    contentType: string,
  ): Promise<string> {
    if (!this.isAvailable()) {
      throw new Error('Storage service is not available');
    }
    try {
      await this.client!.send(
        new PutObjectCommand({
          Bucket: this.bucket,
          Key: key,
          Body: buffer,
          ContentType: contentType,
          ACL: 'public-read',
        }),
      );
      const url = `${this.publicUrl}/${this.bucket}/${key}`;
      this.logger.debug(`Uploaded: ${key} (${buffer.length} bytes) → ${url}`);
      return url;
    } catch (error) {
      this.logger.error(`Upload failed for ${key}`, error);
      throw error;
    }
  }

  /**
   * Delete a file from storage. Best-effort: failures are logged, not thrown.
   *
   * @param keyOrUrl - Storage key, or a full public URL produced by this
   *                   service (must start with publicUrl/bucket/).
   */
  async delete(keyOrUrl: string): Promise<void> {
    if (!this.isAvailable()) return;
    const prefix = `${this.publicUrl}/${this.bucket}/`;
    let key = keyOrUrl;
    if (keyOrUrl.startsWith('http')) {
      if (!keyOrUrl.startsWith(prefix)) {
        // Fix: previously a non-matching URL fell through unchanged and we
        // issued DeleteObject with the whole URL as the key. Skip instead.
        this.logger.warn(`URL does not match storage prefix, skipping delete: ${keyOrUrl}`);
        return;
      }
      key = keyOrUrl.slice(prefix.length);
    }
    try {
      await this.client!.send(
        new DeleteObjectCommand({
          Bucket: this.bucket,
          Key: key,
        }),
      );
      this.logger.debug(`Deleted: ${key}`);
    } catch (error) {
      this.logger.warn(`Failed to delete ${key}`, error);
    }
  }

  /**
   * Upload a segment image (convenience method).
   * Converts base64 to URL and stores in images/segments/<projectId>/.
   */
  async uploadSegmentImage(
    base64DataUri: string,
    projectId: string,
    segmentId: string,
  ): Promise<string> {
    return this.uploadBase64(
      base64DataUri,
      `images/segments/${projectId}`,
      segmentId,
    );
  }

  /**
   * Upload a thumbnail image, keyed by project id under images/thumbnails/.
   */
  async uploadThumbnail(
    base64DataUri: string,
    projectId: string,
  ): Promise<string> {
    return this.uploadBase64(
      base64DataUri,
      `images/thumbnails`,
      projectId,
    );
  }

  // ========== HELPERS ==========

  /**
   * Ensure the configured bucket exists, creating it if the HEAD check
   * fails. NOTE(review): any HeadBucket error (including permission
   * errors) triggers a create attempt — confirm that is acceptable.
   */
  private async ensureBucket(): Promise<void> {
    try {
      await this.client!.send(
        new HeadBucketCommand({ Bucket: this.bucket }),
      );
    } catch {
      this.logger.log(`Creating bucket: ${this.bucket}`);
      await this.client!.send(
        new CreateBucketCommand({ Bucket: this.bucket }),
      );
    }
  }

  /** Map a MIME type to a file extension; unknown types become 'bin'. */
  private mimeToExtension(mime: string): string {
    return StorageService.MIME_EXTENSIONS[mime] ?? 'bin';
  }
}