6 changes: 5 additions & 1 deletion .env.example
@@ -4,4 +4,8 @@ LOGGING=true
 PORT=4000
 INDEXER_URL=https://grants-stack-indexer-v2.gitcoin.co/
 OPENAI_API_KEY=your_openai_api_key
-NODE_ENV=development
+NODE_ENV=development
+# Performance tuning
+MAX_CONCURRENT_EVALUATIONS=5
+EVALUATION_BATCH_SIZE=25
+EVALUATION_BATCH_DELAY=2000
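
None of the three new variables is read anywhere in this diff, so how they are consumed is an assumption. A minimal sketch of the parsing a shared config module might do, mirroring the defaults above:

```ts
// Hypothetical helper (not part of this PR): parse the new tuning variables
// with the same fallbacks that .env.example documents.
const toInt = (value: string | undefined, fallback: number): number => {
  const parsed = Number(value);
  return Number.isFinite(parsed) ? parsed : fallback;
};

export const evaluationTuning = {
  maxConcurrentEvaluations: toInt(process.env.MAX_CONCURRENT_EVALUATIONS, 5),
  batchSize: toInt(process.env.EVALUATION_BATCH_SIZE, 25),
  batchDelayMs: toInt(process.env.EVALUATION_BATCH_DELAY, 2000),
};
```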
6 changes: 5 additions & 1 deletion package.json
@@ -47,7 +47,11 @@
"typeorm": "^0.3.20",
"typescript": "^5.6.3",
"viem": "^2.21.47",
"winston": "^3.16.0"
"winston": "^3.16.0",
"express-rate-limit": "^6.7.0",
"lodash": "^4.17.21",
"lru-cache": "^7.14.1",
"p-limit": "^3.1.0"
},
"scripts": {
"dev": "ts-node-dev --respawn --transpile-only -r tsconfig-paths/register src/index.ts",
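Of the four new dependencies, only `express-rate-limit` is imported in the controller changes below; `lodash`, `lru-cache`, and `p-limit` are added but not yet used in this diff. As one hedged example, `lru-cache` could bound the round cache that `createLLMEvaluations` currently keeps in a plain object:

```ts
import LRUCache from 'lru-cache'; // default-export class in lru-cache ^7.x
import type { RoundWithApplications } from '@/ext/indexer'; // import path assumed

// Sketch only: a size- and age-bounded replacement for the plain
// Record<string, RoundWithApplications> used in createLLMEvaluations.
const roundCache = new LRUCache<string, RoundWithApplications>({
  max: 100, // keep at most 100 rounds in memory
  ttl: 1000 * 60 * 10, // drop cached rounds after 10 minutes
});

// Usage would mirror the existing cache:
//   roundCache.set(params.alloPoolId, round);
//   const cached = roundCache.get(params.alloPoolId);
```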
5,767 changes: 5,767 additions & 0 deletions pnpm-lock.yaml

Large diffs are not rendered by default.

239 changes: 136 additions & 103 deletions src/controllers/evaluationController.ts
@@ -32,13 +32,32 @@ import type {
PoolIdChainIdBody,
} from './types';
import evaluationQuestionService from '@/service/EvaluationQuestionService';
import { rateLimit } from 'express-rate-limit';

const logger = createLogger();

interface EvaluationBody extends CreateEvaluationParams {
signature: Hex;
}

export const evaluationRateLimiter = rateLimit({
windowMs: 60 * 1000,
max: 20, // 20 requests per minute
message: 'Too many evaluation requests',
});

export const recreateEvaluationQuestionsLimiter = rateLimit({
windowMs: 60 * 1000,
max: 5, // More restrictive for question recreation
message: 'Too many question recreation requests',
});

export const triggerLLMEvaluationLimiter = rateLimit({
windowMs: 60 * 1000,
max: 10, // 10 requests per minute
message: 'Too many LLM evaluation requests',
});
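
The three limiters are exported, but the router wiring is not part of this hunk. A sketch of how they might be attached; the route path, the import alias, and everything other than `recreateEvaluationQuestions` (defined further down in this file) are assumptions:

```ts
import { Router } from 'express';
import {
  evaluationRateLimiter,
  recreateEvaluationQuestionsLimiter,
  recreateEvaluationQuestions,
} from '@/controllers/evaluationController';

const evaluationRouter = Router();

// Each limiter sits in front of its handler as ordinary Express middleware.
evaluationRouter.post(
  '/evaluation/questions/recreate',
  recreateEvaluationQuestionsLimiter,
  recreateEvaluationQuestions
);
// evaluationRateLimiter and triggerLLMEvaluationLimiter would be attached
// the same way in front of their respective handlers.

export default evaluationRouter;
```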

export const recreateEvaluationQuestions = async (
req: Request,
res: Response
@@ -304,117 +323,131 @@ export const createLLMEvaluations = async (
const roundCache: Record<string, RoundWithApplications> = {};
const failedProjects: string[] = [];

// Split the paramsArray into batches of 10
const batchedParams = batchPromises(paramsArray, 10);
// Increase batch size and add delay between batches
const BATCH_SIZE = 25;
const BATCH_DELAY = 2000; // 2 seconds between batches

// Deduplicate params array based on unique application IDs
const uniqueParams = paramsArray.filter(
(param, index, self) =>
index ===
self.findIndex(
p =>
p.alloApplicationId === param.alloApplicationId &&
p.chainId === param.chainId
)
);
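
Since `lodash` is added to package.json in this PR, the same deduplication could be written with `uniqBy`; a sketch only, not what this diff ships:

```ts
import { uniqBy } from 'lodash';

// Keep the first occurrence of each (chainId, alloApplicationId) pair,
// equivalent to the filter/findIndex above.
const uniqueParams = uniqBy(
  paramsArray,
  p => `${p.chainId}:${p.alloApplicationId}`
);
```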

const batchedParams = batchPromises(uniqueParams, BATCH_SIZE);

for (const batch of batchedParams) {
try {
// Process each batch of promises concurrently
const evaluationPromises = batch.map(async params => {
try {
const evaluationQuestions =
params.questions === undefined || params.questions.length === 0
? await evaluationService.getQuestionsByChainAndAlloPoolId(
params.chainId,
params.alloPoolId
)
: params.questions;

if (
evaluationQuestions === null ||
evaluationQuestions.length === 0
) {
logger.error(
'createLLMEvaluations:Failed to get evaluation questions'
);
throw new Error('Failed to get evaluation questions');
}

let roundMetadata = params.roundMetadata;
let applicationMetadata = params.applicationMetadata;

// Check if the round is already in cache
if (roundMetadata == null || applicationMetadata == null) {
let round: RoundWithApplications | null;

// If the round is cached, use it
if (roundCache[params.alloPoolId] != null) {
round = roundCache[params.alloPoolId];
logger.debug(
`Using cached round data for roundId: ${params.alloPoolId}`
);
} else {
// Fetch the round and store it in the cache
const [error, fetchedRound] = await catchError(
indexerClient.getRoundWithApplications({
chainId: params.chainId,
roundId: params.alloPoolId,
})
);

if (error !== undefined || fetchedRound == null) {
logger.error('Failed to fetch round with applications');
throw new Error('Failed to fetch round with applications');
}

round = fetchedRound;
roundCache[params.alloPoolId] = round;
logger.info(
`Fetched and cached round with ID: ${round.id}, which includes ${round.applications.length} applications`
);
}

const application = round.applications.find(
app => app.id === params.alloApplicationId
);
if (application == null) {
logger.error(
`Application with ID: ${params.alloApplicationId} not found in round`
);
throw new NotFoundError(
`Application with ID: ${params.alloApplicationId} not found in round`
);
}

roundMetadata = round.roundMetadata;
applicationMetadata = application.metadata;
}

const evaluation = await requestEvaluation(
roundMetadata,
applicationMetadata,
evaluationQuestions
);

await createEvaluation({
chainId: params.chainId,
alloPoolId: params.alloPoolId,
alloApplicationId: params.alloApplicationId,
cid: params.cid,
evaluator: params.evaluator,
summaryInput: evaluation,
evaluatorType: EVALUATOR_TYPE.LLM_GPT3,
});
} catch (error) {
// If an error occurs, add the project ID to the failedProjects array
failedProjects.push(params.alloApplicationId);
throw error;
}
});

await Promise.all(evaluationPromises);
// Process batch with concurrency limit
await Promise.all(
batch.map(async params => {
await processSingleEvaluation(params, roundCache, failedProjects);
})
);

await new Promise(resolve => setTimeout(resolve, 1000));
// Add delay between batches to prevent overwhelming the system
if (batchedParams.indexOf(batch) < batchedParams.length - 1) {
await new Promise(resolve => setTimeout(resolve, BATCH_DELAY));
}
} catch (batchError) {
// Handle any error within the batch (if any promise fails)
logger.error(
'Error processing batch, skipping to the next one:',
batchError
);
// Continue to the next batch even if an error occurred
logger.error('Error processing batch:', batchError);
continue;
}
}

return failedProjects;
};
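
Each batch is still awaited with a plain `Promise.all`, so the "concurrency limit" mentioned in the comment is effectively the batch size. `p-limit` (added in package.json) together with `MAX_CONCURRENT_EVALUATIONS` suggests how a real cap might look; a sketch under those assumptions:

```ts
import pLimit from 'p-limit';

// Sketch only: bound in-flight evaluations instead of firing a whole batch
// at once. Reading MAX_CONCURRENT_EVALUATIONS here, rather than via a shared
// config module, is an assumption.
const concurrency = Number(process.env.MAX_CONCURRENT_EVALUATIONS) || 5;
const limit = pLimit(concurrency);

async function processBatchWithLimit(
  batch: CreateLLMEvaluationParams[],
  roundCache: Record<string, RoundWithApplications>,
  failedProjects: string[]
): Promise<void> {
  // At most `concurrency` calls to processSingleEvaluation run at any one
  // time; the rest queue inside p-limit.
  await Promise.all(
    batch.map(async params =>
      limit(async () => {
        await processSingleEvaluation(params, roundCache, failedProjects);
      })
    )
  );
}
```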

async function processSingleEvaluation(
params: CreateLLMEvaluationParams,
roundCache: Record<string, RoundWithApplications>,
failedProjects: string[]
): Promise<void> {
try {
const evaluationQuestions =
params.questions === undefined || params.questions.length === 0
? await evaluationService.getQuestionsByChainAndAlloPoolId(
params.chainId,
params.alloPoolId
)
: params.questions;

if (evaluationQuestions === null || evaluationQuestions.length === 0) {
logger.error('createLLMEvaluations:Failed to get evaluation questions');
throw new Error('Failed to get evaluation questions');
}

let roundMetadata = params.roundMetadata;
let applicationMetadata = params.applicationMetadata;

// Check if the round is already in cache
if (roundMetadata == null || applicationMetadata == null) {
let round: RoundWithApplications | null;

// If the round is cached, use it
if (roundCache[params.alloPoolId] != null) {
round = roundCache[params.alloPoolId];
logger.debug(
`Using cached round data for roundId: ${params.alloPoolId}`
);
} else {
// Fetch the round and store it in the cache
const [error, fetchedRound] = await catchError(
indexerClient.getRoundWithApplications({
chainId: params.chainId,
roundId: params.alloPoolId,
})
);

if (error !== undefined || fetchedRound == null) {
logger.error('Failed to fetch round with applications');
throw new Error('Failed to fetch round with applications');
}

round = fetchedRound;
roundCache[params.alloPoolId] = round;
logger.info(
`Fetched and cached round with ID: ${round.id}, which includes ${round.applications.length} applications`
);
}

const application = round.applications.find(
app => app.id === params.alloApplicationId
);
if (application == null) {
logger.error(
`Application with ID: ${params.alloApplicationId} not found in round`
);
throw new NotFoundError(
`Application with ID: ${params.alloApplicationId} not found in round`
);
}

roundMetadata = round.roundMetadata;
applicationMetadata = application.metadata;
}

const evaluation = await requestEvaluation(
roundMetadata,
applicationMetadata,
evaluationQuestions
);

await createEvaluation({
chainId: params.chainId,
alloPoolId: params.alloPoolId,
alloApplicationId: params.alloApplicationId,
cid: params.cid,
evaluator: params.evaluator,
summaryInput: evaluation,
evaluatorType: EVALUATOR_TYPE.LLM_GPT3,
});
} catch (error) {
failedProjects.push(params.alloApplicationId);
throw error;
}
}
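
`createLLMEvaluations` returns the application IDs that failed evaluation; a short sketch of how a caller might surface that result (the caller itself is not shown in this diff):

```ts
// Sketch only: illustrative caller of createLLMEvaluations.
const failedProjects = await createLLMEvaluations(paramsArray);
if (failedProjects.length > 0) {
  logger.warn(
    `LLM evaluation failed for ${failedProjects.length} application(s): ${failedProjects.join(', ')}`
  );
}
```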