cghislai 2025-06-08 05:25:26 +02:00
parent dd4b116bbb
commit 01bb760c9a
10 changed files with 2077 additions and 1921 deletions

View File

@@ -72,7 +72,6 @@ export function formatHttpResponse(results: ProcessResult[]): HttpResponse {
workitemsCreated,
filesWritten,
pullRequestUrl: result.pullRequestUrl,
gitPatch: result.gitPatch
};
});
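For orientation, a minimal sketch of the per-result payload after this change, with gitPatch no longer exposed; field names other than those visible in the hunk are assumptions:

interface ProcessResultLike {
  pullRequestUrl?: string;
  gitPatch?: string; // still present on the result, just no longer surfaced
}

// workitemsCreated and filesWritten stand in for the locals computed
// earlier in formatHttpResponse (not shown in the hunk).
function toResponseEntry(result: ProcessResultLike, workitemsCreated: number, filesWritten: string[]) {
  return {
    workitemsCreated,
    filesWritten,
    pullRequestUrl: result.pullRequestUrl
    // gitPatch intentionally omitted as of this commit
  };
}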

View File

@@ -1,435 +0,0 @@
import * as fs from 'fs';
import * as path from 'path';
import { GeminiService } from '../gemini-service';
import { GeminiProjectProcessor } from '../gemini-project-processor';
import { Project } from '../../types';
// Mock dependencies
jest.mock('@google-cloud/vertexai');
jest.mock('fs');
jest.mock('path');
jest.mock('../../config', () => ({
GOOGLE_CLOUD_PROJECT_ID: 'mock-project-id',
GOOGLE_CLOUD_LOCATION: 'mock-location',
GEMINI_MODEL: 'mock-model',
DRY_RUN_SKIP_GEMINI: false
}));
describe('GeminiService', () => {
let geminiService: GeminiService;
let mockGeminiProjectProcessor: jest.Mocked<GeminiProjectProcessor>;
let mockVertexAI: any;
let mockGenerativeModel: any;
let mockChat: any;
beforeEach(() => {
jest.clearAllMocks();
// Create a new instance of GeminiService
geminiService = new GeminiService();
// Mock GeminiProjectProcessor
mockGeminiProjectProcessor = {
getFileContent: jest.fn(),
writeFileContent: jest.fn(),
fileExists: jest.fn(),
listFiles: jest.fn(),
grepFiles: jest.fn(),
deleteFile: jest.fn(),
getCurrentWorkitem: jest.fn().mockReturnValue(null),
project: {} as Project,
projectRepoPath: '/mock/project/repo',
mainRepoPath: '/mock/main/repo',
processProject: jest.fn(),
processWorkitem: jest.fn(),
generateFeatureFile: jest.fn(),
collectRelevantFiles: jest.fn(),
matchesPattern: jest.fn()
} as unknown as jest.Mocked<GeminiProjectProcessor>;
// Mock VertexAI and its methods
mockChat = {
sendMessage: jest.fn()
};
mockGenerativeModel = {
startChat: jest.fn().mockReturnValue(mockChat)
};
mockVertexAI = {
getGenerativeModel: jest.fn().mockReturnValue(mockGenerativeModel)
};
// Replace the VertexAI instance in GeminiService with our mock
(geminiService as any).vertexAI = mockVertexAI;
});
describe('processFunctionCalls', () => {
it('should process getFileContent function call correctly', async () => {
// Setup mock responses
const mockResult = {
functionCalls: [
{
name: 'getFileContent',
args: JSON.stringify({ filePath: 'test/file.txt' })
}
],
text: jest.fn().mockReturnValue('Initial response')
};
const mockNextResult = {
functionCalls: [],
text: jest.fn().mockReturnValue('Final response after function call')
};
// Setup mock implementations
mockGeminiProjectProcessor.getFileContent.mockReturnValue('File content');
mockChat.sendMessage.mockResolvedValue(mockNextResult);
// Call the method
const result = await (geminiService as any).processFunctionCalls(
mockResult,
mockChat,
mockGeminiProjectProcessor
);
// Verify the result
expect(result).toBe('Final response after function call');
// Verify the function was called with correct arguments
expect(mockGeminiProjectProcessor.getFileContent).toHaveBeenCalledWith('test/file.txt');
// Verify the chat.sendMessage was called with correct arguments
expect(mockChat.sendMessage).toHaveBeenCalledWith({
functionResponse: {
name: 'getFileContent',
response: { result: JSON.stringify('File content') }
}
});
});
it('should process writeFileContent function call correctly', async () => {
// Setup mock responses
const mockResult = {
functionCalls: [
{
name: 'writeFileContent',
args: JSON.stringify({
filePath: 'test/file.txt',
content: 'New content'
})
}
],
text: jest.fn().mockReturnValue('Initial response')
};
const mockNextResult = {
functionCalls: [],
text: jest.fn().mockReturnValue('Final response after function call')
};
// Setup mock implementations
mockGeminiProjectProcessor.writeFileContent.mockImplementation(() => {});
mockChat.sendMessage.mockResolvedValue(mockNextResult);
// Call the method
const result = await (geminiService as any).processFunctionCalls(
mockResult,
mockChat,
mockGeminiProjectProcessor
);
// Verify the result
expect(result).toBe('Final response after function call');
// Verify the function was called with correct arguments
expect(mockGeminiProjectProcessor.writeFileContent).toHaveBeenCalledWith(
'test/file.txt',
'New content',
undefined
);
// Verify the chat.sendMessage was called with correct arguments
expect(mockChat.sendMessage).toHaveBeenCalledWith({
functionResponse: {
name: 'writeFileContent',
response: { result: JSON.stringify('File test/file.txt written successfully') }
}
});
});
it('should process fileExists function call correctly', async () => {
// Setup mock responses
const mockResult = {
functionCalls: [
{
name: 'fileExists',
args: JSON.stringify({ filePath: 'test/file.txt' })
}
],
text: jest.fn().mockReturnValue('Initial response')
};
const mockNextResult = {
functionCalls: [],
text: jest.fn().mockReturnValue('Final response after function call')
};
// Setup mock implementations
mockGeminiProjectProcessor.fileExists.mockReturnValue(true);
mockChat.sendMessage.mockResolvedValue(mockNextResult);
// Call the method
const result = await (geminiService as any).processFunctionCalls(
mockResult,
mockChat,
mockGeminiProjectProcessor
);
// Verify the result
expect(result).toBe('Final response after function call');
// Verify the function was called with correct arguments
expect(mockGeminiProjectProcessor.fileExists).toHaveBeenCalledWith('test/file.txt');
// Verify the chat.sendMessage was called with correct arguments
expect(mockChat.sendMessage).toHaveBeenCalledWith({
functionResponse: {
name: 'fileExists',
response: { result: JSON.stringify(true) }
}
});
});
it('should process listFiles function call correctly', async () => {
// Setup mock responses
const mockResult = {
functionCalls: [
{
name: 'listFiles',
args: JSON.stringify({ dirPath: 'test/dir' })
}
],
text: jest.fn().mockReturnValue('Initial response')
};
const mockNextResult = {
functionCalls: [],
text: jest.fn().mockReturnValue('Final response after function call')
};
// Setup mock implementations
mockGeminiProjectProcessor.listFiles.mockReturnValue(['file1.txt', 'file2.txt']);
mockChat.sendMessage.mockResolvedValue(mockNextResult);
// Call the method
const result = await (geminiService as any).processFunctionCalls(
mockResult,
mockChat,
mockGeminiProjectProcessor
);
// Verify the result
expect(result).toBe('Final response after function call');
// Verify the function was called with correct arguments
expect(mockGeminiProjectProcessor.listFiles).toHaveBeenCalledWith('test/dir');
// Verify the chat.sendMessage was called with correct arguments
expect(mockChat.sendMessage).toHaveBeenCalledWith({
functionResponse: {
name: 'listFiles',
response: { result: JSON.stringify(['file1.txt', 'file2.txt']) }
}
});
});
it('should process grepFiles function call correctly', async () => {
// Setup mock responses
const mockResult = {
functionCalls: [
{
name: 'grepFiles',
args: JSON.stringify({
searchString: 'test',
filePattern: '*.txt'
})
}
],
text: jest.fn().mockReturnValue('Initial response')
};
const mockNextResult = {
functionCalls: [],
text: jest.fn().mockReturnValue('Final response after function call')
};
// Setup mock implementations
const grepResults = [
{ file: 'file1.txt', line: 10, content: 'test content' },
{ file: 'file2.txt', line: 20, content: 'more test content' }
];
mockGeminiProjectProcessor.grepFiles.mockReturnValue(grepResults);
mockChat.sendMessage.mockResolvedValue(mockNextResult);
// Call the method
const result = await (geminiService as any).processFunctionCalls(
mockResult,
mockChat,
mockGeminiProjectProcessor
);
// Verify the result
expect(result).toBe('Final response after function call');
// Verify the function was called with correct arguments
expect(mockGeminiProjectProcessor.grepFiles).toHaveBeenCalledWith('test', '*.txt');
// Verify the chat.sendMessage was called with correct arguments
expect(mockChat.sendMessage).toHaveBeenCalledWith({
functionResponse: {
name: 'grepFiles',
response: { result: JSON.stringify(grepResults) }
}
});
});
it('should handle errors in function calls', async () => {
// Setup mock responses
const mockResult = {
functionCalls: [
{
name: 'getFileContent',
args: JSON.stringify({ filePath: 'test/file.txt' })
}
],
text: jest.fn().mockReturnValue('Initial response')
};
const mockNextResult = {
functionCalls: [],
text: jest.fn().mockReturnValue('Final response after error')
};
// Setup mock implementations to throw an error
mockGeminiProjectProcessor.getFileContent.mockImplementation(() => {
throw new Error('File not found');
});
mockChat.sendMessage.mockResolvedValue(mockNextResult);
// Call the method
const result = await (geminiService as any).processFunctionCalls(
mockResult,
mockChat,
mockGeminiProjectProcessor
);
// Verify the result
expect(result).toBe('Final response after error');
// Verify the function was called with correct arguments
expect(mockGeminiProjectProcessor.getFileContent).toHaveBeenCalledWith('test/file.txt');
// Verify the chat.sendMessage was called with correct arguments
expect(mockChat.sendMessage).toHaveBeenCalledWith({
functionResponse: {
name: 'getFileContent',
response: { error: 'File not found' }
}
});
});
it('should handle unknown function calls', async () => {
// Setup mock responses
const mockResult = {
functionCalls: [
{
name: 'unknownFunction',
args: JSON.stringify({ param: 'value' })
}
],
text: jest.fn().mockReturnValue('Initial response')
};
const mockNextResult = {
functionCalls: [],
text: jest.fn().mockReturnValue('Final response after error')
};
// Setup mock implementations
mockChat.sendMessage.mockResolvedValue(mockNextResult);
// Call the method
const result = await (geminiService as any).processFunctionCalls(
mockResult,
mockChat,
mockGeminiProjectProcessor
);
// Verify the result
expect(result).toBe('Final response after error');
// Verify the chat.sendMessage was called with correct arguments
expect(mockChat.sendMessage).toHaveBeenCalledWith({
functionResponse: {
name: 'unknownFunction',
response: { error: 'Unknown function: unknownFunction' }
}
});
});
it('should return text response if no function calls', async () => {
// Setup mock responses
const mockResult = {
functionCalls: [],
text: jest.fn().mockReturnValue('Text response')
};
// Call the method
const result = await (geminiService as any).processFunctionCalls(
mockResult,
mockChat,
mockGeminiProjectProcessor
);
// Verify the result
expect(result).toBe('Text response');
// Verify no function calls were made
expect(mockGeminiProjectProcessor.getFileContent).not.toHaveBeenCalled();
expect(mockGeminiProjectProcessor.writeFileContent).not.toHaveBeenCalled();
expect(mockGeminiProjectProcessor.fileExists).not.toHaveBeenCalled();
expect(mockGeminiProjectProcessor.listFiles).not.toHaveBeenCalled();
expect(mockGeminiProjectProcessor.grepFiles).not.toHaveBeenCalled();
// Verify no chat messages were sent
expect(mockChat.sendMessage).not.toHaveBeenCalled();
});
it('should return text response if no geminiProjectProcessor provided', async () => {
// Setup mock responses
const mockResult = {
functionCalls: [
{
name: 'getFileContent',
args: JSON.stringify({ filePath: 'test/file.txt' })
}
],
text: jest.fn().mockReturnValue('Text response')
};
// Call the method without providing geminiProjectProcessor
const result = await (geminiService as any).processFunctionCalls(
mockResult,
mockChat,
undefined
);
// Verify the result
expect(result).toBe('Text response');
// Verify no chat messages were sent
expect(mockChat.sendMessage).not.toHaveBeenCalled();
});
});
});
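The deleted suite pinned down the round-trip envelope that processFunctionCalls exchanged with the chat session; a condensed sketch of that shape, reconstructed from the assertions above:

// Success and error envelopes exactly as asserted in the deleted tests.
type FunctionResponseEnvelope = {
  functionResponse: {
    name: string;
    response: { result: string } | { error: string };
  };
};

function successEnvelope(name: string, value: unknown): FunctionResponseEnvelope {
  return { functionResponse: { name, response: { result: JSON.stringify(value) } } };
}

function errorEnvelope(name: string, message: string): FunctionResponseEnvelope {
  return { functionResponse: { name, response: { error: message } } };
}

// e.g. successEnvelope('getFileContent', 'File content') matches the payload
// the first test expected chat.sendMessage to receive.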

View File

@@ -0,0 +1,517 @@
import * as fs from 'fs';
import * as path from 'path';
import { ModelStreamService } from '../model-stream-service';
import { GeminiProjectProcessor } from '../gemini-project-processor';
import { Workitem } from '../../types';
// Mock dependencies
jest.mock('fs');
jest.mock('path');
jest.mock('../../config', () => ({
get GOOGLE_CLOUD_PROJECT_ID() { return process.env.GOOGLE_CLOUD_PROJECT_ID || 'mock-project-id'; },
get GOOGLE_CLOUD_LOCATION() { return process.env.GOOGLE_CLOUD_LOCATION || 'mock-location'; },
get GEMINI_MODEL() { return process.env.GEMINI_MODEL || 'mock-model'; },
DRY_RUN_SKIP_GEMINI: false
}));
// Mock VertexAI
const mockGenerateContentStream = jest.fn();
const mockGenerateContent = jest.fn();
const mockGetGenerativeModel = jest.fn().mockReturnValue({
generateContentStream: mockGenerateContentStream,
generateContent: mockGenerateContent,
startChat: jest.fn()
});
jest.mock('@google-cloud/vertexai', () => {
return {
VertexAI: jest.fn().mockImplementation(() => {
return {
getGenerativeModel: mockGetGenerativeModel
};
}),
FunctionDeclarationSchemaType: {
OBJECT: 'OBJECT',
STRING: 'STRING'
}
};
});
describe('ModelStreamService', () => {
let modelStreamService: ModelStreamService;
let mockGeminiProjectProcessor: jest.Mocked<GeminiProjectProcessor>;
let mockWorkitem: Workitem;
beforeEach(() => {
jest.clearAllMocks();
// Reset all mocks
mockGenerateContentStream.mockReset();
mockGenerateContent.mockReset();
mockGetGenerativeModel.mockReset();
// Mock config values
process.env.GOOGLE_CLOUD_PROJECT_ID = 'mock-project-id';
process.env.GOOGLE_CLOUD_LOCATION = 'mock-location';
process.env.GEMINI_MODEL = 'mock-model';
// Mock workitem
mockWorkitem = {
name: 'test-workitem',
path: '/mock/path/to/workitem.md',
title: 'Test Workitem',
description: 'This is a test workitem',
isActive: true
};
// Mock GeminiProjectProcessor
mockGeminiProjectProcessor = {
getFileContent: jest.fn(),
writeFileContent: jest.fn(),
fileExists: jest.fn(),
listFiles: jest.fn(),
grepFiles: jest.fn(),
deleteFile: jest.fn(),
getCurrentWorkitem: jest.fn().mockReturnValue(mockWorkitem),
processProject: jest.fn(),
processWorkitem: jest.fn(),
generateFeatureFile: jest.fn(),
collectRelevantFiles: jest.fn(),
matchesPattern: jest.fn()
} as unknown as jest.Mocked<GeminiProjectProcessor>;
// Set up default mock behavior for generateContentStream
mockGetGenerativeModel.mockReturnValue({
generateContentStream: mockGenerateContentStream,
generateContent: mockGenerateContent
});
// Create a new instance of ModelStreamService
modelStreamService = new ModelStreamService(
mockGeminiProjectProcessor,
mockWorkitem
);
});
describe('processModelStream', () => {
it('should process model stream and handle function calls', async () => {
// Set up the mock response for the initial stream
const initialStreamResponse = {
stream: [
{
candidates: [
{
content: {
parts: [
{
functionCall: {
name: 'getFileContent',
args: JSON.stringify({ filePath: 'test/file.txt' })
}
}
]
}
}
]
}
]
};
// Set up the mock response for the next stream after function call
const nextStreamResponse = {
stream: [
{
candidates: [
{
content: {
parts: [
{
functionCall: {
name: 'writeFileContent',
args: JSON.stringify({
filePath: 'test/output.txt',
content: 'Test content'
})
}
}
]
}
}
]
}
]
};
// Set up the mock response for the final stream with decision
const finalStreamResponse = {
stream: [
{
candidates: [
{
content: {
parts: [
{
functionCall: {
name: 'makeDecision',
args: JSON.stringify({
decision: 'create',
reason: 'Created a new file'
})
}
}
]
}
}
]
},
{
candidates: [
{
content: {
parts: [
{ text: 'Processing complete' }
]
}
}
]
}
]
};
// Set up the mock implementations
mockGenerateContentStream
.mockResolvedValueOnce(initialStreamResponse)
.mockResolvedValueOnce(nextStreamResponse)
.mockResolvedValueOnce(finalStreamResponse)
.mockResolvedValue({ stream: [] }); // Add a default empty stream for any additional calls
mockGeminiProjectProcessor.getFileContent.mockReturnValue('Mock file content');
// Call the method
const result = await modelStreamService.processModelStream(
'Test guidelines',
'Test workitem content'
);
// Verify the result
expect(result.text).toContain('Processing complete');
expect(result.decision).toBeDefined();
expect(result.decision?.decision).toBe('create');
expect(result.decision?.reason).toBe('Created a new file');
expect(result.filesWritten).toContain('test/output.txt');
expect(result.filesDeleted).toHaveLength(0);
// Verify the function calls
expect(mockGeminiProjectProcessor.getFileContent).toHaveBeenCalledWith('test/file.txt');
expect(mockGeminiProjectProcessor.writeFileContent).toHaveBeenCalledWith(
'test/output.txt',
'Test content',
'test-workitem'
);
// Verify the generateContentStream was called
expect(mockGenerateContentStream).toHaveBeenCalledTimes(4);
});
it('should handle file deletion', async () => {
// Set up the mock response with a deleteFile function call
const streamResponse = {
stream: [
{
candidates: [
{
content: {
parts: [
{
functionCall: {
name: 'deleteFile',
args: JSON.stringify({ filePath: 'test/file-to-delete.txt' })
}
}
]
}
}
]
}
]
};
// Set up the mock response for the next stream after function call
const nextStreamResponse = {
stream: [
{
candidates: [
{
content: {
parts: [
{
functionCall: {
name: 'makeDecision',
args: JSON.stringify({
decision: 'delete',
reason: 'Deleted a file'
})
}
}
]
}
}
]
},
{
candidates: [
{
content: {
parts: [
{ text: 'Deletion complete' }
]
}
}
]
}
]
};
// Set up the mock implementations
mockGenerateContentStream
.mockResolvedValueOnce(streamResponse)
.mockResolvedValueOnce(nextStreamResponse)
.mockResolvedValue({ stream: [] }); // Add a default empty stream for any additional calls
mockGeminiProjectProcessor.deleteFile.mockReturnValue('File test/file-to-delete.txt deleted successfully');
// Call the method
const result = await modelStreamService.processModelStream(
'Test guidelines',
'Test workitem content'
);
// Verify the result
expect(result.text).toContain('Deletion complete');
expect(result.decision).toBeDefined();
expect(result.decision?.decision).toBe('delete');
expect(result.decision?.reason).toBe('Deleted a file');
expect(result.filesWritten).toHaveLength(0);
expect(result.filesDeleted).toContain('test/file-to-delete.txt');
// Verify the function calls
expect(mockGeminiProjectProcessor.deleteFile).toHaveBeenCalledWith('test/file-to-delete.txt');
// Verify the generateContentStream was called
expect(mockGenerateContentStream).toHaveBeenCalledTimes(3);
});
it('should handle errors in function calls', async () => {
// Set up the mock response with a function call that will fail
const streamResponse = {
stream: [
{
candidates: [
{
content: {
parts: [
{
functionCall: {
name: 'getFileContent',
args: JSON.stringify({ filePath: 'test/non-existent-file.txt' })
}
}
]
}
}
]
}
]
};
// Set up the mock response for the next stream after error
const nextStreamResponse = {
stream: [
{
candidates: [
{
content: {
parts: [
{
functionCall: {
name: 'makeDecision',
args: JSON.stringify({
decision: 'skip',
reason: 'File not found'
})
}
}
]
}
}
]
},
{
candidates: [
{
content: {
parts: [
{ text: 'Error handled' }
]
}
}
]
}
]
};
// Set up the mock implementations
mockGenerateContentStream
.mockResolvedValueOnce(streamResponse)
.mockResolvedValueOnce(nextStreamResponse)
.mockResolvedValue({ stream: [] }); // Add a default empty stream for any additional calls
mockGeminiProjectProcessor.getFileContent.mockImplementation(() => {
throw new Error('File not found');
});
// Call the method
const result = await modelStreamService.processModelStream(
'Test guidelines',
'Test workitem content'
);
// Verify the result
expect(result.text).toContain('Error handled');
expect(result.decision).toBeDefined();
expect(result.decision?.decision).toBe('skip');
expect(result.decision?.reason).toBe('File not found');
expect(result.filesWritten).toHaveLength(0);
expect(result.filesDeleted).toHaveLength(0);
// Verify the function calls
expect(mockGeminiProjectProcessor.getFileContent).toHaveBeenCalledWith('test/non-existent-file.txt');
// Verify the generateContentStream was called
expect(mockGenerateContentStream).toHaveBeenCalledTimes(3);
});
it('should parse decision from text if no makeDecision function call', async () => {
// Set up the mock response with text containing a JSON decision
const streamResponse = {
stream: [
{
candidates: [
{
content: {
parts: [
{
text: 'Some text before the decision { "decision": "skip", "reason": "No changes needed" } Some text after'
}
]
}
}
]
}
]
};
// Set up the mock implementations
mockGenerateContentStream
.mockResolvedValueOnce(streamResponse)
.mockResolvedValue({ stream: [] }); // Add a default empty stream for any additional calls
// Call the method
const result = await modelStreamService.processModelStream(
'Test guidelines',
'Test workitem content'
);
// Verify the result
expect(result.text).toContain('Some text before the decision');
expect(result.decision).toBeDefined();
expect(result.decision?.decision).toBe('skip');
expect(result.decision?.reason).toBe('No changes needed');
expect(result.filesWritten).toHaveLength(0);
expect(result.filesDeleted).toHaveLength(0);
// Verify the generateContentStream was called
expect(mockGenerateContentStream).toHaveBeenCalledTimes(1);
});
it('should handle dry run mode', async () => {
// Create a new service instance with dryRun set to true
const dryRunService = new ModelStreamService(
mockGeminiProjectProcessor,
mockWorkitem,
undefined, // projectId
undefined, // location
undefined, // model
true // dryRun
);
// Call the method
const result = await dryRunService.processModelStream(
'Test guidelines',
'Test workitem content'
);
// Verify the result
expect(result.text).toContain('DRY RUN');
expect(result.decision).toBeDefined();
expect(result.decision?.decision).toBe('create');
expect(result.decision?.reason).toBe('This is a mock decision for dry run mode');
expect(result.filesWritten).toHaveLength(0);
expect(result.filesDeleted).toHaveLength(0);
// Verify the generateContentStream was not called
expect(mockGenerateContentStream).not.toHaveBeenCalled();
});
});
describe('getModelResponses', () => {
it('should return all model responses', async () => {
// Set up the mock response with text
const streamResponse = {
stream: [
{
candidates: [
{
content: {
parts: [
{ text: 'Response 1' }
]
}
}
]
},
{
candidates: [
{
content: {
parts: [
{ text: 'Response 2' }
]
}
}
]
}
]
};
// Set up the mock implementations
mockGenerateContentStream
.mockResolvedValueOnce(streamResponse)
.mockResolvedValue({ stream: [] }); // Add a default empty stream for any additional calls
// Call the method
await modelStreamService.processModelStream(
'Test guidelines',
'Test workitem content'
);
// Get the model responses
const responses = modelStreamService.getModelResponses();
// Verify the responses
expect(responses).toHaveLength(2);
expect(responses[0]).toBe('Response 1');
expect(responses[1]).toBe('Response 2');
});
});
});
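The fixtures above repeat one nesting pattern; hypothetical helpers (not part of this commit) could produce the same stream items:

// Hypothetical builders for the candidates/parts shape used in the fixtures.
const functionCallChunk = (name: string, args: object) => ({
  candidates: [{ content: { parts: [{ functionCall: { name, args: JSON.stringify(args) } }] } }]
});

const textChunk = (text: string) => ({
  candidates: [{ content: { parts: [{ text }] } }]
});

// The deletion scenario's first response could then be written as:
// { stream: [functionCallChunk('deleteFile', { filePath: 'test/file-to-delete.txt' })] }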

View File

@@ -3,19 +3,17 @@
*/
import * as fs from 'fs';
import * as path from 'path';
import { Project, Workitem, ProcessResult } from '../types';
import { GeminiService } from './gemini-service';
import {ProcessResult, Project, Workitem} from '../types';
import {ProjectService} from './project-service';
import {RepositoryService} from './repository-service';
import {DRY_RUN_SKIP_GEMINI} from '../config';
import {ModelStreamService} from './model-stream-service';
export class GeminiProjectProcessor {
private geminiService: GeminiService;
private projectService: ProjectService;
private repositoryService: RepositoryService;
private project: Project;
private projectRepoPath: string;
private mainRepoPath: string;
private filesWritten: Map<string, string[]> = new Map(); // Map of workitem name to files written
private currentWorkitem: Workitem | null = null; // Track the current workitem being processed
@@ -26,8 +24,6 @@ export class GeminiProjectProcessor {
) {
this.project = project;
this.projectRepoPath = projectRepoPath;
this.mainRepoPath = mainRepoPath;
this.geminiService = new GeminiService();
this.projectService = new ProjectService();
this.repositoryService = new RepositoryService();
}
@@ -108,7 +104,12 @@ export class GeminiProjectProcessor {
private async processWorkitem(
workitem: Workitem,
projectGuidelines: string
): Promise<{ success: boolean; error?: string; status?: 'skipped' | 'updated' | 'created'; filesWritten?: string[] }> {
): Promise<{
success: boolean;
error?: string;
status?: 'skipped' | 'updated' | 'created';
filesWritten?: string[]
}> {
try {
// Set the current workitem
this.currentWorkitem = workitem;
@@ -134,12 +135,49 @@ export class GeminiProjectProcessor {
relevantFiles
);
// Get the list of files written for this workitem
const filesWritten = this.filesWritten.get(workitem.name) || [];
// Track files written and deleted from the ModelStreamService result
const filesWritten = [...result.filesWritten];
const filesDeleted = result.filesDeleted;
// If no files were written, but the workitem is active, consider it skipped
if (filesWritten.length === 0) {
// Add deleted files to the tracking with the "deleted:" prefix
filesDeleted.forEach(file => {
filesWritten.push(`deleted:${file}`);
});
// Update the filesWritten map
this.filesWritten.set(workitem.name, filesWritten);
// Use the model's decision to determine the status if available
if (result.decision) {
console.log(`Using model decision: ${result.decision.decision} for workitem ${workitem.name}`);
// Update status based on the model's decision
switch (result.decision.decision) {
case 'create':
status = 'created';
break;
case 'update':
status = 'updated';
break;
case 'delete':
// Keep status as 'skipped' for delete if no files were actually deleted
if (filesDeleted.length > 0) {
status = 'updated'; // We use 'updated' for deletions too
}
break;
case 'skip':
status = 'skipped';
break;
}
} else {
// Fallback to the old behavior if no decision is available
// If no files were written or deleted, consider it skipped
if (filesWritten.length === 0 && filesDeleted.length === 0) {
status = 'skipped';
} else if (filesWritten.length > 0 || filesDeleted.length > 0) {
// If files were written or deleted, consider it created/updated
status = filesWritten.length > 0 ? 'created' : 'updated';
}
}
// Update the workitem file with implementation log
@@ -233,20 +271,25 @@
* @param workitemContent Workitem content
* @param workitemName Name of the workitem
* @param relevantFiles Additional relevant files to include in the prompt
* @returns Generated feature file content
* @returns Object containing the generated text, parsed decision, and files written/deleted
*/
private async generateFeatureFile(
guidelines: string,
workitemContent: string,
workitemName: string,
relevantFiles: Record<string, string> = {}
): Promise<string> {
): Promise<{
text: string;
decision?: { decision: 'create' | 'update' | 'delete' | 'skip'; reason: string };
filesWritten: string[];
filesDeleted: string[];
}> {
const currentDate = new Date().toISOString();
// If dry run is enabled, return a mock feature file
if (DRY_RUN_SKIP_GEMINI) {
console.log(`[DRY RUN] Skipping Gemini API call for generating feature file for ${workitemName}`);
return `# Generated by prompts-to-test-spec on ${currentDate} (DRY RUN)
const mockText = `# Generated by prompts-to-test-spec on ${currentDate} (DRY RUN)
# Source: ${workitemName}
Feature: ${workitemName} (DRY RUN)
@@ -258,6 +301,15 @@ Feature: ${workitemName} (DRY RUN)
When the feature file is generated
Then a mock feature file is returned
`;
return {
text: mockText,
decision: {
decision: 'create',
reason: 'This is a mock decision for dry run mode'
},
filesWritten: [],
filesDeleted: []
};
}
console.log(`Using function calling to generate feature file for ${workitemName}`);
@@ -268,14 +320,31 @@ Feature: ${workitemName} (DRY RUN)
additionalContext += `\n--- ${filename} ---\n${content}\n`;
}
// Pass this instance as the processor to handle function calls
return await this.geminiService.generateFeatureFile(
// Get the current workitem
const currentWorkitem = this.getCurrentWorkitem();
if (!currentWorkitem) {
throw new Error(`No current workitem set for ${workitemName}`);
}
// Create a new ModelStreamService for this workitem
const modelStreamService = new ModelStreamService(
this,
currentWorkitem
);
// Process the model stream
const result = await modelStreamService.processModelStream(
guidelines,
workitemContent,
workitemName,
this, // Pass the GeminiProjectProcessor instance to handle function calls
additionalContext // Pass additional context from relevant files
additionalContext
);
return {
text: result.text,
decision: result.decision,
filesWritten: result.filesWritten,
filesDeleted: result.filesDeleted
};
}
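The decision-to-status mapping added in processWorkitem above condenses to a small pure function; a sketch using only values that appear in the hunk:

type Decision = 'create' | 'update' | 'delete' | 'skip';
type Status = 'skipped' | 'updated' | 'created';

function statusFromDecision(decision: Decision, filesDeleted: string[]): Status {
  switch (decision) {
    case 'create': return 'created';
    case 'update': return 'updated';
    // deletions are reported as 'updated'; with nothing actually deleted
    // the workitem stays 'skipped'
    case 'delete': return filesDeleted.length > 0 ? 'updated' : 'skipped';
    case 'skip': return 'skipped';
  }
}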
/**

View File

@@ -2,22 +2,43 @@
* Service for handling Gemini API operations
*/
import {
VertexAI,
FunctionDeclaration,
Tool,
FunctionDeclarationSchemaType,
GenerateContentRequest
GenerateContentCandidate,
GenerateContentRequest,
Tool,
VertexAI
} from '@google-cloud/vertexai';
import * as fs from 'fs';
import * as path from 'path';
import {Project, Workitem} from '../types';
import {
GOOGLE_CLOUD_PROJECT_ID,
GOOGLE_CLOUD_LOCATION,
GEMINI_MODEL,
DRY_RUN_SKIP_GEMINI,
GOOGLE_API_KEY
} from '../config';
import {Workitem} from '../types';
import {DRY_RUN_SKIP_GEMINI, GEMINI_MODEL, GOOGLE_CLOUD_LOCATION, GOOGLE_CLOUD_PROJECT_ID} from '../config';
import {GeminiProjectProcessor} from './gemini-project-processor';
/**
* Interface for the model response format
*/
interface ModelResponse {
decision: 'create' | 'update' | 'delete' | 'skip';
reason: string;
}
/**
* Interface for the result returned by generateFeatureFile
*/
interface GenerateFeatureFileResult {
text: string;
decision?: ModelResponse;
}
/**
* Interface for function arguments
*/
interface FunctionArgs {
filePath?: string;
content?: string;
dirPath?: string;
searchString?: string;
filePattern?: string;
}
export class GeminiService {
private vertexAI: VertexAI;
@@ -26,6 +47,12 @@ export class GeminiService {
private location: string;
private fileOperationTools: Tool[];
/**
* Create a new GeminiService instance
* @param projectId Google Cloud project ID (defaults to GOOGLE_CLOUD_PROJECT_ID from config)
* @param location Google Cloud location (defaults to GOOGLE_CLOUD_LOCATION from config)
* @param model Gemini model to use (defaults to GEMINI_MODEL from config)
*/
constructor(projectId?: string, location?: string, model?: string) {
this.projectId = projectId || GOOGLE_CLOUD_PROJECT_ID;
this.location = location || GOOGLE_CLOUD_LOCATION;
@@ -147,623 +174,14 @@ export class GeminiService {
];
}
/**
* Generate feature file content using Gemini API with streaming
* @param guidelines Project guidelines
* @param workitemContent Workitem content
* @param workitemName Name of the workitem
* @param geminiProjectProcessor Optional GeminiProjectProcessor to handle function calls
* @param additionalContext Optional additional context from relevant files
* @returns Generated feature file content
*/
async generateFeatureFile(
guidelines: string,
workitemContent: string,
workitemName: string,
geminiProjectProcessor?: any,
additionalContext: string = ''
): Promise<string> {
const currentDate = new Date().toISOString();
// If dry run is enabled, return a mock feature file
if (DRY_RUN_SKIP_GEMINI) {
console.log(`[DRY RUN] Skipping Gemini API call for generating feature file for ${workitemName}`);
return `# Generated by prompts-to-test-spec on ${currentDate} (DRY RUN)
# Source: ${workitemName}
Feature: ${workitemName} (DRY RUN)
This is a mock feature file generated during dry run.
No actual Gemini API call was made.
Scenario: Mock scenario
Given a dry run is enabled
When the feature file is generated
Then a mock feature file is returned
`;
}
// Create the prompt
const prompt = `
${guidelines}
Workitem:
${workitemContent}
You are tasked with implementing the workitem in the project repository according to the guidelines provided.
You have full control over how to implement the workitem, and you can decide what actions to take.
Include the following comment at the top of any generated files:
# Generated by prompts-to-test-spec on ${currentDate}
# Source: ${workitemName}
You have access to the following function calls to help you understand the project structure and create implementations:
- getFileContent(filePath): Get the content of a file in the project repository
- writeFileContent(filePath, content): Write content to a file in the project repository
- fileExists(filePath): Check if a file exists in the project repository
- listFiles(dirPath): List files in a directory in the project repository
- grepFiles(searchString, filePattern): Search for a string in project files, optionally filtered by a file pattern
- deleteFile(filePath): Delete a file from the project repository
You can decide whether to create, update, delete or skip implementing this workitem based on your analysis.
In your response, just include your decision with a short motivation in json format. For instance:
{ "decision": "create", "reason": "This workitem was not implemented" }
${additionalContext ? `\nAdditional context from project files:${additionalContext}` : ''}
`;
// Initialize Vertex with your Cloud project and location
const vertexAI = new VertexAI({
project: this.projectId,
location: this.location,
});
// Instantiate the model with our file operation tools
const generativeModel = vertexAI.getGenerativeModel({
model: this.model,
tools: geminiProjectProcessor ? this.fileOperationTools : undefined,
generation_config: {
temperature: 0.1, // Very low temperature for more deterministic responses
top_p: 0.95, // Higher top_p to allow more diverse completions when needed
top_k: 40, // Consider only the top 40 tokens
},
});
// Create the initial request
const request: GenerateContentRequest = {
contents: [
{role: 'user', parts: [{text: prompt}]}
],
tools: geminiProjectProcessor ? this.fileOperationTools : undefined,
};
// Generate content in a streaming fashion
const streamingResp = await generativeModel.generateContentStream(request);
let finalResponse = '';
let pendingFunctionCalls = [];
// Process the streaming response
for await (const item of streamingResp.stream) {
// Check if there's a function call in any part of the response
let functionCall = null;
let textContent = '';
// Iterate over every part in the response
for (const part of item.candidates?.[0]?.content?.parts || []) {
if (part.functionCall) {
functionCall = part.functionCall;
break;
} else if (part.text) {
textContent += part.text;
}
}
if (functionCall) {
console.log(`Function call detected: ${functionCall.name}`);
pendingFunctionCalls.push(functionCall);
} else if (textContent) {
// If there's text, append it to the final response
finalResponse += textContent;
}
}
// Process any function calls that were detected
if (pendingFunctionCalls.length > 0 && geminiProjectProcessor) {
console.log(`Processing ${pendingFunctionCalls.length} function calls from streaming response`);
let currentRequest: GenerateContentRequest = request;
// Process each function call
for (const functionCall of pendingFunctionCalls) {
const functionName = functionCall.name;
const functionArgs = (typeof functionCall.args === 'string' ?
JSON.parse(functionCall.args) : functionCall.args) as {
filePath?: string;
content?: string;
dirPath?: string;
searchString?: string;
filePattern?: string;
};
console.log(`Executing function: ${functionName} with args:`, functionArgs);
let functionResponse;
try {
// Execute the function using the GeminiProjectProcessor
switch (functionName) {
case 'getFileContent':
functionResponse = geminiProjectProcessor.getFileContent(functionArgs.filePath!);
break;
case 'writeFileContent':
// Get the current workitem name from the context
const currentWorkitem = geminiProjectProcessor.getCurrentWorkitem();
geminiProjectProcessor.writeFileContent(functionArgs.filePath!, functionArgs.content!, currentWorkitem?.name);
functionResponse = `File ${functionArgs.filePath} written successfully`;
break;
case 'fileExists':
functionResponse = geminiProjectProcessor.fileExists(functionArgs.filePath!);
break;
case 'listFiles':
functionResponse = geminiProjectProcessor.listFiles(functionArgs.dirPath!);
break;
case 'grepFiles':
functionResponse = geminiProjectProcessor.grepFiles(functionArgs.searchString!, functionArgs.filePattern);
break;
case 'deleteFile':
functionResponse = geminiProjectProcessor.deleteFile(functionArgs.filePath!);
break;
default:
throw new Error(`Unknown function: ${functionName}`);
}
// Create a function response object
const functionResponseObj = {
name: functionName,
response: {result: JSON.stringify(functionResponse)}
};
// Update the request with the function call and response
currentRequest = {
contents: [
...currentRequest.contents,
{
role: 'ASSISTANT',
parts: [
{
functionCall: functionCall
}
]
},
{
role: 'USER',
parts: [
{
functionResponse: functionResponseObj
}
]
}
],
tools: geminiProjectProcessor ? this.fileOperationTools : undefined,
};
// Generate the next response
const nextStreamingResp = await generativeModel.generateContentStream(currentRequest);
// Process the next streaming response
for await (const nextItem of nextStreamingResp.stream) {
let textContent = '';
// Iterate over every part in the response
for (const part of nextItem.candidates?.[0]?.content?.parts || []) {
if (part.text) {
textContent += part.text;
}
}
if (textContent) {
finalResponse += textContent;
}
}
} catch (error) {
console.error(`Error executing function ${functionName}:`, error);
// Create an error response object
const errorResponseObj = {
name: functionName,
response: {error: error instanceof Error ? error.message : String(error)}
};
// Update the request with the function call and error response
currentRequest = {
contents: [
...currentRequest.contents,
{
role: 'ASSISTANT',
parts: [
{
functionCall: functionCall
}
]
},
{
role: 'USER',
parts: [
{
functionResponse: errorResponseObj
}
]
}
],
tools: geminiProjectProcessor ? this.fileOperationTools : undefined,
};
// Generate the next response
const nextStreamingResp = await generativeModel.generateContentStream(currentRequest);
// Process the next streaming response
for await (const nextItem of nextStreamingResp.stream) {
let textContent = '';
// Iterate over every part in the response
for (const part of nextItem.candidates?.[0]?.content?.parts || []) {
if (part.text) {
textContent += part.text;
}
}
if (textContent) {
finalResponse += textContent;
}
}
}
}
}
console.log(`Gemini response for ${workitemName}: ${finalResponse}`);
return finalResponse;
}
/**
* Process function calls in the Gemini response
* @param result The result from Gemini
* @param chat The chat session
* @param geminiProjectProcessor The GeminiProjectProcessor to handle function calls
* @returns The final generated text
*/
private async processFunctionCalls(result: any, chat: any, geminiProjectProcessor?: any): Promise<string> {
// Check if there are function calls in the response
// Function calls can be at the top level or nested within candidates
const functionCalls = result.functionCalls ||
(result.response?.candidates?.[0]?.functionCall ? [result.response.candidates[0].functionCall] : []) ||
(result.candidates?.[0]?.functionCall ? [result.candidates[0].functionCall] : []);
if (functionCalls.length === 0 || !geminiProjectProcessor) {
// No function calls, return the text response
// Access text content from the response structure in @google-cloud/vertexai v0.5.0
return result.candidates?.[0]?.content?.parts?.[0]?.text ||
result.response?.candidates?.[0]?.content?.parts?.[0]?.text || '';
}
console.log(`Processing ${functionCalls.length} function calls from Gemini`);
// Process each function call
for (const functionCall of functionCalls) {
const functionName = functionCall.name;
// Handle both cases: when args is already an object and when it's a string that needs to be parsed
const functionArgs = (typeof functionCall.args === 'string' ?
JSON.parse(functionCall.args) : functionCall.args) as {
filePath?: string;
content?: string;
dirPath?: string;
searchString?: string;
filePattern?: string;
};
console.log(`Executing function: ${functionName} with args:`, functionArgs);
let functionResponse;
try {
// Execute the function using the GeminiProjectProcessor
switch (functionName) {
case 'getFileContent':
functionResponse = geminiProjectProcessor.getFileContent(functionArgs.filePath!);
break;
case 'writeFileContent':
// Get the current workitem name from the context
const currentWorkitem = geminiProjectProcessor.getCurrentWorkitem();
geminiProjectProcessor.writeFileContent(functionArgs.filePath!, functionArgs.content!, currentWorkitem?.name);
functionResponse = `File ${functionArgs.filePath} written successfully`;
break;
case 'fileExists':
functionResponse = geminiProjectProcessor.fileExists(functionArgs.filePath!);
break;
case 'listFiles':
functionResponse = geminiProjectProcessor.listFiles(functionArgs.dirPath!);
break;
case 'grepFiles':
functionResponse = geminiProjectProcessor.grepFiles(functionArgs.searchString!, functionArgs.filePattern);
break;
case 'deleteFile':
functionResponse = geminiProjectProcessor.deleteFile(functionArgs.filePath!);
break;
default:
throw new Error(`Unknown function: ${functionName}`);
}
// Send the function response back to Gemini
const functionResponseObj = {
functionResponse: {
name: functionName,
response: {result: JSON.stringify(functionResponse)}
}
};
const nextResult = await chat.sendMessage(functionResponseObj);
// Recursively process any additional function calls
return this.processFunctionCalls(nextResult, chat, geminiProjectProcessor);
} catch (error) {
console.error(`Error executing function ${functionName}:`, error);
// Send the error back to Gemini
const errorResponseObj = {
functionResponse: {
name: functionName,
response: {error: error instanceof Error ? error.message : String(error)}
}
};
const nextResult = await chat.sendMessage(errorResponseObj);
// Recursively process any additional function calls
return this.processFunctionCalls(nextResult, chat, geminiProjectProcessor);
}
}
// Return the final text response
// Access text content from the response structure in @google-cloud/vertexai v0.5.0
return result.candidates?.[0]?.content?.parts?.[0]?.text || '';
}
/**
* Example of using function calling with streaming content generation
* This method demonstrates how to use the Vertex AI API for function calling in a streaming context
* @param projectId Google Cloud project ID
* @param location Google Cloud location
* @param model Gemini model to use
*/
async functionCallingStreamExample(
projectId: string = this.projectId,
location: string = this.location,
model: string = this.model
): Promise<void> {
// Initialize Vertex with your Cloud project and location
const vertexAI = new VertexAI({project: projectId, location: location});
// Instantiate the model
const generativeModel = vertexAI.getGenerativeModel({
model: model,
});
// Define the function declaration for the weather function
const functionDeclarations: FunctionDeclaration[] = [
{
name: "get_current_weather",
description: "Get the current weather in a given location",
parameters: {
type: FunctionDeclarationSchemaType.OBJECT,
properties: {
location: {
type: FunctionDeclarationSchemaType.STRING,
description: "The city and state, e.g., San Francisco, CA"
}
},
required: ["location"]
}
}
];
// Create a mock function response
const functionResponseParts = [
{
functionResponse: {
name: "get_current_weather",
response: {
temperature: "72",
unit: "fahrenheit",
description: "Sunny"
}
}
}
];
// Create the request with function calling
const request = {
contents: [
{role: 'user', parts: [{text: 'What is the weather in Boston?'}]},
{
role: 'ASSISTANT',
parts: [
{
functionCall: {
name: 'get_current_weather',
args: {location: 'Boston'},
},
},
],
},
{role: 'USER', parts: functionResponseParts},
],
tools: [{function_declarations: functionDeclarations}],
};
// Generate content in a streaming fashion
const streamingResp = await generativeModel.generateContentStream(request);
// Process the streaming response
for await (const item of streamingResp.stream) {
// Iterate over every part in the response
for (const part of item.candidates?.[0]?.content?.parts || []) {
if (part.text) {
console.log(part.text);
}
}
}
}
/**
* Example of using function calling with streaming content generation for file operations
* This method demonstrates how to use the Vertex AI API for file operation function calling in a streaming context
* @param workitemName Name of the workitem
* @param geminiProjectProcessor The GeminiProjectProcessor to handle function calls
*/
async fileOperationsStreamExample(
workitemName: string,
geminiProjectProcessor: any
): Promise<void> {
// Initialize Vertex with your Cloud project and location
const vertexAI = new VertexAI({
project: this.projectId,
location: this.location,
});
// Instantiate the model with our file operation tools
const generativeModel = vertexAI.getGenerativeModel({
model: this.model,
tools: this.fileOperationTools,
});
// Create a prompt that asks the model to check if a file exists and create it if it doesn't
const prompt = `Check if the file 'example.txt' exists and create it with some content if it doesn't.`;
// Create the initial request
const request = {
contents: [
{role: 'user', parts: [{text: prompt}]}
],
tools: this.fileOperationTools,
};
// Generate content in a streaming fashion
const streamingResp = await generativeModel.generateContentStream(request);
// Process the streaming response
for await (const item of streamingResp.stream) {
// Check if there's a function call in any part of the response
let functionCall = null;
// Iterate over every part in the response
for (const part of item.candidates?.[0]?.content?.parts || []) {
if (part.functionCall) {
functionCall = part.functionCall;
break;
}
}
if (functionCall) {
console.log(`Function call detected: ${functionCall.name}`);
// Execute the function
const functionName = functionCall.name;
const functionArgs = functionCall.args as {
filePath?: string;
content?: string;
dirPath?: string;
searchString?: string;
filePattern?: string;
};
console.log(`Executing function: ${functionName} with args:`, functionArgs);
let functionResponse;
try {
// Execute the function using the GeminiProjectProcessor
switch (functionName) {
case 'getFileContent':
functionResponse = geminiProjectProcessor.getFileContent(functionArgs.filePath!);
break;
case 'writeFileContent':
geminiProjectProcessor.writeFileContent(functionArgs.filePath!, functionArgs.content!, workitemName);
functionResponse = `File ${functionArgs.filePath} written successfully`;
break;
case 'fileExists':
functionResponse = geminiProjectProcessor.fileExists(functionArgs.filePath!);
break;
case 'listFiles':
functionResponse = geminiProjectProcessor.listFiles(functionArgs.dirPath!);
break;
case 'grepFiles':
functionResponse = geminiProjectProcessor.grepFiles(functionArgs.searchString!, functionArgs.filePattern);
break;
case 'deleteFile':
functionResponse = geminiProjectProcessor.deleteFile(functionArgs.filePath!);
break;
default:
throw new Error(`Unknown function: ${functionName}`);
}
// Create a new request with the function response
const functionResponseObj = {
name: functionName,
response: {result: JSON.stringify(functionResponse)}
};
// Continue the conversation with the function response
const nextRequest = {
contents: [
{role: 'user', parts: [{text: prompt}]},
{
role: 'ASSISTANT',
parts: [
{
functionCall: functionCall
}
]
},
{
role: 'USER',
parts: [
{
functionResponse: functionResponseObj
}
]
}
],
tools: this.fileOperationTools,
};
// Generate the next response
const nextStreamingResp = await generativeModel.generateContentStream(nextRequest);
// Process the next streaming response
for await (const nextItem of nextStreamingResp.stream) {
// Iterate over every part in the response
for (const part of nextItem.candidates?.[0]?.content?.parts || []) {
if (part.text) {
console.log(part.text);
}
}
}
} catch (error) {
console.error(`Error executing function ${functionName}:`, error);
}
} else {
// If there's no function call, just log the text from all parts
for (const part of item.candidates?.[0]?.content?.parts || []) {
if (part.text) {
console.log(part.text);
}
}
}
}
}
/**
* Generate a pull request description using Gemini API
* @param processedWorkitems List of processed workitems
* @param processedWorkitems List of processed workitems with their status
* @param gitPatch Optional git patch showing code changes
* @returns Generated pull request description
* @returns Generated pull request description in markdown format
* @example
* const geminiService = new GeminiService();
* const prDescription = await geminiService.generatePullRequestDescription(processedWorkitems, gitPatch);
*/
async generatePullRequestDescription(
processedWorkitems: { workitem: Workitem; success: boolean; error?: string }[],

View File

@@ -0,0 +1,578 @@
/**
* Service for handling model streams for specific workitems
*/
import {
FunctionDeclarationSchemaType,
GenerateContentCandidate,
GenerateContentRequest,
Tool,
VertexAI
} from '@google-cloud/vertexai';
import { Workitem } from '../types';
import { DRY_RUN_SKIP_GEMINI, GEMINI_MODEL, GOOGLE_CLOUD_LOCATION, GOOGLE_CLOUD_PROJECT_ID } from '../config';
import { GeminiProjectProcessor } from './gemini-project-processor';
/**
* Interface for the model response format
*/
interface ModelResponse {
decision: 'create' | 'update' | 'delete' | 'skip';
reason: string;
}
/**
* Interface for function arguments
*/
interface FunctionArgs {
filePath?: string;
content?: string;
dirPath?: string;
searchString?: string;
filePattern?: string;
decision?: 'create' | 'update' | 'delete' | 'skip';
reason?: string;
}
/**
* Interface for streaming response item
*/
interface StreamResponseItem {
candidates?: GenerateContentCandidate[];
usageMetadata?: any;
}
/**
* Interface for the result of processing a model stream
*/
export interface ModelStreamResult {
text: string;
decision?: ModelResponse;
filesWritten: string[];
filesDeleted: string[];
}
/**
* Service for handling model streams for specific workitems
*/
export class ModelStreamService {
private vertexAI: VertexAI;
private model: string;
private projectId: string;
private location: string;
private fileOperationTools: Tool[];
private geminiProjectProcessor: GeminiProjectProcessor;
private workitem: Workitem;
// State tracking
private filesWritten: string[] = [];
private filesDeleted: string[] = [];
private modelResponses: string[] = [];
private decision?: ModelResponse;
/**
* Create a new ModelStreamService instance
* @param geminiProjectProcessor GeminiProjectProcessor to handle function calls
* @param workitem Workitem being processed
* @param projectId Google Cloud project ID (defaults to GOOGLE_CLOUD_PROJECT_ID from config)
* @param location Google Cloud location (defaults to GOOGLE_CLOUD_LOCATION from config)
* @param model Gemini model to use (defaults to GEMINI_MODEL from config)
*/
constructor(
geminiProjectProcessor: GeminiProjectProcessor,
workitem: Workitem,
projectId?: string,
location?: string,
model?: string,
private dryRun?: boolean
) {
this.geminiProjectProcessor = geminiProjectProcessor;
this.workitem = workitem;
this.projectId = projectId || GOOGLE_CLOUD_PROJECT_ID;
this.location = location || GOOGLE_CLOUD_LOCATION;
this.model = model || GEMINI_MODEL;
if (!this.projectId) {
throw new Error('Google Cloud Project ID is required');
}
// Initialize VertexAI with default authentication
this.vertexAI = new VertexAI({
project: this.projectId,
location: this.location,
});
// Define file operation functions and decision function
this.fileOperationTools = [
{
function_declarations: [
{
name: "getFileContent",
description: "Get the content of a file in the project repository",
parameters: {
type: FunctionDeclarationSchemaType.OBJECT,
properties: {
filePath: {
type: FunctionDeclarationSchemaType.STRING,
description: "Path to the file relative to the project repository root"
}
},
required: ["filePath"]
}
},
{
name: "writeFileContent",
description: "Write content to a file in the project repository",
parameters: {
type: FunctionDeclarationSchemaType.OBJECT,
properties: {
filePath: {
type: FunctionDeclarationSchemaType.STRING,
description: "Path to the file relative to the project repository root"
},
content: {
type: FunctionDeclarationSchemaType.STRING,
description: "Content to write to the file"
}
},
required: ["filePath", "content"]
}
},
{
name: "fileExists",
description: "Check if a file exists in the project repository",
parameters: {
type: FunctionDeclarationSchemaType.OBJECT,
properties: {
filePath: {
type: FunctionDeclarationSchemaType.STRING,
description: "Path to the file relative to the project repository root"
}
},
required: ["filePath"]
}
},
{
name: "listFiles",
description: "List files in a directory in the project repository",
parameters: {
type: FunctionDeclarationSchemaType.OBJECT,
properties: {
dirPath: {
type: FunctionDeclarationSchemaType.STRING,
description: "Path to the directory relative to the project repository root"
}
},
required: ["dirPath"]
}
},
{
name: "grepFiles",
description: "Search for a string in project files",
parameters: {
type: FunctionDeclarationSchemaType.OBJECT,
properties: {
searchString: {
type: FunctionDeclarationSchemaType.STRING,
description: "String to search for in project files"
},
filePattern: {
type: FunctionDeclarationSchemaType.STRING,
description: "Optional file pattern to limit the search (e.g., '*.ts', 'src/*.java')"
}
},
required: ["searchString"]
}
},
{
name: "deleteFile",
description: "Delete a file from the project repository",
parameters: {
type: FunctionDeclarationSchemaType.OBJECT,
properties: {
filePath: {
type: FunctionDeclarationSchemaType.STRING,
description: "Path to the file relative to the project repository root"
}
},
required: ["filePath"]
}
},
{
name: "makeDecision",
description: "State your decision about implementing the workitem",
parameters: {
type: FunctionDeclarationSchemaType.OBJECT,
properties: {
decision: {
type: FunctionDeclarationSchemaType.STRING,
description: "Your decision: 'create', 'update', 'delete', or 'skip'",
enum: ["create", "update", "delete", "skip"]
},
reason: {
type: FunctionDeclarationSchemaType.STRING,
description: "Reason for your decision"
}
},
required: ["decision", "reason"]
}
}
]
}
];
}
/**
* Process a model stream for a workitem
* @param guidelines Project guidelines
* @param workitemContent Workitem content
* @param additionalContext Optional additional context from relevant files
* @returns Object containing the generated text, parsed decision, and files written/deleted
*/
async processModelStream(
guidelines: string,
workitemContent: string,
additionalContext: string = ''
): Promise<ModelStreamResult> {
const currentDate = new Date().toISOString();
// If dry run is enabled, return a mock result
if (this.dryRun || DRY_RUN_SKIP_GEMINI) {
console.log(`[DRY RUN] Skipping Gemini API call for processing workitem ${this.workitem.name}`);
const mockText = `# Generated by prompts-to-test-spec on ${currentDate} (DRY RUN)
# Source: ${this.workitem.name}
Feature: ${this.workitem.name} (DRY RUN)
This is a mock feature file generated during dry run.
No actual Gemini API call was made.
Scenario: Mock scenario
Given a dry run is enabled
When the feature file is generated
Then a mock feature file is returned
`;
return {
text: mockText,
decision: {
decision: 'create',
reason: 'This is a mock decision for dry run mode'
},
filesWritten: [],
filesDeleted: []
};
}
// Create the prompt
const prompt = `
${guidelines}
Workitem:
${workitemContent}
You are tasked with implementing the workitem in the project repository according to the guidelines provided.
You have full control over how to implement the workitem, and you can decide what actions to take.
Include the following comment at the top of any generated files:
# Generated by prompts-to-test-spec on ${currentDate}
# Source: ${this.workitem.name}
You have access to the following function calls to help you understand the project structure and create implementations:
- getFileContent(filePath): Get the content of a file in the project repository
- writeFileContent(filePath, content): Write content to a file in the project repository
- fileExists(filePath): Check if a file exists in the project repository
- listFiles(dirPath): List files in a directory in the project repository
- grepFiles(searchString, filePattern): Search for a string in project files, optionally filtered by a file pattern
- deleteFile(filePath): Delete a file from the project repository
- makeDecision(decision, reason): State your decision about implementing the workitem. Decision must be one of: 'create', 'update', 'delete', 'skip'
You can decide whether to create, update, delete or skip implementing this workitem based on your analysis.
IMPORTANT!!: First use the function calls above to actually implement the workitem. Make all necessary function calls to fully implement the workitem.
After you have implemented the workitem using function calls, use the makeDecision function to state your final decision with a reason.
${additionalContext ? `\nAdditional context from project files:${additionalContext}` : ''}
`;
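// The stream is expected to finish with a makeDecision call, e.g.
//   makeDecision({ decision: 'create', reason: 'Implemented the workitem' })
// which the handler below records into this.decision.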
// Instantiate the model with our file operation tools
const generativeModel = this.vertexAI.getGenerativeModel({
model: this.model,
tools: this.fileOperationTools,
generationConfig: {
temperature: 0.1, // Very low temperature for more deterministic responses
topP: 0.95, // Higher topP to allow more diverse completions when needed
topK: 40, // Consider only the top 40 tokens
},
});
// Create the initial request
const request: GenerateContentRequest = {
contents: [
{role: 'user', parts: [{text: prompt}]}
],
tools: this.fileOperationTools,
};
// Generate content in a streaming fashion
const streamingResp = await generativeModel.generateContentStream(request);
let finalResponse = '';
let pendingFunctionCalls = [];
// Process the streaming response
for await (const item of streamingResp.stream) {
// Add debug logging for each item in the model stream
console.log(`[DEBUG] Processing stream item`);
// Avoid stringifying the entire item which can be too complex
if (item.candidates && item.candidates.length > 0) {
console.log(`[DEBUG] Item has ${item.candidates.length} candidates`);
}
// Check if there's a function call in any part of the response
let functionCall = null;
let textContent = '';
// Iterate over every part in the response
for (const part of item.candidates?.[0]?.content?.parts || []) {
if (part.functionCall) {
functionCall = part.functionCall;
console.log(`[DEBUG] Function call detected in stream: ${functionCall.name}`);
break;
} else if (part.text) {
textContent += part.text;
console.log(`[DEBUG] Text content detected in stream: ${textContent.substring(0, 100)}${textContent.length > 100 ? '...' : ''}`);
}
}
if (functionCall) {
console.log(`Function call detected: ${functionCall.name}`);
pendingFunctionCalls.push(functionCall);
} else if (textContent) {
// If there's text, append it to the final response
finalResponse += textContent;
this.modelResponses.push(textContent);
}
}
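// For reference, the stream items consumed above have roughly this shape (assumed from
// the access pattern used here):
//   { candidates: [{ content: { parts: [{ functionCall?: {...}, text?: '...' }] } }] }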
// Process any function calls that were detected
if (pendingFunctionCalls.length > 0) {
console.log(`Processing ${pendingFunctionCalls.length} function calls from streaming response`);
let currentRequest: GenerateContentRequest = request;
// Process each function call
for (const functionCall of pendingFunctionCalls) {
const functionName = functionCall.name;
const functionArgs = (typeof functionCall.args === 'string' ?
JSON.parse(functionCall.args) : functionCall.args) as FunctionArgs;
console.log(`Executing function: ${functionName} with args:`, functionArgs);
let functionResponse;
try {
// Execute the function
switch (functionName) {
case 'getFileContent':
functionResponse = this.geminiProjectProcessor.getFileContent(functionArgs.filePath!);
break;
case 'writeFileContent':
this.geminiProjectProcessor.writeFileContent(functionArgs.filePath!, functionArgs.content!, this.workitem.name);
functionResponse = `File ${functionArgs.filePath} written successfully`;
// Track the file written
this.filesWritten.push(functionArgs.filePath!);
break;
case 'fileExists':
functionResponse = this.geminiProjectProcessor.fileExists(functionArgs.filePath!);
break;
case 'listFiles':
functionResponse = this.geminiProjectProcessor.listFiles(functionArgs.dirPath!);
break;
case 'grepFiles':
functionResponse = this.geminiProjectProcessor.grepFiles(functionArgs.searchString!, functionArgs.filePattern);
break;
case 'deleteFile':
functionResponse = this.geminiProjectProcessor.deleteFile(functionArgs.filePath!);
// Track the file deleted
this.filesDeleted.push(functionArgs.filePath!);
break;
case 'makeDecision':
// Store the decision
this.decision = {
decision: functionArgs.decision!,
reason: functionArgs.reason!
};
functionResponse = `Decision recorded: ${functionArgs.decision} - ${functionArgs.reason}`;
console.log(`Model decision: ${functionArgs.decision} - ${functionArgs.reason}`);
break;
default:
throw new Error(`Unknown function: ${functionName}`);
}
// Create a function response object
const functionResponseObj = {
name: functionName,
response: {result: JSON.stringify(functionResponse)}
};
// Update the request with the function call and response
currentRequest = {
contents: [
...currentRequest.contents,
{
role: 'model',
parts: [
{
functionCall: functionCall
}
]
},
{
role: 'user',
parts: [
{
functionResponse: functionResponseObj
}
]
}
],
tools: this.fileOperationTools,
};
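// The follow-up request replays the conversation plus two turns: a 'model' turn carrying
// the functionCall and a 'user' turn carrying the functionResponse. This is the turn
// structure the function-calling API expects before it continues generating.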
// Generate the next response
const nextStreamingResp = await generativeModel.generateContentStream(currentRequest);
// Process the next streaming response
for await (const nextItem of nextStreamingResp.stream) {
console.log(`[DEBUG] Processing next stream item`);
// Avoid stringifying the entire item which can be too complex
if (nextItem.candidates && nextItem.candidates.length > 0) {
console.log(`[DEBUG] Next item has ${nextItem.candidates.length} candidates`);
}
let textContent = '';
let nextFunctionCall = null;
// Iterate over every part in the response
for (const part of nextItem.candidates?.[0]?.content?.parts || []) {
if (part.functionCall) {
nextFunctionCall = part.functionCall;
console.log(`[DEBUG] Function call detected in next stream: ${nextFunctionCall.name}`);
break;
} else if (part.text) {
textContent += part.text;
console.log(`[DEBUG] Text content detected in next stream: ${textContent.substring(0, 100)}${textContent.length > 100 ? '...' : ''}`);
}
}
if (nextFunctionCall) {
// Add to pending function calls to be processed
pendingFunctionCalls.push(nextFunctionCall);
} else if (textContent) {
finalResponse += textContent;
this.modelResponses.push(textContent);
}
}
} catch (error) {
console.error(`Error executing function ${functionName}:`, error);
// Create an error response object
const errorResponseObj = {
name: functionName,
response: {error: error instanceof Error ? error.message : String(error)}
};
// Update the request with the function call and error response
currentRequest = {
contents: [
...currentRequest.contents,
{
role: 'model',
parts: [
{
functionCall: functionCall
}
]
},
{
role: 'user',
parts: [
{
functionResponse: errorResponseObj
}
]
}
],
tools: this.fileOperationTools,
};
// Generate the next response
const nextStreamingResp = await generativeModel.generateContentStream(currentRequest);
// Process the next streaming response
for await (const nextItem of nextStreamingResp.stream) {
console.log(`[DEBUG] Processing next stream item after error`);
// Avoid stringifying the entire item which can be too complex
if (nextItem.candidates && nextItem.candidates.length > 0) {
console.log(`[DEBUG] Next item after error has ${nextItem.candidates.length} candidates`);
}
let textContent = '';
let nextFunctionCall = null;
// Iterate over every part in the response
for (const part of nextItem.candidates?.[0]?.content?.parts || []) {
if (part.functionCall) {
nextFunctionCall = part.functionCall;
console.log(`[DEBUG] Function call detected in next stream after error: ${nextFunctionCall.name}`);
break;
} else if (part.text) {
textContent += part.text;
console.log(`[DEBUG] Text content detected in next stream after error: ${textContent.substring(0, 100)}${textContent.length > 100 ? '...' : ''}`);
}
}
if (nextFunctionCall) {
// Add to pending function calls to be processed
pendingFunctionCalls.push(nextFunctionCall);
} else if (textContent) {
finalResponse += textContent;
this.modelResponses.push(textContent);
}
}
}
}
}
console.log(`Model stream processing completed for ${this.workitem.name}`);
console.log(`Files written: ${this.filesWritten.length}, Files deleted: ${this.filesDeleted.length}`);
// If no explicit decision was made using the makeDecision function, try to parse it from the text
if (!this.decision) {
try {
// Try to parse a JSON decision from the text
const jsonMatch = finalResponse.match(/\{[\s\S]*"decision"[\s\S]*\}/);
if (jsonMatch) {
this.decision = JSON.parse(jsonMatch[0]) as ModelResponse;
console.log(`Parsed decision from text: ${this.decision.decision}, reason: ${this.decision.reason}`);
}
} catch (error) {
console.error(`Error parsing decision from text: ${error}`);
}
}
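// Example of a fallback the regex above would catch (hypothetical model output):
//   "...done implementing. {\"decision\": \"update\", \"reason\": \"Endpoint existed but payload changed\"}"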
return {
text: finalResponse,
decision: this.decision,
filesWritten: this.filesWritten,
filesDeleted: this.filesDeleted
};
}
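// Usage sketch (hypothetical caller):
//   const result = await processor.processModelStream(guidelines, workitemContent);
//   if (result.decision?.decision !== 'skip') {
//     console.log(`Wrote ${result.filesWritten.length} file(s), deleted ${result.filesDeleted.length}`);
//   }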
/**
* Get all model responses collected during processing
* @returns Array of model response texts
*/
getModelResponses(): string[] {
return this.modelResponses;
}
}

View File

@@ -2,29 +2,24 @@
* Service for orchestrating the entire process
*/
import * as path from 'path';
import { Project, ProcessResult, RepoCredentials } from '../types';
import {ProcessResult, Project, RepoCredentials} from '../types';
import {RepositoryService} from './repository-service';
import {ProjectService} from './project-service';
import { GeminiService } from './gemini-service';
import {PullRequestService} from './pull-request-service';
import {GeminiProjectProcessor} from './gemini-project-processor';
import {
MAIN_REPO_URL,
validateConfig,
getMainRepoCredentials,
getGithubCredentials,
DRY_RUN_SKIP_COMMITS,
getGiteaCredentials,
GOOGLE_CLOUD_PROJECT_ID,
GOOGLE_CLOUD_LOCATION,
GEMINI_MODEL,
getGithubCredentials,
getMainRepoCredentials,
MAIN_REPO_URL,
USE_LOCAL_REPO,
DRY_RUN_SKIP_COMMITS
validateConfig
} from '../config';
export class ProcessorService {
private repositoryService: RepositoryService;
private projectService: ProjectService;
private geminiService: GeminiService;
private pullRequestService: PullRequestService;
private mainRepoUrl: string;
private mainRepoCredentials: RepoCredentials;
@@ -38,11 +33,6 @@ export class ProcessorService {
// Initialize services
this.repositoryService = new RepositoryService();
this.projectService = new ProjectService();
this.geminiService = new GeminiService(
GOOGLE_CLOUD_PROJECT_ID,
GOOGLE_CLOUD_LOCATION,
GEMINI_MODEL
);
this.pullRequestService = new PullRequestService();
// Get main repository URL and credentials only if not using local repo

View File

@@ -12,6 +12,7 @@ export class PullRequestService {
constructor() {
this.geminiService = new GeminiService();
}
/**
* Create a pull request for changes in a repository
* @param project Project information
@@ -23,7 +24,13 @@
async createPullRequest(
project: Project,
branchName: string,
processedWorkitems: { workitem: Workitem; success: boolean; error?: string; status?: 'skipped' | 'updated' | 'created'; filesWritten?: string[] }[],
processedWorkitems: {
workitem: Workitem;
success: boolean;
error?: string;
status?: 'skipped' | 'updated' | 'created';
filesWritten?: string[]
}[],
credentials: RepoCredentials,
gitPatch?: string
): Promise<string> {
@@ -155,7 +162,13 @@
* @returns Pull request description
*/
private async generatePullRequestDescription(
processedWorkitems: { workitem: Workitem; success: boolean; error?: string; status?: 'skipped' | 'updated' | 'created'; filesWritten?: string[] }[],
processedWorkitems: {
workitem: Workitem;
success: boolean;
error?: string;
status?: 'skipped' | 'updated' | 'created';
filesWritten?: string[]
}[],
gitPatch?: string
): Promise<string> {
// Use Gemini to generate the pull request description, passing the git patch

View File

@@ -5,6 +5,7 @@
- Add comments in the feature file indicating
- The date/time/execution info of the job that created the work item
- The work item prompt file in this directory
- Cucumber specs must be succinct and deterministic. Avoid words like "should" and "should have"; prefer "must" and "must have" (see the illustrative example below).
- Inactive work items should have their feature file deleted.
- Updates should be committed to a new branch and a pull request should be created.
- The pull request should include a short description of the modified code
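An illustrative example of the preferred Gherkin phrasing (hypothetical, not from an actual spec):

    Scenario: Test endpoint returns its status
      When the /test endpoint is called
      Then the response must contain "status"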

View File

@@ -7,3 +7,9 @@ The nitro-back backend should have a /test endpoint implemented returning the js
- [ ] Jira:
- [ ] Implementation:
- [x] Active
### Log
2025-06-08T03:00:46.571Z - Workitem has been implemented. Created files:
No files were affected.