refactor(chatbot): integrate OpenAI service into chatbot implementations

- Updated various chatbot services (Typebot, Dify, EvolutionBot, Flowise, N8n) to include the OpenAI service for audio transcription capabilities.
- Modified constructors to accept OpenaiService as a dependency, enhancing the ability to transcribe audio messages directly within each service.
- Refactored the handling of `keywordFinish` in multiple controllers and services, changing its type from an array of keywords to a single string for consistency and simplifying the session-termination comparison logic.
- Removed redundant audio transcription logic from the base service, centralizing it within the OpenAI service to improve maintainability and reduce code duplication.

This commit focuses on enhancing the chatbot services by integrating OpenAI's transcription capabilities, improving code structure, and ensuring consistent handling of session keywords.
This commit is contained in:
Davidson Gomes 2025-05-21 22:17:10 -03:00
parent 9cedf31eed
commit 6a0fc19702
10 changed files with 124 additions and 90 deletions

View File

@ -13,7 +13,7 @@ import { ChatbotController, ChatbotControllerInterface, EmitData } from './chatb
// Common settings interface for all chatbot integrations
export interface ChatbotSettings {
expire: number;
keywordFinish: string[];
keywordFinish: string;
delayMessage: number;
unknownMessage: string;
listeningFromMe: boolean;
@ -344,7 +344,6 @@ export abstract class BaseChatbotController<BotType = any, BotData extends BaseC
const settings = await this.settingsRepository.create({
data: {
...settingsData,
instanceId: instanceId,
Instance: {
connect: {
id: instanceId,
@ -399,7 +398,7 @@ export abstract class BaseChatbotController<BotType = any, BotData extends BaseC
if (!settings) {
return {
expire: 300,
keywordFinish: ['bye', 'exit', 'quit', 'stop'],
keywordFinish: 'bye',
delayMessage: 1000,
unknownMessage: 'Sorry, I dont understand',
listeningFromMe: true,

View File

@ -2,11 +2,9 @@ import { InstanceDto } from '@api/dto/instance.dto';
import { PrismaRepository } from '@api/repository/repository.service';
import { WAMonitoringService } from '@api/services/monitor.service';
import { Integration } from '@api/types/wa.types';
import { ConfigService, Language } from '@config/env.config';
import { ConfigService } from '@config/env.config';
import { Logger } from '@config/logger.config';
import { IntegrationSession } from '@prisma/client';
import axios from 'axios';
import FormData from 'form-data';
/**
* Base class for all chatbot service implementations
@ -73,46 +71,6 @@ export abstract class BaseChatbotService<BotType = any, SettingsType = any> {
return null;
}
/**
* Transcribes audio to text using OpenAI's Whisper API
*/
protected async speechToText(audioBuffer: Buffer): Promise<string | null> {
if (!this.configService) {
this.logger.error('ConfigService not available for speech-to-text transcription');
return null;
}
try {
// Try to get the API key from process.env directly since ConfigService might not access it correctly
const apiKey = this.configService.get<any>('OPENAI')?.API_KEY || process.env.OPENAI_API_KEY;
if (!apiKey) {
this.logger.error('No OpenAI API key set for Whisper transcription');
return null;
}
const lang = this.configService.get<Language>('LANGUAGE').includes('pt')
? 'pt'
: this.configService.get<Language>('LANGUAGE');
const formData = new FormData();
formData.append('file', audioBuffer, 'audio.ogg');
formData.append('model', 'whisper-1');
formData.append('language', lang);
const response = await axios.post('https://api.openai.com/v1/audio/transcriptions', formData, {
headers: {
...formData.getHeaders(),
Authorization: `Bearer ${apiKey}`,
},
});
return response?.data?.text || null;
} catch (err) {
this.logger.error(`Whisper transcription failed: ${err}`);
return null;
}
}
/**
* Create a new chatbot session
*/
@ -174,12 +132,9 @@ export abstract class BaseChatbotService<BotType = any, SettingsType = any> {
}
// For existing sessions, keywords might indicate the conversation should end
const keywordFinish = (settings as any)?.keywordFinish || [];
const keywordFinish = (settings as any)?.keywordFinish || '';
const normalizedContent = content.toLowerCase().trim();
if (
keywordFinish.length > 0 &&
keywordFinish.some((keyword: string) => normalizedContent === keyword.toLowerCase())
) {
if (keywordFinish.length > 0 && normalizedContent === keywordFinish.toLowerCase()) {
// Update session to closed and return
await this.prismaRepository.integrationSession.update({
where: {

View File

@ -6,13 +6,21 @@ import { Auth, ConfigService, HttpServer } from '@config/env.config';
import { Dify, DifySetting, IntegrationSession } from '@prisma/client';
import { sendTelemetry } from '@utils/sendTelemetry';
import axios from 'axios';
import { downloadMediaMessage } from 'baileys';
import { BaseChatbotService } from '../../base-chatbot.service';
import { OpenaiService } from '../../openai/services/openai.service';
export class DifyService extends BaseChatbotService<Dify, DifySetting> {
constructor(waMonitor: WAMonitoringService, configService: ConfigService, prismaRepository: PrismaRepository) {
private openaiService: OpenaiService;
constructor(
waMonitor: WAMonitoringService,
configService: ConfigService,
prismaRepository: PrismaRepository,
openaiService: OpenaiService,
) {
super(waMonitor, prismaRepository, 'DifyService', configService);
this.openaiService = openaiService;
}
/**
@ -73,10 +81,9 @@ export class DifyService extends BaseChatbotService<Dify, DifySetting> {
if (this.isAudioMessage(content) && msg) {
try {
this.logger.debug(`[Dify] Downloading audio for Whisper transcription`);
const mediaBuffer = await downloadMediaMessage({ key: msg.key, message: msg.message }, 'buffer', {});
const transcribedText = await this.speechToText(mediaBuffer);
if (transcribedText) {
payload.query = transcribedText;
const transcription = await this.openaiService.speechToText(msg);
if (transcription) {
payload.query = transcription;
} else {
payload.query = '[Audio message could not be transcribed]';
}
@ -151,10 +158,9 @@ export class DifyService extends BaseChatbotService<Dify, DifySetting> {
if (this.isAudioMessage(content) && msg) {
try {
this.logger.debug(`[Dify] Downloading audio for Whisper transcription`);
const mediaBuffer = await downloadMediaMessage({ key: msg.key, message: msg.message }, 'buffer', {});
const transcribedText = await this.speechToText(mediaBuffer);
if (transcribedText) {
payload.inputs.query = transcribedText;
const transcription = await this.openaiService.speechToText(msg);
if (transcription) {
payload.inputs.query = transcription;
} else {
payload.inputs.query = '[Audio message could not be transcribed]';
}
@ -229,10 +235,9 @@ export class DifyService extends BaseChatbotService<Dify, DifySetting> {
if (this.isAudioMessage(content) && msg) {
try {
this.logger.debug(`[Dify] Downloading audio for Whisper transcription`);
const mediaBuffer = await downloadMediaMessage({ key: msg.key, message: msg.message }, 'buffer', {});
const transcribedText = await this.speechToText(mediaBuffer);
if (transcribedText) {
payload.query = transcribedText;
const transcription = await this.openaiService.speechToText(msg);
if (transcription) {
payload.query = transcription;
} else {
payload.query = '[Audio message could not be transcribed]';
}

View File

@ -8,10 +8,19 @@ import { sendTelemetry } from '@utils/sendTelemetry';
import axios from 'axios';
import { BaseChatbotService } from '../../base-chatbot.service';
import { OpenaiService } from '../../openai/services/openai.service';
export class EvolutionBotService extends BaseChatbotService<EvolutionBot, EvolutionBotSetting> {
constructor(waMonitor: WAMonitoringService, configService: ConfigService, prismaRepository: PrismaRepository) {
private openaiService: OpenaiService;
constructor(
waMonitor: WAMonitoringService,
configService: ConfigService,
prismaRepository: PrismaRepository,
openaiService: OpenaiService,
) {
super(waMonitor, prismaRepository, 'EvolutionBotService', configService);
this.openaiService = openaiService;
}
/**
@ -50,6 +59,21 @@ export class EvolutionBotService extends BaseChatbotService<EvolutionBot, Evolut
user: remoteJid,
};
if (this.isAudioMessage(content) && msg) {
try {
this.logger.debug(`[EvolutionBot] Downloading audio for Whisper transcription`);
const transcription = await this.openaiService.speechToText(msg);
if (transcription) {
payload.query = transcription;
} else {
payload.query = '[Audio message could not be transcribed]';
}
} catch (err) {
this.logger.error(`[EvolutionBot] Failed to transcribe audio: ${err}`);
payload.query = '[Audio message could not be transcribed]';
}
}
if (this.isImageMessage(content)) {
const contentSplit = content.split('|');

View File

@ -8,10 +8,18 @@ import { sendTelemetry } from '@utils/sendTelemetry';
import axios from 'axios';
import { BaseChatbotService } from '../../base-chatbot.service';
import { OpenaiService } from '../../openai/services/openai.service';
export class FlowiseService extends BaseChatbotService<Flowise, FlowiseSetting> {
constructor(waMonitor: WAMonitoringService, configService: ConfigService, prismaRepository: PrismaRepository) {
private openaiService: OpenaiService;
constructor(
waMonitor: WAMonitoringService,
configService: ConfigService,
prismaRepository: PrismaRepository,
openaiService: OpenaiService,
) {
super(waMonitor, prismaRepository, 'FlowiseService', configService);
this.openaiService = openaiService;
}
/**
@ -49,6 +57,21 @@ export class FlowiseService extends BaseChatbotService<Flowise, FlowiseSetting>
},
};
if (this.isAudioMessage(content) && msg) {
try {
this.logger.debug(`[EvolutionBot] Downloading audio for Whisper transcription`);
const transcription = await this.openaiService.speechToText(msg);
if (transcription) {
payload.query = transcription;
} else {
payload.query = '[Audio message could not be transcribed]';
}
} catch (err) {
this.logger.error(`[EvolutionBot] Failed to transcribe audio: ${err}`);
payload.query = '[Audio message could not be transcribed]';
}
}
if (this.isImageMessage(content)) {
const contentSplit = content.split('|');

View File

@ -5,14 +5,21 @@ import { Auth, ConfigService, HttpServer } from '@config/env.config';
import { IntegrationSession, N8n, N8nSetting } from '@prisma/client';
import { sendTelemetry } from '@utils/sendTelemetry';
import axios from 'axios';
import { downloadMediaMessage } from 'baileys';
import { BaseChatbotService } from '../../base-chatbot.service';
import { OpenaiService } from '../../openai/services/openai.service';
import { N8nDto } from '../dto/n8n.dto';
export class N8nService extends BaseChatbotService<N8n, N8nSetting> {
constructor(waMonitor: WAMonitoringService, prismaRepository: PrismaRepository, configService: ConfigService) {
private openaiService: OpenaiService;
constructor(
waMonitor: WAMonitoringService,
prismaRepository: PrismaRepository,
configService: ConfigService,
openaiService: OpenaiService,
) {
super(waMonitor, prismaRepository, 'N8nService', configService);
this.openaiService = openaiService;
}
/**
@ -135,10 +142,9 @@ export class N8nService extends BaseChatbotService<N8n, N8nSetting> {
if (this.isAudioMessage(content) && msg) {
try {
this.logger.debug(`[N8n] Downloading audio for Whisper transcription`);
const mediaBuffer = await downloadMediaMessage({ key: msg.key, message: msg.message }, 'buffer', {});
const transcribedText = await this.speechToText(mediaBuffer);
if (transcribedText) {
payload.chatInput = transcribedText;
const transcription = await this.openaiService.speechToText(msg);
if (transcription) {
payload.chatInput = transcription;
} else {
payload.chatInput = '[Audio message could not be transcribed]';
}

View File

@ -176,7 +176,7 @@ export class OpenaiController extends BaseChatbotController<OpenaiBot, OpenaiDto
await this.settings(instance, {
openaiCredsId: data.openaiCredsId,
expire: data.expire || 300,
keywordFinish: data.keywordFinish || 'bye,exit,quit,stop',
keywordFinish: data.keywordFinish || 'bye',
delayMessage: data.delayMessage || 1000,
unknownMessage: data.unknownMessage || 'Sorry, I dont understand',
listeningFromMe: data.listeningFromMe !== undefined ? data.listeningFromMe : true,
@ -385,7 +385,7 @@ export class OpenaiController extends BaseChatbotController<OpenaiBot, OpenaiDto
});
// Convert keywordFinish to string if it's an array
const keywordFinish = Array.isArray(data.keywordFinish) ? data.keywordFinish.join(',') : data.keywordFinish;
const keywordFinish = data.keywordFinish;
// Additional OpenAI-specific fields
const settingsData = {

View File

@ -114,12 +114,9 @@ export class OpenaiService extends BaseChatbotService<OpenaiBot, OpenaiSetting>
}
// Handle keyword finish
const keywordFinish = settings?.keywordFinish?.split(',') || [];
const keywordFinish = settings?.keywordFinish || '';
const normalizedContent = content.toLowerCase().trim();
if (
keywordFinish.length > 0 &&
keywordFinish.some((keyword: string) => normalizedContent === keyword.toLowerCase().trim())
) {
if (keywordFinish.length > 0 && normalizedContent === keywordFinish.toLowerCase()) {
if (settings?.keepOpen) {
await this.prismaRepository.integrationSession.update({
where: {

View File

@ -6,10 +6,19 @@ import { sendTelemetry } from '@utils/sendTelemetry';
import axios from 'axios';
import { BaseChatbotService } from '../../base-chatbot.service';
import { OpenaiService } from '../../openai/services/openai.service';
export class TypebotService extends BaseChatbotService<TypebotModel, any> {
constructor(waMonitor: WAMonitoringService, configService: ConfigService, prismaRepository: PrismaRepository) {
private openaiService: OpenaiService;
constructor(
waMonitor: WAMonitoringService,
configService: ConfigService,
prismaRepository: PrismaRepository,
openaiService: OpenaiService,
) {
super(waMonitor, prismaRepository, 'TypebotService', configService);
this.openaiService = openaiService;
}
/**
@ -58,7 +67,7 @@ export class TypebotService extends BaseChatbotService<TypebotModel, any> {
// Continue an existing chat
const version = this.configService?.get<Typebot>('TYPEBOT').API_VERSION;
let url: string;
let reqData: {};
let reqData: any;
if (version === 'latest') {
url = `${bot.url}/api/v1/sessions/${session.sessionId.split('-')[1]}/continueChat`;
@ -71,6 +80,21 @@ export class TypebotService extends BaseChatbotService<TypebotModel, any> {
};
}
if (this.isAudioMessage(content) && msg) {
try {
this.logger.debug(`[EvolutionBot] Downloading audio for Whisper transcription`);
const transcription = await this.openaiService.speechToText(msg);
if (transcription) {
reqData.message = transcription;
} else {
reqData.message = '[Audio message could not be transcribed]';
}
} catch (err) {
this.logger.error(`[EvolutionBot] Failed to transcribe audio: ${err}`);
reqData.message = '[Audio message could not be transcribed]';
}
}
const response = await axios.post(url, reqData);
// Process the response and send the messages to WhatsApp

View File

@ -115,23 +115,24 @@ export const channelController = new ChannelController(prismaRepository, waMonit
export const evolutionController = new EvolutionController(prismaRepository, waMonitor);
export const metaController = new MetaController(prismaRepository, waMonitor);
export const baileysController = new BaileysController(waMonitor);
// chatbots
const typebotService = new TypebotService(waMonitor, configService, prismaRepository);
export const typebotController = new TypebotController(typebotService, prismaRepository, waMonitor);
const openaiService = new OpenaiService(waMonitor, prismaRepository, configService);
export const openaiController = new OpenaiController(openaiService, prismaRepository, waMonitor);
const difyService = new DifyService(waMonitor, configService, prismaRepository);
// chatbots
const typebotService = new TypebotService(waMonitor, configService, prismaRepository, openaiService);
export const typebotController = new TypebotController(typebotService, prismaRepository, waMonitor);
const difyService = new DifyService(waMonitor, configService, prismaRepository, openaiService);
export const difyController = new DifyController(difyService, prismaRepository, waMonitor);
const evolutionBotService = new EvolutionBotService(waMonitor, configService, prismaRepository);
const evolutionBotService = new EvolutionBotService(waMonitor, configService, prismaRepository, openaiService);
export const evolutionBotController = new EvolutionBotController(evolutionBotService, prismaRepository, waMonitor);
const flowiseService = new FlowiseService(waMonitor, configService, prismaRepository);
const flowiseService = new FlowiseService(waMonitor, configService, prismaRepository, openaiService);
export const flowiseController = new FlowiseController(flowiseService, prismaRepository, waMonitor);
const n8nService = new N8nService(waMonitor, prismaRepository, configService);
const n8nService = new N8nService(waMonitor, prismaRepository, configService, openaiService);
export const n8nController = new N8nController(n8nService, prismaRepository, waMonitor);
const evoaiService = new EvoaiService(waMonitor, prismaRepository, configService);