I have a simple aiogram Telegram bot (running on AWS Lambda) that forwards incoming chat messages to GPT-3.5-turbo as prompts and returns the answers to the Telegram user's chat.
The issue is that it goes into an infinite loop: the user sends a text message to the bot, and the bot forwards it as a prompt to GPT. While GPT is generating an answer (if it takes more than 20 seconds), Telegram's webhook delivery assumes the bot never received the update and sends the same request again. Then I get the answer to the message I sent first, and this repeats over and over.
Here is what I get with getWebhookInfo:
{"ok":true,"result":{"url":"#myurl","has_custom_certificate":false,"pending_update_count":1,"last_error_date":1687000655,"last_error_message":"Wrong response from the webhook: 504 Gateway Timeout","max_connections":40,"ip_address": #myipaddress}}
And here is my aiogram bot code:
async def gpt_talk(message: types.Message):
    """Forward the user's chat message to GPT-3.5-turbo and reply with its answer.

    Registered as a catch-all message handler; `message` is the incoming
    Telegram message whose text becomes the user prompt.
    """
    user_text = message.text
    openai.api_key = settings.GPT_API_KEY
    messages = [
        {'role': 'system', 'content': 'You are cinema expert'},
        # BUG FIX: the original sent a hard-coded placeholder string and left
        # user_text unused — the user's actual message is the prompt.
        {'role': 'user', 'content': user_text},
    ]
    # BUG FIX: ChatCompletion.create is a blocking (synchronous) call; inside
    # an async handler it stalls the event loop for the entire GPT round-trip,
    # which is what lets the webhook request run into its timeout. The async
    # variant is awaited instead, keeping the loop responsive.
    chat_completion = await openai.ChatCompletion.acreate(
        model="gpt-3.5-turbo",
        messages=messages,
    )
    chatgpt_response = chat_completion.choices[0].message.content
    await message.reply(chatgpt_response)
# Module-level wiring: a single Bot/Dispatcher pair is created at import time
# so a warm AWS Lambda container reuses them across invocations.
bot = Bot(token=settings.BOT_TOKEN)
dp = Dispatcher(bot)
# No filters given, so gpt_talk receives every incoming message.
dp.register_message_handler(gpt_talk)
async def process_event(event, dp: Dispatcher):
    """Dispatch one raw webhook payload through aiogram.

    Binds the dispatcher's bot as the "current" bot for this context, then
    converts the incoming event dict into an Update and feeds it to the
    dispatcher so the registered handlers run.
    """
    Bot.set_current(dp.bot)
    await dp.process_update(types.Update.to_object(event))
async def main(event):
    """Process one webhook update and always report success.

    Returns 'ok' even when handling fails: if an exception propagated, the
    Lambda would answer Telegram with a non-200 status and Telegram would
    redeliver the very same update, producing an endless retry loop. The
    error is logged (CloudWatch picks up stderr/stdout) instead.
    """
    try:
        await process_event(event, dp)
    except Exception:
        # Swallow at the webhook boundary: a 200 must still reach Telegram,
        # otherwise the update is re-sent indefinitely.
        import logging
        logging.exception('Failed to process Telegram update')
    return 'ok'
def lambda_handler(event, context):
    """AWS Lambda handler: run the async pipeline to completion.

    Deliberately reuses one event loop across warm invocations (the aiogram
    Bot's underlying HTTP session is bound to the loop it was first used on),
    but guards against get_event_loop() raising when no current loop exists —
    implicit loop creation is deprecated since Python 3.10.
    """
    try:
        loop = asyncio.get_event_loop()
    except RuntimeError:
        # No current event loop in this thread — create and install one.
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
    return loop.run_until_complete(main(event))
if __name__ == '__main__':
    # Local development path: long-polling instead of the Lambda webhook.
    # skip_updates=True discards updates queued while the bot was offline.
    executor.start_polling(dp, skip_updates=True)