I run a Lambda function weekly, triggered by an EventBridge rule, to move files from an S3 prefix (landing) to a different prefix in the same bucket (hist). The Lambda has a timeout of 5 minutes.
It works as expected for small files, but for objects of 2–3 GB the Lambda already takes the full 5 minutes to copy and delete each file. It doesn't show a timeout exception, but it seems to move and delete one file, end, and then run the same code again for the next file, which takes another 5 minutes. It does move the files, but the last step — sending a notification — is never executed.
This behavior is quite unexpected. Do you know what the correct way of handling this would be? I am thinking of increasing the Lambda memory until I find the optimal value.
Here is the code and a screenshot of the logs for a better idea.
TIA
/**
 * Weekly archiver: moves every object under `sourcePrefix` that was last
 * modified on or before the Sunday two weeks ago into `destinationPrefix`
 * (same bucket), then posts a summary message to Slack.
 *
 * NOTE(review): CopyObject + DeleteObject are awaited per object, so a
 * multi-GB copy can consume most of the 5-minute timeout. When an
 * EventBridge-triggered (asynchronous) invocation times out, Lambda
 * retries it up to twice by default — which matches the observed
 * "runs the same code again, notification never sent" behavior. Consider
 * raising the timeout, or offloading large moves to S3 Batch Operations /
 * Step Functions, rather than only increasing memory.
 *
 * Relies on module-level bindings defined elsewhere in this file:
 * s3 client, bucketName, sourcePrefix, destinationPrefix, secret helpers,
 * Slack helpers, and the green/red colors.
 */
export async function handler() {
  console.log("Moving objects from ", sourcePrefix, " to ", destinationPrefix);

  // Cutoff = end of day (local time) of the Sunday two weeks back.
  const today = new Date();
  const daysSinceSunday = today.getDay(); // Sunday is 0, Monday is 1, ...
  const twoSundaysAgo = new Date(today);
  twoSundaysAgo.setDate(today.getDate() - daysSinceSunday - 7);
  twoSundaysAgo.setHours(23, 59, 59, 999);

  // FIX: the original message template had unescaped nested backticks and
  // bare `n` instead of `\n` — a syntax error as pasted.
  let messageText =
    `:bucket: *Historical Data Archiver ${twoSundaysAgo.toISOString().slice(0, 10)}*\n` +
    `*Bucket:* \`${bucketName}\`\n` +
    `*S3 Prefix Source:* \`${sourcePrefix}\`\n` +
    `*S3 Prefix Destination:* \`${destinationPrefix}\`\n`;

  const webhookUrl = await getSecretValue(secretNameWebhook, keyNameWebhook);

  try {
    let numObjects = 0;
    let marker; // continuation marker for paginated listings

    // FIX: ListObjects returns at most 1000 keys per call; the original
    // never paginated, silently skipping objects beyond the first page.
    do {
      const listedObjects = await s3.send(new ListObjectsCommand({
        Bucket: bucketName,
        Prefix: sourcePrefix,
        Marker: marker,
      }));

      const contents = listedObjects.Contents ?? [];

      for (const object of contents) {
        const objectDate = new Date(object.LastModified);
        // Only archive objects from two Sundays ago and before.
        if (objectDate <= twoSundaysAgo) {
          // Copy to the hist prefix...
          await s3.send(new CopyObjectCommand({
            Bucket: bucketName,
            CopySource: `${bucketName}/${object.Key}`,
            Key: object.Key.replace(sourcePrefix, destinationPrefix),
          }));
          // ...then delete from the landing prefix.
          await s3.send(new DeleteObjectCommand({ Bucket: bucketName, Key: object.Key }));
          console.log('Successfully moved object:', object.Key);
          numObjects += 1;
        }
      }

      // ListObjects (V1) only sets NextMarker when a Delimiter is used;
      // otherwise the contract is to continue from the last returned key.
      marker = listedObjects.IsTruncated
        ? (listedObjects.NextMarker ?? contents.at(-1)?.Key)
        : undefined;
    } while (marker);

    if (numObjects === 0) {
      console.log('0 objects eligible for archival');
      messageText += `:eight_spoked_asterisk: *Status*: 0 objects eligible for archival\n`;
    } else {
      messageText += `:white_check_mark: *Status*: Successfully moved \`${numObjects}\` objects.\n`;
    }

    // Everything went smoothly — send the summary to Slack.
    const fallbackMessage = `New transition objects from landing to hist S3`;
    await sendTextToSlackBasic(messageText, fallbackMessage, green, webhookUrl);
  } catch (error) {
    console.error(`Error moving objects: ${error}`);
    messageText += `:x: *Error*: \`${error.message}\`\n`;
    const fallbackMessage = `Error hist S3: ${error.message}`;
    await sendTextToSlackBasic(messageText, fallbackMessage, red, webhookUrl);
    // Rethrow so the invocation is marked failed (and retried/DLQ'd).
    throw error;
  }
}