Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
129 changes: 93 additions & 36 deletions localstack/services/awslambda/lambda_api.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@
LAMBDA_RUNTIME_PROVIDED)
from localstack.utils.common import (to_str, load_file, save_file, TMP_FILES, ensure_readable,
mkdir, unzip, is_zip_file, zip_contains_jar_entries, run, short_uid, timestamp,
TIMESTAMP_FORMAT_MILLIS, md5, parse_chunked_data, now_utc, safe_requests,
TIMESTAMP_FORMAT_MILLIS, parse_chunked_data, now_utc, safe_requests, FuncThread,
isoformat_milliseconds)
from localstack.utils.analytics import event_publisher
from localstack.utils.aws.aws_models import LambdaFunction
Expand Down Expand Up @@ -98,6 +98,10 @@
JSON_START_TYPES = tuple(set(JSON_START_CHAR_MAP.keys()) - set(POSSIBLE_JSON_TYPES))
JSON_START_CHARS = tuple(set(functools.reduce(lambda x, y: x + y, JSON_START_CHAR_MAP.values())))

# SQS listener thread settings
SQS_LISTENER_THREAD = {}
SQS_POLL_INTERVAL_SEC = 1

# lambda executor instance
LAMBDA_EXECUTOR = lambda_executors.AVAILABLE_EXECUTORS.get(config.LAMBDA_EXECUTOR, lambda_executors.DEFAULT_EXECUTOR)

Expand Down Expand Up @@ -225,7 +229,7 @@ def process_apigateway_invocation(func_arn, path, payload, headers={},
LOG.warning('Unable to run Lambda function on API Gateway message: %s %s' % (e, traceback.format_exc()))


def process_sns_notification(func_arn, topic_arn, subscription_arn, message, message_attributes, subject='',):
def process_sns_notification(func_arn, topic_arn, subscription_arn, message, message_attributes, subject=''):
try:
event = {
'Records': [{
Expand Down Expand Up @@ -277,38 +281,89 @@ def chunks(lst, n):
LOG.warning('Unable to run Lambda function on Kinesis records: %s %s' % (e, traceback.format_exc()))


def process_sqs_message(message_body, message_attributes, queue_name, region_name=None):
def start_lambda_sqs_listener():
    """Start the background thread that polls SQS queues which have Lambda
    event source mappings configured, forwarding received messages to the
    mapped Lambda functions.

    Idempotent: returns immediately if the polling thread is already running
    (tracked via the module-level SQS_LISTENER_THREAD dict).
    """
    if SQS_LISTENER_THREAD:
        return

    def send_event_to_lambda(queue_arn, queue_url, lambda_arn, messages, region):
        # Convert raw SQS messages into the Lambda SQS event record format.
        records = []
        for msg in messages:
            records.append({
                'body': msg['Body'],
                'receiptHandle': msg['ReceiptHandle'],
                'md5OfBody': msg['MD5OfBody'],
                'eventSourceARN': queue_arn,
                'eventSource': lambda_executors.EVENT_SOURCE_SQS,
                'awsRegion': region,
                'messageId': msg['MessageId'],
                'attributes': msg.get('Attributes', {}),
                'messageAttributes': msg.get('MessageAttributes', {}),
                'md5OfMessageAttributes': msg.get('MD5OfMessageAttributes'),
                'sqs': True,
            })
        event = {'Records': records}

        def delete_messages(result, func_arn, event, error=None, dlq_sent=None, **kwargs):
            if error and not dlq_sent:
                # Skip deleting messages from the queue in case of processing errors AND if
                # the message has not yet been sent to a dead letter queue (DLQ).
                # We'll pick them up and retry next time they become available on the queue.
                return
            sqs_client = aws_stack.connect_to_service('sqs')
            # Batch entry Ids must match [a-zA-Z0-9_-]{1,80}; receipt handles can
            # exceed that length and contain other characters, so use the index.
            entries = [{'Id': str(i), 'ReceiptHandle': r['receiptHandle']}
                for i, r in enumerate(records)]
            sqs_client.delete_message_batch(QueueUrl=queue_url, Entries=entries)

        # TODO implement retries, based on "RedrivePolicy.maxReceiveCount" in the queue settings
        run_lambda(event=event, context={}, func_arn=lambda_arn, asynchronous=True, callback=delete_messages)

    def listener_loop(*args):
        while True:
            try:
                sources = get_event_sources(source_arn=r'.*:sqs:.*')
                if not sources:
                    # Temporarily disable polling if no event sources are configured
                    # anymore. The loop will get restarted next time a message
                    # arrives and if an event source is configured.
                    SQS_LISTENER_THREAD.pop('_thread_')
                    return

                sqs_client = aws_stack.connect_to_service('sqs')
                for source in sources:
                    queue_arn = '?'
                    try:
                        queue_arn = source['EventSourceArn']
                        lambda_arn = source['FunctionArn']
                        region_name = queue_arn.split(':')[3]
                        queue_url = aws_stack.sqs_queue_url_for_arn(queue_arn)
                        result = sqs_client.receive_message(QueueUrl=queue_url)
                        messages = result.get('Messages')
                        if not messages:
                            continue
                        send_event_to_lambda(queue_arn, queue_url, lambda_arn, messages, region=region_name)
                    except Exception as e:
                        # best-effort polling: log and continue with the next source
                        LOG.debug('Unable to poll SQS messages for queue %s: %s' % (queue_arn, e))
            except Exception:
                pass
            finally:
                time.sleep(SQS_POLL_INTERVAL_SEC)

    LOG.debug('Starting SQS message polling thread for Lambda API')
    SQS_LISTENER_THREAD['_thread_'] = FuncThread(listener_loop)
    SQS_LISTENER_THREAD['_thread_'].start()


def process_sqs_message(queue_name, message_body, message_attributes, region_name=None):
    """Handle a message sent to an SQS queue: if the queue has a Lambda event
    source mapping configured, ensure the SQS polling thread is running so the
    message gets picked up and forwarded to the mapped Lambda function.

    :param queue_name: name of the SQS queue the message was sent to
    :param message_body: raw message body (kept for interface compatibility;
        delivery happens via the polling thread, not from here)
    :param message_attributes: message attributes (kept for interface compatibility)
    :param region_name: AWS region; defaults to the current stack region
    :return: True if a source mapping exists (listener started), False otherwise
    """
    # feed message into the first listening lambda (message should only get processed once)
    try:
        region_name = region_name or aws_stack.get_region()
        queue_arn = aws_stack.sqs_queue_arn(queue_name, region_name=region_name)
        sources = get_event_sources(source_arn=queue_arn)
        arns = [s.get('FunctionArn') for s in sources]
        LOG.debug('Found %s source mappings for event from SQS queue %s: %s' % (len(arns), queue_arn, arns))
        source = (sources or [None])[0]
        if not source:
            return False
        start_lambda_sqs_listener()
        return True
    except Exception as e:
        LOG.warning('Unable to run Lambda function on SQS messages: %s %s' % (e, traceback.format_exc()))

Expand All @@ -317,25 +372,26 @@ def get_event_sources(func_name=None, source_arn=None):
result = []
for m in event_source_mappings:
if not func_name or (m['FunctionArn'] in [func_name, func_arn(func_name)]):
if _arn_match(mapped=m['EventSourceArn'], occurred=source_arn):
if _arn_match(mapped=m['EventSourceArn'], searched=source_arn):
result.append(m)
return result


def _arn_match(mapped, occurred):
if not occurred or mapped == occurred:
def _arn_match(mapped, searched):
if not searched or mapped == searched:
return True
# Some types of ARNs can end with a path separated by slashes, for
# example the ARN of a DynamoDB stream is tableARN/stream/ID. It's
# example the ARN of a DynamoDB stream is tableARN/stream/ID. It's
# a little counterintuitive that a more specific mapped ARN can
# match a less specific ARN on the event, but some integration tests
# rely on it for things like subscribing to a stream and matching an
# event labeled with the table ARN.
elif mapped.startswith(occurred):
suffix = mapped[len(occurred):]
if re.match(r'^%s$' % searched, mapped):
return True
if mapped.startswith(searched):
suffix = mapped[len(searched):]
return suffix[0] == '/'
else:
return False
return False


def get_function_version(arn, version):
Expand Down Expand Up @@ -374,7 +430,8 @@ def do_update_alias(arn, alias, version, description=None):


@cloudwatched('lambda')
def run_lambda(event, context, func_arn, version=None, suppress_output=False, asynchronous=False):
def run_lambda(event, context, func_arn, version=None, suppress_output=False,
asynchronous=False, callback=None):
if suppress_output:
stdout_ = sys.stdout
stderr_ = sys.stderr
Expand All @@ -388,8 +445,8 @@ def run_lambda(event, context, func_arn, version=None, suppress_output=False, as
return not_found_error(msg='The resource specified in the request does not exist.')
if not context:
context = LambdaContext(func_details, version)
result = LAMBDA_EXECUTOR.execute(func_arn, func_details,
event, context=context, version=version, asynchronous=asynchronous)
result = LAMBDA_EXECUTOR.execute(func_arn, func_details, event, context=context,
version=version, asynchronous=asynchronous, callback=callback)
except Exception as e:
return error_response('Error executing Lambda function %s: %s %s' % (func_arn, e, traceback.format_exc()))
finally:
Expand Down
10 changes: 8 additions & 2 deletions localstack/services/awslambda/lambda_executors.py
Original file line number Diff line number Diff line change
Expand Up @@ -94,26 +94,32 @@ def __init__(self):
# keeps track of each function arn and the last time it was invoked
self.function_invoke_times = {}

def execute(self, func_arn, func_details, event, context=None, version=None, asynchronous=False):
def execute(self, func_arn, func_details, event, context=None, version=None,
asynchronous=False, callback=None):
def do_execute(*args):
# set the invocation time in milliseconds
invocation_time = int(time.time() * 1000)
# start the execution
raised_error = None
result = None
dlq_sent = None
try:
result = self._execute(func_arn, func_details, event, context, version)
except Exception as e:
raised_error = e
if asynchronous:
if get_from_event(event, 'eventSource') == EVENT_SOURCE_SQS:
sqs_queue_arn = get_from_event(event, 'eventSourceARN')
if sqs_queue_arn:
# event source is SQS, send event back to dead letter queue
sqs_error_to_dead_letter_queue(sqs_queue_arn, event, e)
dlq_sent = sqs_error_to_dead_letter_queue(sqs_queue_arn, event, e)
else:
# event source is not SQS, send back to lambda dead letter queue
lambda_error_to_dead_letter_queue(func_details, event, e)
raise e
finally:
self.function_invoke_times[func_arn] = invocation_time
callback and callback(result, func_arn, event, error=raised_error, dlq_sent=dlq_sent)
# return final result
return result

Expand Down
11 changes: 1 addition & 10 deletions localstack/services/s3/s3_listener.py
Original file line number Diff line number Diff line change
Expand Up @@ -152,15 +152,6 @@ def get_event_message(event_name, bucket_name, file_name='testfile.txt', version
}


def queue_url_for_arn(queue_arn):
    """Resolve an SQS queue ARN to its queue URL; pass through if the
    value is already a URL."""
    if '://' in queue_arn:
        # already a URL, nothing to resolve
        return queue_arn
    # ARN layout: arn:aws:sqs:<region>:<account-id>:<queue-name>
    arn_parts = queue_arn.split(':')
    account_id, queue_name = arn_parts[4], arn_parts[5]
    client = aws_stack.connect_to_service('sqs')
    response = client.get_queue_url(
        QueueName=queue_name, QueueOwnerAWSAccountId=account_id)
    return response['QueueUrl']


def send_notifications(method, bucket_name, object_path, version_id):
bucket_name = normalize_bucket_name(bucket_name)
for bucket, notifs in S3_NOTIFICATIONS.items():
Expand Down Expand Up @@ -207,7 +198,7 @@ def send_notification_for_subscriber(notif, bucket_name, object_path, version_id
if notif.get('Queue'):
sqs_client = aws_stack.connect_to_service('sqs')
try:
queue_url = queue_url_for_arn(notif['Queue'])
queue_url = aws_stack.sqs_queue_url_for_arn(notif['Queue'])
sqs_client.send_message(QueueUrl=queue_url, MessageBody=message)
except Exception as e:
LOGGER.warning('Unable to send notification for S3 bucket "%s" to SQS queue "%s": %s' %
Expand Down
47 changes: 10 additions & 37 deletions localstack/services/sqs/sqs_listener.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,4 @@
import re
import uuid
import json
import xmltodict
from moto.sqs.utils import parse_message_attributes
Expand All @@ -10,28 +9,13 @@
from localstack import config
from localstack.config import HOSTNAME_EXTERNAL, SQS_PORT_EXTERNAL
from localstack.utils.aws import aws_stack
from localstack.utils.common import to_str, md5, clone
from localstack.utils.common import to_str, clone
from localstack.utils.analytics import event_publisher
from localstack.services.awslambda import lambda_api
from localstack.services.generic_proxy import ProxyListener


# XML namespace of the SQS API protocol (2012-11-05 revision).
XMLNS_SQS = 'http://queue.amazonaws.com/doc/2012-11-05/'

# Response template mimicking the standard SQS SendMessage response shape,
# returned when a message is handed to a listening Lambda directly instead
# of being enqueued. The request ID is a fixed placeholder.
SUCCESSFUL_SEND_MESSAGE_XML_TEMPLATE = """
<?xml version="1.0"?>
<SendMessageResponse xmlns="%s">
<SendMessageResult>
<MD5OfMessageAttributes>{message_attr_hash}</MD5OfMessageAttributes>
<MD5OfMessageBody>{message_body_hash}</MD5OfMessageBody>
<MessageId>{message_id}</MessageId>
</SendMessageResult>
<ResponseMetadata>
<RequestId>00000000-0000-0000-0000-000000000000</RequestId>
</ResponseMetadata>
</SendMessageResponse>
""".strip() % XMLNS_SQS

# list of valid attribute names, and names not supported by the backend (elasticmq)
VALID_ATTRIBUTE_NAMES = ['DelaySeconds', 'MaximumMessageSize', 'MessageRetentionPeriod',
'ReceiveMessageWaitTimeSeconds', 'RedrivePolicy', 'VisibilityTimeout',
Expand Down Expand Up @@ -64,6 +48,7 @@ def parse_request_data(method, path, data):
# 'Attribute.1.Name': ['Policy'],
# 'Attribute.1.Value': ['...']
# }
# TODO still needed?
def format_message_attributes(data):
prefix = 'MessageAttribute'
names = []
Expand Down Expand Up @@ -204,12 +189,8 @@ def forward_request(self, method, path, data, headers):

if req_data:
action = req_data.get('Action', [None])[0]
if action == 'SendMessage':
new_response = self._send_message(path, data, req_data, headers)
if new_response:
return new_response

elif action == 'SetQueueAttributes':
if action == 'SetQueueAttributes':
queue_url = _queue_url(path, req_data, headers)
forward_attrs = _set_queue_attributes(queue_url, req_data)
if len(req_data) != len(forward_attrs):
Expand Down Expand Up @@ -251,6 +232,10 @@ def return_response(self, method, path, data, headers, response, request_handler
if action == 'GetQueueAttributes':
content_str = _add_queue_attributes(path, req_data, content_str, headers)

# instruct listeners to fetch new SQS message
if action == 'SendMessage':
self._process_sent_message(path, data, req_data, headers)

# patch the response and return the correct endpoint URLs / ARNs
if action in ('CreateQueue', 'GetQueueUrl', 'ListQueues', 'GetQueueAttributes'):
if config.USE_SSL and '<QueueUrl>http://' in content_str:
Expand Down Expand Up @@ -317,7 +302,7 @@ def return_response(self, method, path, data, headers, response, request_handler
# dataType: 'String'
# }
# }

# TODO still needed?
@classmethod
def get_message_attributes_md5(self, req_data):
req_data = clone(req_data)
Expand All @@ -344,24 +329,12 @@ def get_message_attributes_md5(self, req_data):
message_attr_hash = moto_message.attribute_md5
return message_attr_hash

def _process_sent_message(self, path, data, req_data, headers):
    """Notify the Lambda API about a newly sent SQS message, so it can
    ensure the SQS polling thread is running for queues that have Lambda
    event source mappings configured.

    :param path: request path (used to derive the queue URL)
    :param data: raw request payload (unused, kept for listener signature parity)
    :param req_data: parsed request parameters
    :param headers: request headers (used to derive the queue URL)
    """
    queue_url = _queue_url(path, req_data, headers)
    # queue name is the last path segment of the queue URL
    queue_name = queue_url.rpartition('/')[2]
    message_body = req_data.get('MessageBody', [None])[0]
    message_attributes = format_message_attributes(req_data)

    lambda_api.process_sqs_message(queue_name, message_body, message_attributes)


# extract the external port used by the client to make the request
Expand Down
Loading