Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion bin/Dockerfile.base
Original file line number Diff line number Diff line change
Expand Up @@ -90,7 +90,7 @@ ADD requirements.txt .
RUN (pip install --upgrade pip) && \
(test `which virtualenv` || pip install virtualenv || sudo pip install virtualenv) && \
(virtualenv .testvenv && source .testvenv/bin/activate && \
pip install -q six==1.10.0 && pip install -q -r requirements.txt && rm -rf .testvenv) || exit 1
pip install -q 'localstack-ext[full]' && pip install -q -r requirements.txt && rm -rf .testvenv) || exit 1

# add files required to run "make install-web"
ADD Makefile .
Expand Down
5 changes: 4 additions & 1 deletion localstack/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -108,6 +108,9 @@
EXTRA_CORS_ALLOWED_HEADERS = os.environ.get('EXTRA_CORS_ALLOWED_HEADERS', '').strip()
EXTRA_CORS_EXPOSE_HEADERS = os.environ.get('EXTRA_CORS_EXPOSE_HEADERS', '').strip()

# whether to disable publishing events to the API
DISABLE_EVENTS = os.environ.get('DISABLE_EVENTS') in TRUE_STRINGS


def has_docker():
try:
Expand Down Expand Up @@ -147,7 +150,7 @@ def is_linux():
'LAMBDA_EXECUTOR', 'LAMBDA_REMOTE_DOCKER', 'LAMBDA_DOCKER_NETWORK', 'LAMBDA_REMOVE_CONTAINERS',
'USE_SSL', 'DEBUG', 'KINESIS_ERROR_PROBABILITY', 'DYNAMODB_ERROR_PROBABILITY', 'PORT_WEB_UI',
'START_WEB', 'DOCKER_BRIDGE_IP', 'DEFAULT_REGION', 'LAMBDA_JAVA_OPTS', 'LOCALSTACK_API_KEY',
'LAMBDA_CONTAINER_REGISTRY', 'TEST_AWS_ACCOUNT_ID']
'LAMBDA_CONTAINER_REGISTRY', 'TEST_AWS_ACCOUNT_ID', 'DISABLE_EVENTS']

for key, value in six.iteritems(DEFAULT_SERVICE_PORTS):
clean_key = key.upper().replace('-', '_')
Expand Down
5 changes: 2 additions & 3 deletions localstack/services/s3/s3_starter.py
Original file line number Diff line number Diff line change
Expand Up @@ -78,9 +78,8 @@ def s3_update_acls(self, request, query, bucket_name, key_name):
@classmethod
def Bucket_create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
result = create_from_cloudformation_json_orig(resource_name, cloudformation_json, region_name)
tags = cloudformation_json['Properties'].get('Tags', [])
for tag in tags:
result.tags.tag_set.tags.append(s3_models.FakeTag(tag['Key'], tag['Value']))
# remove the bucket from the backend, as our template_deployer will take care of creating the resource
s3_models.s3_backend.buckets.pop(resource_name)
return result

create_from_cloudformation_json_orig = s3_models.FakeBucket.create_from_cloudformation_json
Expand Down
8 changes: 5 additions & 3 deletions localstack/utils/analytics/event_publisher.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
import json
import time
from six.moves import queue
from localstack.config import TMP_FOLDER, CONFIG_FILE_PATH
from localstack import config
from localstack.constants import ENV_INTERNAL_TEST_RUN, API_ENDPOINT
from localstack.utils.common import (JsonObject, to_str,
timestamp, short_uid, save_file, FuncThread, load_file)
Expand Down Expand Up @@ -91,11 +91,11 @@ def get_or_create_file(config_file):


def get_config_file_homedir():
return get_or_create_file(CONFIG_FILE_PATH)
return get_or_create_file(config.CONFIG_FILE_PATH)


def get_config_file_tempdir():
return get_or_create_file(os.path.join(TMP_FOLDER, '.localstack'))
return get_or_create_file(os.path.join(config.TMP_FOLDER, '.localstack'))


def get_machine_id():
Expand Down Expand Up @@ -163,6 +163,8 @@ def get_hash(name):

def fire_event(event_type, payload=None):
global SENDER_THREAD
if config.DISABLE_EVENTS:
return
if not SENDER_THREAD:
SENDER_THREAD = FuncThread(poll_and_send_messages, {})
SENDER_THREAD.start()
Expand Down
68 changes: 61 additions & 7 deletions localstack/utils/cloudformation/template_deployer.py
Original file line number Diff line number Diff line change
Expand Up @@ -100,6 +100,45 @@ def attr_val(val):
return result


def s3_bucket_notification_config(params, **kwargs):
    """Translate the CloudFormation 'NotificationConfiguration' property of an
    S3 bucket into the parameters expected by the S3 API call
    put_bucket_notification_configuration.

    Returns None when the template defines no notification configuration,
    which signals that the API call can be skipped entirely.
    """
    cf_notif = params.get('NotificationConfiguration')
    if not cf_notif:
        return None

    # (CF property key, S3 API result key, CF ARN attribute, S3 API ARN attribute)
    mappings = [
        ('LambdaConfigurations', 'LambdaFunctionConfigurations', 'Function', 'LambdaFunctionArn'),
        ('QueueConfigurations', 'QueueConfigurations', 'Queue', 'QueueArn'),
        ('TopicConfigurations', 'TopicConfigurations', 'Topic', 'TopicArn'),
    ]

    api_notif = {}
    for cf_key, api_key, arn_src, arn_dst in mappings:
        converted = []
        for cf_entry in cf_notif.get(cf_key) or []:
            api_entry = {
                arn_dst: cf_entry[arn_src],
                'Events': [cf_entry['Event']]
            }
            rules = cf_entry.get('Filter', {}).get('S3Key', {}).get('Rules')
            if rules:
                api_entry['Filter'] = {'Key': {'FilterRules': rules}}
            converted.append(api_entry)
        api_notif[api_key] = converted

    return {
        'Bucket': params.get('BucketName') or PLACEHOLDER_RESOURCE_NAME,
        'NotificationConfiguration': api_notif
    }


def select_parameters(*param_names):
    """Return a parameter-selector function for RESOURCE_TO_FUNCTION entries.

    The returned callable takes a resource params dict (plus ignored kwargs)
    and keeps only the entries whose keys are listed in *param_names*.
    """
    # dict comprehension instead of dict([...]) — same result, idiomatic form
    return lambda params, **kwargs: {k: v for k, v in params.items() if k in param_names}

Expand Down Expand Up @@ -130,14 +169,17 @@ def replace(params, **kwargs):
# maps resource types to functions and parameters for creation
RESOURCE_TO_FUNCTION = {
'S3::Bucket': {
'create': {
'create': [{
'function': 'create_bucket',
'parameters': {
'Bucket': ['BucketName', PLACEHOLDER_RESOURCE_NAME],
'ACL': lambda params, **kwargs: convert_acl_cf_to_s3(params.get('AccessControl', 'PublicRead')),
'CreateBucketConfiguration': lambda params, **kwargs: get_bucket_location_config()
}
},
}, {
'function': 'put_bucket_notification_configuration',
'parameters': s3_bucket_notification_config
}],
'delete': {
'function': 'delete_bucket',
'parameters': {
Expand Down Expand Up @@ -518,10 +560,8 @@ def retrieve_resource_details(resource_id, resource_status, resources, stack_nam
resource_props = resource.get('Properties')
try:
if resource_type == 'Lambda::Function':
resource_props['FunctionName'] = resource_props.get('FunctionName',
'{}-lambda-{}'.format(
stack_name[:45], common.short_uid()))

resource_props['FunctionName'] = (resource_props.get('FunctionName') or
'{}-lambda-{}'.format(stack_name[:45], common.short_uid()))
resource_id = resource_props['FunctionName'] if resource else resource_id
return aws_stack.connect_to_service('lambda').get_function(FunctionName=resource_id)
elif resource_type == 'Lambda::Version':
Expand Down Expand Up @@ -621,7 +661,17 @@ def retrieve_resource_details(resource_id, resource_status, resources, stack_nam
elif resource_type == 'S3::Bucket':
bucket_name = resource_props.get('BucketName') or resource_id
bucket_name = resolve_refs_recursively(stack_name, bucket_name, resources)
return aws_stack.connect_to_service('s3').get_bucket_location(Bucket=bucket_name)
s3_client = aws_stack.connect_to_service('s3')
response = s3_client.get_bucket_location(Bucket=bucket_name)
notifs = resource_props.get('NotificationConfiguration')
if not response or not notifs:
return response
configs = s3_client.get_bucket_notification_configuration(Bucket=bucket_name)
has_notifs = (configs.get('TopicConfigurations') or configs.get('QueueConfigurations') or
configs.get('LambdaFunctionConfigurations'))
if notifs and not has_notifs:
return None
return response
elif resource_type == 'S3::BucketPolicy':
bucket_name = resource_props.get('Bucket') or resource_id
bucket_name = resolve_refs_recursively(stack_name, bucket_name, resources)
Expand Down Expand Up @@ -941,6 +991,10 @@ def fix_placeholders(o, **kwargs):
# assign default values if empty
params = common.merge_recursive(defaults, params)

# this is an indicator that we should skip this resource deployment, and return
if params is None:
return

# convert refs and boolean strings
for param_key, param_value in dict(params).items():
if param_value is not None:
Expand Down
13 changes: 13 additions & 0 deletions tests/integration/templates/template1.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,19 @@ Resources:
Properties:
AccessControl: PublicRead
BucketName: cf-test-bucket-1
NotificationConfiguration:
LambdaConfigurations:
- Event: "s3:ObjectCreated:*"
Function: aws:arn:lambda:test:testfunc
QueueConfigurations:
- Event: "s3:ObjectDeleted:*"
Queue: aws:arn:sqs:test:testqueue
Filter:
  S3Key:
    Rules:
      - { Name: name1, Value: value1 }
      - { Name: name2, Value: value2 }
Tags:
- Key: foobar
Value:
Expand Down
9 changes: 9 additions & 0 deletions tests/integration/test_cloudformation.py
Original file line number Diff line number Diff line change
Expand Up @@ -222,6 +222,15 @@ def check_stack():
self.assertIn('Tags', queue_tags)
self.assertEqual(queue_tags['Tags'], {'key1': 'value1', 'key2': 'value2'})

# assert that bucket notifications have been created
notifs = s3.get_bucket_notification_configuration(Bucket='cf-test-bucket-1')
self.assertIn('QueueConfigurations', notifs)
self.assertIn('LambdaFunctionConfigurations', notifs)
self.assertEqual(notifs['QueueConfigurations'][0]['QueueArn'], 'aws:arn:sqs:test:testqueue')
self.assertEqual(notifs['QueueConfigurations'][0]['Events'], ['s3:ObjectDeleted:*'])
self.assertEqual(notifs['LambdaFunctionConfigurations'][0]['LambdaFunctionArn'], 'aws:arn:lambda:test:testfunc')
self.assertEqual(notifs['LambdaFunctionConfigurations'][0]['Events'], ['s3:ObjectCreated:*'])

# assert that subscriptions have been created
subs = sns.list_subscriptions()['Subscriptions']
subs = [s for s in subs if (':%s:cf-test-queue-1' % TEST_AWS_ACCOUNT_ID) in s['Endpoint']]
Expand Down