4 changes: 2 additions & 2 deletions localstack/plugins.py
@@ -5,7 +5,7 @@
from localstack.services.sqs import sqs_listener, sqs_starter
from localstack.services.iam import iam_listener
from localstack.services.infra import (register_plugin, Plugin,
start_s3, start_sns, start_ses, start_apigateway, start_elasticsearch_service, start_lambda,
start_sns, start_ses, start_apigateway, start_elasticsearch_service, start_lambda,
start_redshift, start_firehose, start_cloudwatch, start_dynamodbstreams, start_route53,
start_ssm, start_sts, start_secretsmanager, start_iam, start_cloudwatch_logs)
from localstack.services.kinesis import kinesis_listener, kinesis_starter
@@ -25,7 +25,7 @@ def register_localstack_plugins():
start=es_starter.start_elasticsearch,
check=es_starter.check_elasticsearch))
register_plugin(Plugin('s3',
start=start_s3,
start=s3_starter.start_s3,
check=s3_starter.check_s3,
listener=s3_listener.UPDATE_S3))
register_plugin(Plugin('sns',
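For orientation, here is a minimal sketch of the Plugin/register_plugin pattern exercised above. The class and registry internals below are illustrative stand-ins, not LocalStack's actual implementation; only the call shape (Plugin(name, start=..., check=..., listener=...)) is taken from the diff itself.

import logging

LOG = logging.getLogger(__name__)

class Plugin(object):
    # Illustrative stand-in: bundles a service name with its lifecycle hooks.
    def __init__(self, name, start=None, check=None, listener=None):
        self.name = name
        self.start = start          # callable that launches the service backend
        self.check = check          # callable that probes the backend's health
        self.listener = listener    # proxy listener that rewrites requests/responses

PLUGINS = {}

def register_plugin(plugin):
    # Illustrative: keep one plugin per service name so the infra layer can start it on demand.
    PLUGINS[plugin.name] = plugin

def start_service(name, port=None, asynchronous=True):
    plugin = PLUGINS[name]
    LOG.info('Starting service %s', name)
    return plugin.start(port=port, asynchronous=asynchronous, update_listener=plugin.listener)

With this change, starting the 's3' plugin dispatches to s3_starter.start_s3 instead of the start_s3 helper that previously lived in infra.py.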
12 changes: 3 additions & 9 deletions localstack/services/infra.py
@@ -12,19 +12,19 @@
import pkgutil
from localstack import constants, config
from localstack.constants import (
ENV_DEV, DEFAULT_REGION, LOCALSTACK_VENV_FOLDER, DEFAULT_PORT_S3_BACKEND,
ENV_DEV, DEFAULT_REGION, LOCALSTACK_VENV_FOLDER,
DEFAULT_PORT_APIGATEWAY_BACKEND, DEFAULT_PORT_SNS_BACKEND, DEFAULT_PORT_IAM_BACKEND)
from localstack.config import USE_SSL
from localstack.utils import common, persistence
from localstack.utils.common import (run, TMP_THREADS, in_ci, run_cmd_safe, get_free_tcp_port,
TIMESTAMP_FORMAT, FuncThread, ShellCommandThread, mkdir, get_service_protocol)
from localstack.utils.analytics import event_publisher
from localstack.services import generic_proxy, install
from localstack.services.es import es_api
from localstack.services.firehose import firehose_api
from localstack.services.awslambda import lambda_api
from localstack.services.dynamodbstreams import dynamodbstreams_api
from localstack.services.es import es_api
from localstack.services.generic_proxy import GenericProxy
from localstack.services.dynamodbstreams import dynamodbstreams_api

# flag to indicate whether signal handlers have been set up already
SIGNAL_HANDLERS_SETUP = False
@@ -153,12 +153,6 @@ def start_apigateway(port=None, asynchronous=False, update_listener=None):
backend_port=DEFAULT_PORT_APIGATEWAY_BACKEND, update_listener=update_listener)


def start_s3(port=None, asynchronous=False, update_listener=None):
port = port or config.PORT_S3
return start_moto_server('s3', port, name='S3', asynchronous=asynchronous,
backend_port=DEFAULT_PORT_S3_BACKEND, update_listener=update_listener)


def start_sns(port=None, asynchronous=False, update_listener=None):
port = port or config.PORT_SNS
return start_moto_server('sns', port, name='SNS', asynchronous=asynchronous,
1 change: 1 addition & 0 deletions localstack/services/s3/s3_listener.py
@@ -481,6 +481,7 @@ def forward_request(self, method, path, data, headers):
# persist this API call to disk
persistence.record('s3', method, path, data, headers)

# parse query params
parsed = urlparse.urlparse(path)
query = parsed.query
path = parsed.path
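The listener splits the raw request path with urlparse before inspecting query parameters. A minimal standalone sketch of the same split (Python 3 names, independent of the six-based import used in the listener):

from urllib.parse import urlparse, parse_qs

def split_path_and_query(raw_path):
    # '/bucket/key?uploads&partNumber=1' -> ('/bucket/key', {'uploads': [''], 'partNumber': ['1']})
    parsed = urlparse(raw_path)
    return parsed.path, parse_qs(parsed.query, keep_blank_values=True)

path, query = split_path_and_query('/my-bucket/my-key?partNumber=1&uploadId=abc')
assert path == '/my-bucket/my-key'
assert query['uploadId'] == ['abc']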
42 changes: 42 additions & 0 deletions localstack/services/s3/s3_starter.py
@@ -1,11 +1,20 @@
import sys
import logging
import traceback
from moto.s3 import models as s3_models
from moto.server import main as moto_main
from localstack import config
from localstack.constants import DEFAULT_PORT_S3_BACKEND
from localstack.utils.aws import aws_stack
from localstack.utils.common import wait_for_port_open
from localstack.services.infra import (
get_service_protocol, start_proxy_for_service, do_run, setup_logging)

LOGGER = logging.getLogger(__name__)

# max file size for S3 objects (in MB)
S3_MAX_FILE_SIZE_MB = 128


def check_s3(expect_shutdown=False, print_error=False):
out = None
@@ -21,3 +30,36 @@ def check_s3(expect_shutdown=False, print_error=False):
assert out is None
else:
assert isinstance(out['Buckets'], list)


def start_s3(port=None, backend_port=None, asynchronous=None, update_listener=None):
port = port or config.PORT_S3
backend_port = DEFAULT_PORT_S3_BACKEND
cmd = 'python "%s" s3 -p %s -H 0.0.0.0' % (__file__, backend_port)
print('Starting mock S3 (%s port %s)...' % (get_service_protocol(), port))
start_proxy_for_service('s3', port, backend_port, update_listener)
env_vars = {'PYTHONPATH': ':'.join(sys.path)}
return do_run(cmd, asynchronous, env_vars=env_vars)


def apply_patches():
s3_models.DEFAULT_KEY_BUFFER_SIZE = S3_MAX_FILE_SIZE_MB * 1024 * 1024

def init(self, name, value, storage='STANDARD', etag=None, is_versioned=False, version_id=0, max_buffer_size=None):
return original_init(self, name, value, storage=storage, etag=etag, is_versioned=is_versioned,
version_id=version_id, max_buffer_size=s3_models.DEFAULT_KEY_BUFFER_SIZE)

original_init = s3_models.FakeKey.__init__
s3_models.FakeKey.__init__ = init


def main():
setup_logging()
# patch moto implementation
apply_patches()
# start API
sys.exit(moto_main())


if __name__ == '__main__':
main()
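apply_patches() above uses a common wrap-and-replace monkeypatching pattern: keep a reference to the original __init__, then install a wrapper that forces one argument before delegating. A generic, self-contained sketch of the same idea (Target is an illustrative stand-in for a class owned by another library, such as moto's FakeKey):

class Target(object):
    # Illustrative stand-in for a third-party class.
    def __init__(self, name, max_buffer_size=16):
        self.name = name
        self.max_buffer_size = max_buffer_size

def apply_patches(forced_buffer_size):
    original_init = Target.__init__

    def patched_init(self, name, max_buffer_size=None):
        # Delegate to the original constructor, but force our own buffer size.
        original_init(self, name, max_buffer_size=forced_buffer_size)

    Target.__init__ = patched_init

apply_patches(forced_buffer_size=128 * 1024 * 1024)
assert Target('example').max_buffer_size == 128 * 1024 * 1024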
64 changes: 36 additions & 28 deletions tests/integration/test_s3.py
@@ -7,7 +7,8 @@
from io import BytesIO
from localstack import config
from localstack.utils.aws import aws_stack
from localstack.utils.common import short_uid, get_service_protocol, to_bytes, safe_requests, to_str
from localstack.utils.common import (
short_uid, get_service_protocol, to_bytes, safe_requests, to_str, new_tmp_file, rm_rf)

TEST_BUCKET_NAME_WITH_POLICY = 'test_bucket_policy_1'
TEST_BUCKET_WITH_NOTIFICATION = 'test_bucket_notification_1'
@@ -40,11 +41,11 @@ def test_bucket_policy(self):
Bucket=TEST_BUCKET_NAME_WITH_POLICY,
Policy=json.dumps(policy)
)
assert response['ResponseMetadata']['HTTPStatusCode'] == 204
self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 204)

# retrieve and check policy config
saved_policy = self.s3_client.get_bucket_policy(Bucket=TEST_BUCKET_NAME_WITH_POLICY)['Policy']
assert json.loads(saved_policy) == policy
self.assertEqual(json.loads(saved_policy), policy)

def test_s3_put_object_notification(self):
key_by_path = 'key-by-hostname'
@@ -70,13 +71,13 @@ def test_s3_put_object_notification(self):
url = '{}/{}'.format(os.getenv('TEST_S3_URL'), key_by_host)
# verify=False must be set, as this test fails on Travis because of an SSL error that does not occur locally
response = requests.put(url, data='something else', headers=headers, verify=False)
assert response.ok
self.assertTrue(response.ok)

queue_attributes = self.sqs_client.get_queue_attributes(QueueUrl=queue_url,
AttributeNames=['ApproximateNumberOfMessages'])
message_count = queue_attributes['Attributes']['ApproximateNumberOfMessages']
# the ApproximateNumberOfMessages attribute is a string
assert message_count == '2'
self.assertEqual(message_count, '2')

# clean up
self.sqs_client.delete_queue(QueueUrl=queue_url)
@@ -101,35 +102,42 @@ def test_s3_upload_fileobj_with_large_file_notification(self):
# create test bucket
self.s3_client.create_bucket(Bucket=TEST_BUCKET_WITH_NOTIFICATION)
self.s3_client.put_bucket_notification_configuration(Bucket=TEST_BUCKET_WITH_NOTIFICATION,
NotificationConfiguration={'QueueConfigurations': [
{'QueueArn': queue_attributes['Attributes']['QueueArn'],
'Events': ['s3:ObjectCreated:*']}]})
NotificationConfiguration={'QueueConfigurations': [
{'QueueArn': queue_attributes['Attributes']['QueueArn'],
'Events': ['s3:ObjectCreated:*']}]})

# has to be larger than 64MB to be broken up into a multipart upload
large_file = self.generate_large_file(75000000)
file_size = 75000000
large_file = self.generate_large_file(file_size)
download_file = new_tmp_file()
try:
self.s3_client.upload_file(Bucket=TEST_BUCKET_WITH_NOTIFICATION,
Key=large_file.name,
Filename=large_file.name)
queue_attributes = self.sqs_client.get_queue_attributes(QueueUrl=queue_url,
AttributeNames=['ApproximateNumberOfMessages'])
Key=large_file.name, Filename=large_file.name)
queue_attributes = self.sqs_client.get_queue_attributes(
QueueUrl=queue_url, AttributeNames=['ApproximateNumberOfMessages'])
message_count = queue_attributes['Attributes']['ApproximateNumberOfMessages']
# the ApproximateNumberOfMessages attribute is a string
assert message_count == '1'
self.assertEqual(message_count, '1')

# ensure that the first message's eventName is ObjectCreated:CompleteMultipartUpload
messages = self.sqs_client.receive_message(QueueUrl=queue_url, AttributeNames=['All'])
message = json.loads(messages['Messages'][0]['Body'])
assert message['Records'][0]['eventName'] == 'ObjectCreated:CompleteMultipartUpload'
self.assertEqual(message['Records'][0]['eventName'], 'ObjectCreated:CompleteMultipartUpload')

# download the file, check file size
self.s3_client.download_file(Bucket=TEST_BUCKET_WITH_NOTIFICATION,
Key=large_file.name, Filename=download_file)
self.assertEqual(os.path.getsize(download_file), file_size)

# clean up
self.sqs_client.delete_queue(QueueUrl=queue_url)
self.s3_client.delete_object(Bucket=TEST_BUCKET_WITH_NOTIFICATION, Key=large_file.name)
self.s3_client.delete_bucket(Bucket=TEST_BUCKET_WITH_NOTIFICATION)
finally:
# clean up large file
# clean up large files
large_file.close()
os.remove(large_file.name)
rm_rf(large_file.name)
rm_rf(download_file)

def test_s3_multipart_upload_with_small_single_part(self):
# In a multipart upload "Each part must be at least 5 MB in size, except the last part."
@@ -154,7 +162,7 @@ def test_s3_multipart_upload_with_small_single_part(self):
QueueUrl=queue_url, AttributeNames=['ApproximateNumberOfMessages'])
message_count = queue_attributes['Attributes']['ApproximateNumberOfMessages']
# the ApproximateNumberOfMessages attribute is a string
assert message_count == '1'
self.assertEqual(message_count, '1')

# clean up
self.sqs_client.delete_queue(QueueUrl=queue_url)
@@ -179,7 +187,7 @@ def test_s3_get_response_default_content_type(self):

# get object and assert headers
response = requests.get(url, verify=False)
assert response.headers['content-type'] == 'binary/octet-stream'
self.assertEqual(response.headers['content-type'], 'binary/octet-stream')
# clean up
self.s3_client.delete_objects(Bucket=bucket_name, Delete={'Objects': [{'Key': object_key}]})
self.s3_client.delete_bucket(Bucket=bucket_name)
@@ -200,7 +208,7 @@ def test_s3_get_response_content_type_same_as_upload(self):

# get object and assert headers
response = requests.get(url, verify=False)
assert response.headers['content-type'] == 'text/html; charset=utf-8'
self.assertEqual(response.headers['content-type'], 'text/html; charset=utf-8')
# clean up
self.s3_client.delete_objects(Bucket=bucket_name, Delete={'Objects': [{'Key': object_key}]})
self.s3_client.delete_bucket(Bucket=bucket_name)
@@ -219,7 +227,7 @@ def test_s3_head_response_content_length_same_as_upload(self):
# get object and assert headers
response = requests.head(url, verify=False)

assert response.headers['content-length'] == str(len(body))
self.assertEqual(response.headers['content-length'], str(len(body)))
# clean up
self.s3_client.delete_objects(Bucket=bucket_name, Delete={'Objects': [{'Key': object_key}]})
self.s3_client.delete_bucket(Bucket=bucket_name)
@@ -241,7 +249,7 @@ def test_s3_delete_response_content_length_zero(self):
# get object and assert headers
response = requests.delete(url, verify=False)

assert response.headers['content-length'] == '0'
self.assertEqual(response.headers['content-length'], '0')
# clean up
self.s3_client.delete_objects(Bucket=bucket_name, Delete={'Objects': [{'Key': object_key}]})
self.s3_client.delete_bucket(Bucket=bucket_name)
@@ -293,11 +301,11 @@ def test_s3_get_response_headers(self):
'get_object', Params={'Bucket': bucket_name, 'Key': object_key}
)
response = requests.get(url, verify=False)
assert response.headers['Date']
assert response.headers['x-amz-delete-marker']
assert response.headers['x-amz-version-id']
assert not response.headers.get('x-amz-id-2')
assert not response.headers.get('x-amz-request-id')
self.assertTrue(response.headers['Date'])
self.assertTrue(response.headers['x-amz-delete-marker'])
self.assertTrue(response.headers['x-amz-version-id'])
self.assertFalse(response.headers.get('x-amz-id-2'))
self.assertFalse(response.headers.get('x-amz-request-id'))
# clean up
self.s3_client.delete_objects(Bucket=bucket_name, Delete={'Objects': [{'Key': object_key}]})
self.s3_client.delete_bucket(Bucket=bucket_name)
@@ -338,7 +346,7 @@ def test_s3_upload_download_gzip(self):
with gzip.GzipFile(fileobj=download_file_object, mode='rb') as filestream:
downloaded_data = filestream.read().decode('utf-8')

assert downloaded_data == data, '{} != {}'.format(downloaded_data, data)
self.assertEqual(downloaded_data, data, '{} != {}'.format(downloaded_data, data))

def test_set_external_hostname(self):
bucket_name = 'test-bucket-%s' % short_uid()
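A note on the "64MB" comment in test_s3_upload_fileobj_with_large_file_notification: whether boto3 turns an upload into a multipart upload is governed by the client's transfer configuration rather than by a fixed size. A minimal sketch using boto3's public TransferConfig; the endpoint URL and the 8 MB values below are illustrative assumptions, not part of this PR.

import boto3
from boto3.s3.transfer import TransferConfig

# Assumed LocalStack S3 endpoint; adjust to your local setup.
s3 = boto3.client('s3', endpoint_url='http://localhost:4572')

# Force multipart behaviour for anything above 8 MB, uploaded in 8 MB parts.
config = TransferConfig(multipart_threshold=8 * 1024 * 1024,
                        multipart_chunksize=8 * 1024 * 1024)

s3.upload_file(Filename='/tmp/large-file.bin', Bucket='my-bucket',
               Key='large-file.bin', Config=config)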