Added s3 storage support (#69)
All settings are described in the distribution storage-driver specification:
https://distribution.github.io/distribution/storage-drivers/s3/

Registry users can now choose one of three storage backends: filesystem, swift, or s3;
`filesystem` remains the default.
alex-ramanau authored Jul 23, 2024
1 parent 5cfdde3 commit 0ccdbd5
Showing 3 changed files with 304 additions and 0 deletions.
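The new backend is chosen by the charm layer from whichever options are set. A condensed sketch of that precedence, for orientation only; the real logic is in the lib/charms/layer/docker_registry.py diff below, and the swift condition sits outside the visible hunk:

```python
# Illustrative sketch of the backend selection added in this commit;
# not the charm code itself. In the real code, swift is checked first
# (its condition is not visible in the diff below).
def pick_storage_backend(charm_config: dict) -> str:
    # S3 is selected only when both a region and a bucket are configured.
    if charm_config.get("storage-s3-region") and charm_config.get("storage-s3-bucket"):
        return "s3"
    # Otherwise the registry falls back to local filesystem storage
    # rooted at /var/lib/registry.
    return "filesystem"
```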
115 changes: 115 additions & 0 deletions config.yaml
@@ -154,3 +154,118 @@ options:
type: string
default: "/etc/docker/registry/registry.key"
description: Path to the TLS certificate private key.
storage-s3-accesskey:
type: string
default:
description: |
S3 storage access key. More information about S3 storage configuration can be found at
https://distribution.github.io/distribution/storage-drivers/s3/
storage-s3-secretkey:
type: string
default:
description: S3 storage secret key.
storage-s3-region:
type: string
default:
description: The region containing the S3 service.
storage-s3-regionendpoint:
type: string
default:
description: Endpoint for S3-compatible storage services (e.g. MinIO).
storage-s3-forcepathstyle:
type: boolean
default: false
description: Enables path-style addressing when set to "true".
storage-s3-bucket:
type: string
default:
description: The bucket name in which you want to store the registry’s data.
storage-s3-encrypt:
type: boolean
default: false
description: Specifies whether the registry stores images in encrypted form.
storage-s3-keyid:
type: string
default:
description: |
Optional KMS key ID to use for encryption
(encrypt must be true, or this parameter is ignored).
storage-s3-secure:
type: boolean
default: true
description: Indicates whether to use HTTPS instead of HTTP.
storage-s3-skipverify:
type: boolean
default: false
description: Skips TLS verification when set to "true".
storage-s3-v4auth:
type: boolean
default: true
description: Indicates whether the registry uses Version 4 of AWS’s authentication.
storage-s3-chunksize:
type: int
default: 10485760
description: |
The S3 API requires multipart upload chunks to be at least 5 MB,
so this value must be a number larger than 5 * 1024 * 1024.
storage-s3-multipartcopychunksize:
type: int
default: 33554432
description: |
The default chunk size for all but the last Upload Part in the S3 Multipart Upload
operation when copying stored objects.
storage-s3-multipartcopymaxconcurrency:
type: int
default: 100
description: |
The default maximum number of concurrent Upload Part operations in the
S3 Multipart Upload when copying stored objects.
storage-s3-multipartcopythresholdsize:
type: int
default: 33554432
description: |
The default S3 object size above which multipart copy will be used when copying the
object. Otherwise the object is copied with a single S3 API operation.
storage-s3-rootdirectory:
type: string
default:
description: The root directory tree in which all registry files are stored.
storage-s3-storageclass:
type: string
default: "STANDARD"
description: |
The storage class applied to each registry file.
Valid options are STANDARD and REDUCED_REDUNDANCY.
storage-s3-useragent:
type: string
default:
description: The User-Agent header value for S3 API operations.
storage-s3-usedualstack:
type: boolean
default: false
description: |
Use AWS dual-stack API endpoints which support requests to
S3 buckets over IPv6 and IPv4.
storage-s3-accelerate:
type: boolean
default: false
description: |
Enable S3 transfer acceleration for faster transfers of files over
long distances.
storage-s3-objectacl:
type: string
default: "private"
description: |
The canned object ACL to be applied to each registry object.
If you are using a bucket owned by another AWS account, it is recommended that you set this
to "bucket-owner-full-control" so that the bucket owner can access your objects.
Other valid options are available in the AWS S3 documentation:
https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
storage-s3-loglevel:
type: string
default: "off"
description: |
Valid values are: off (default), debug, debugwithsigning, debugwithhttpbody,
debugwithrequestretries, debugwithrequesterrors and debugwitheventstreambody.
See the AWS SDK for Go API reference for details:
https://docs.aws.amazon.com/sdk-for-go/api/aws/#LogLevelType
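Taken together, each charm option above corresponds to a distribution S3 driver parameter of the same name, namely the part of the option name after the storage-s3- prefix. A minimal sketch of that naming convention, assuming the options listed above; the real charm code in the next diff enumerates the keys explicitly rather than deriving them like this:

```python
# Illustrative only: derive the distribution driver parameter name by
# stripping the "storage-s3-" prefix from the charm option name.
PREFIX = "storage-s3-"

def s3_driver_params(charm_config: dict) -> dict:
    params = {}
    for key, value in charm_config.items():
        if key.startswith(PREFIX) and value not in (None, ""):
            params[key[len(PREFIX):]] = value
    return params

# Example: {"storage-s3-region": "us-east-1", "storage-s3-bucket": "blobs"}
# yields {"region": "us-east-1", "bucket": "blobs"}.
```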
39 changes: 39 additions & 0 deletions lib/charms/layer/docker_registry.py
@@ -144,6 +144,45 @@ def configure_registry():
storage['swift'].update({'domain': val})

storage['redirect'] = {'disable': True}
elif (
charm_config.get('storage-s3-region') and
charm_config.get('storage-s3-bucket')
):
storage['s3'] = {
'region': charm_config.get('storage-s3-region'),
'bucket': charm_config.get('storage-s3-bucket'),
'forcepathstyle': charm_config.get('storage-s3-forcepathstyle', False),
'encrypt': charm_config.get('storage-s3-encrypt', False),
'secure': charm_config.get('storage-s3-secure', True),
'skipverify': charm_config.get('storage-s3-skipverify', False),
'v4auth': charm_config.get('storage-s3-v4auth', True),
'chunksize': charm_config.get('storage-s3-chunksize', 10 * 1024 * 1024),
'multipartcopychunksize': charm_config.get(
'storage-s3-multipartcopychunksize', 30 * 1024 * 1024
),
'multipartcopymaxconcurrency': charm_config.get(
'storage-s3-multipartcopymaxconcurrency', 100
),
'multipartcopythresholdsize': charm_config.get(
'storage-s3-multipartcopythresholdsize', 30 * 1024 * 1024
),
'storageclass': charm_config.get('storage-s3-storageclass', 'STANDARD'),
'usedualstack': charm_config.get('storage-s3-usedualstack', False),
'accelerate': charm_config.get('storage-s3-accelerate', False),
'loglevel': charm_config.get('storage-s3-loglevel', 'off'),
}
optional_params = (
'accesskey',
'secretkey',
'regionendpoint',
'keyid',
'useragent',
'rootdirectory',
)
for short_name in optional_params:
full_name = f'storage-s3-{short_name}'
if charm_config.get(full_name):
storage['s3'][short_name] = charm_config.get(full_name)
else:
# If we're not swift or s3, we're local.
container_registry_path = '/var/lib/registry'
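For a concrete picture of what this branch produces, here is a small standalone sketch of the storage section that gets serialized with yaml.safe_dump into the registry's config file; the bucket, region, and endpoint values are made up for illustration:

```python
# Standalone illustration (not the charm code) of a rendered S3 storage section.
import yaml

storage = {
    "s3": {
        "region": "us-east-1",                            # hypothetical value
        "bucket": "registry-blobs",                       # hypothetical value
        "regionendpoint": "https://minio.internal:9000",  # hypothetical value
        "chunksize": 10 * 1024 * 1024,  # charm default, above the 5 MB S3 minimum
        "secure": True,
        "v4auth": True,
    },
}

print(yaml.safe_dump({"storage": storage}, default_flow_style=False))
```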
150 changes: 150 additions & 0 deletions tests/unit/test_docker_registry.py
@@ -76,3 +76,153 @@ def test_has_invalid_config(config):
"storage-cache": "bananas",
}
assert "storage-cache" in layer.docker_registry.has_invalid_config()


@mock.patch("os.makedirs", mock.Mock(return_value=0))
@mock.patch("charms.layer.docker_registry._write_tls_blobs_to_files")
@mock.patch("charms.layer.docker_registry._configure_local_client")
@mock.patch("charms.layer.docker_registry._write_tls_blobs_to_files")
@mock.patch("charms.layer.docker_registry.unitdata")
@mock.patch("charmhelpers.core.hookenv.config")
def test_configure_registry_s3_storage_smoke(config, *args):
config.return_value = {
"log-level": "info",
"storage-s3-region": "ns1",
"storage-s3-bucket": "test_bucket",
}
expected_storage = {
"s3": {
"bucket": "test_bucket",
"region": "ns1",
# "regionendpoint": "https://ns1-region.internal",
"forcepathstyle": False,
"encrypt": False,
"secure": True,
"skipverify": False,
"v4auth": True,
"chunksize": 10485760,
"multipartcopychunksize": 31457280,
"multipartcopymaxconcurrency": 100,
"multipartcopythresholdsize": 31457280,
"storageclass": "STANDARD",
"usedualstack": False,
"accelerate": False,
"loglevel": "off",
},
}
with mock.patch("charms.layer.docker_registry.yaml") as mock_yaml:
layer.docker_registry.configure_registry()
args, _ = mock_yaml.safe_dump.call_args_list[0]
assert 'storage' in args[0]
assert 's3' in args[0]['storage']
actual_storage_config = args[0]['storage']['s3']
assert expected_storage['s3'].items() == actual_storage_config.items()



@mock.patch("os.makedirs", mock.Mock(return_value=0))
@mock.patch("charms.layer.docker_registry._write_tls_blobs_to_files")
@mock.patch("charms.layer.docker_registry._configure_local_client")
@mock.patch("charms.layer.docker_registry._write_tls_blobs_to_files")
@mock.patch("charms.layer.docker_registry.unitdata")
@mock.patch("charmhelpers.core.hookenv.config")
def test_configure_registry_s3_storage_region_endpoint(config, *args):
config.return_value = {
"log-level": "info",
"storage-s3-region": "ns1",
"storage-s3-regionendpoint": "https://ns1-region.internal",
"storage-s3-bucket": "test_bucket",
}
expected_storage = {
"s3": {
"bucket": "test_bucket",
"region": "ns1",
"regionendpoint": "https://ns1-region.internal",
"forcepathstyle": False,
"encrypt": False,
"secure": True,
"skipverify": False,
"v4auth": True,
"chunksize": 10485760,
"multipartcopychunksize": 31457280,
"multipartcopymaxconcurrency": 100,
"multipartcopythresholdsize": 31457280,
"storageclass": "STANDARD",
"usedualstack": False,
"accelerate": False,
"loglevel": "off",
},
}
with mock.patch("charms.layer.docker_registry.yaml") as mock_yaml:
layer.docker_registry.configure_registry()
args, _ = mock_yaml.safe_dump.call_args_list[0]
assert 'storage' in args[0]
assert 's3' in args[0]['storage']
actual_storage_config = args[0]['storage']['s3']
assert expected_storage['s3'].items() == actual_storage_config.items()


@mock.patch("os.makedirs", mock.Mock(return_value=0))
@mock.patch("charms.layer.docker_registry._write_tls_blobs_to_files")
@mock.patch("charms.layer.docker_registry._configure_local_client")
@mock.patch("charms.layer.docker_registry._write_tls_blobs_to_files")
@mock.patch("charms.layer.docker_registry.unitdata")
@mock.patch("charmhelpers.core.hookenv.config")
def test_configure_registry_s3_storage_override_default(config, *args):
config.return_value = {
"log-level": "info",
"storage-s3-region": "ns1",
"storage-s3-bucket": "test_bucket",
"storage-s3-forcepathstyle": True,
"storage-s3-multipartcopythresholdsize": 100500,
}
expected_storage = {
"s3": {
"bucket": "test_bucket",
"region": "ns1",
"forcepathstyle": True,
"encrypt": False,
"secure": True,
"skipverify": False,
"v4auth": True,
"chunksize": 10485760,
"multipartcopychunksize": 31457280,
"multipartcopymaxconcurrency": 100,
"multipartcopythresholdsize": 100500,
"storageclass": "STANDARD",
"usedualstack": False,
"accelerate": False,
"loglevel": "off",
},
}
with mock.patch("charms.layer.docker_registry.yaml") as mock_yaml:
layer.docker_registry.configure_registry()
args, _ = mock_yaml.safe_dump.call_args_list[0]
assert 'storage' in args[0]
assert 's3' in args[0]['storage']
actual_storage_config = args[0]['storage']['s3']
assert expected_storage['s3'].items() == actual_storage_config.items()


@mock.patch("os.makedirs", mock.Mock(return_value=0))
@mock.patch("charms.layer.docker_registry._write_tls_blobs_to_files")
@mock.patch("charms.layer.docker_registry._configure_local_client")
@mock.patch("charms.layer.docker_registry._write_tls_blobs_to_files")
@mock.patch("charms.layer.docker_registry.unitdata")
@mock.patch("charmhelpers.core.hookenv.config")
def test_configure_registry_default_file_storage(config, *args):
config.return_value = {
"log-level": "info"
}
expected_storage = {
"filesystem": {
"rootdirectory": "/var/lib/registry"
}
}
with mock.patch("charms.layer.docker_registry.yaml") as mock_yaml:
layer.docker_registry.configure_registry()
args, _ = mock_yaml.safe_dump.call_args_list[0]
assert 'storage' in args[0]
assert 'filesystem' in args[0]['storage']
actual_storage_config = args[0]['storage']['filesystem']
assert expected_storage['filesystem'].items() == actual_storage_config.items()
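All four tests use the same technique: patch yaml inside the charm layer, call configure_registry(), and inspect the first positional argument passed to yaml.safe_dump. A stripped-down, self-contained illustration of that call_args pattern (not taken from the repo):

```python
# Minimal demonstration of inspecting what a patched function was called with.
from unittest import mock
import yaml

def render_config():
    # Stand-in for layer.docker_registry.configure_registry().
    yaml.safe_dump({"storage": {"s3": {"bucket": "demo"}}})

with mock.patch.object(yaml, "safe_dump") as mock_dump:
    render_config()
    args, _kwargs = mock_dump.call_args_list[0]
    assert args[0]["storage"]["s3"]["bucket"] == "demo"
```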
