Merge pull request #7051 from betagouv/main

2022-03-16-01
commit c525274770 by LeSim, 2022-03-16 15:38:10 +01:00 (committed by GitHub)
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
12 changed files with 620 additions and 344 deletions

View file

@@ -80,6 +80,7 @@ gem 'sentry-ruby'
 gem 'sib-api-v3-sdk'
 gem 'skylight'
 gem 'spreadsheet_architect'
+gem 'strong_migrations' # lint database migrations
 gem 'typhoeus'
 gem 'warden'
 gem 'webpacker'

View file

@@ -691,6 +691,8 @@ GEM
       actionpack (>= 5.2)
       activesupport (>= 5.2)
       sprockets (>= 3.0.0)
+    strong_migrations (0.8.0)
+      activerecord (>= 5.2)
     swd (1.3.0)
       activesupport (>= 3)
       attr_required (>= 0.0.5)
@@ -875,6 +877,7 @@ DEPENDENCIES
   spreadsheet_architect
   spring
   spring-commands-rspec
+  strong_migrations
   timecop
   typhoeus
   vcr

View file

@@ -4,3 +4,6 @@
 require File.expand_path('config/application', __dir__)
 Rails.application.load_tasks
+
+# Alphabetize schema.rb
+task 'db:schema:dump': 'strong_migrations:alphabetize_columns'

View file

@@ -13,7 +13,7 @@
 class Archive < ApplicationRecord
   include AASM

-  RETENTION_DURATION = 1.week
+  RETENTION_DURATION = 4.days

   has_and_belongs_to_many :groupe_instructeurs

View file

@@ -0,0 +1,85 @@
class ArchiveUploader
  # see: https://docs.ovh.com/fr/storage/pcs/capabilities-and-limitations/#max_file_size-5368709122-5gb
  # officially the limit is 5 GB, but let's stay clear of the exact threshold;
  # above it, Active Storage expects the file to be split into chunks plus a manifest.
  MAX_FILE_SIZE_FOR_BACKEND_BEFORE_CHUNKING = ENV.fetch('ACTIVE_STORAGE_FILE_SIZE_THRESHOLD_BEFORE_CUSTOM_UPLOAD') { 4.gigabytes }.to_i

  def upload
    uploaded_blob = create_and_upload_blob

    begin
      archive.file.purge if archive.file.attached?
    rescue ActiveStorage::FileNotFoundError
      archive.file.destroy
      archive.file.detach
    end

    archive.reload

    ActiveStorage::Attachment.create(
      name: 'file',
      record_type: 'Archive',
      record_id: archive.id,
      blob_id: uploaded_blob.id
    )
  end

  private

  attr_reader :procedure, :archive, :filepath

  def create_and_upload_blob
    if active_storage_service_local? || File.size(filepath) < MAX_FILE_SIZE_FOR_BACKEND_BEFORE_CHUNKING
      upload_with_active_storage
    else
      upload_with_chunking_wrapper
    end
  end

  def active_storage_service_local?
    Rails.application.config.active_storage.service == :local
  end

  def upload_with_active_storage
    params = blob_default_params(filepath).merge(io: File.open(filepath),
                                                 identify: false)
    blob = ActiveStorage::Blob.create_and_upload!(**params)
    return blob
  end

  def upload_with_chunking_wrapper
    params = blob_default_params(filepath).merge(byte_size: File.size(filepath),
                                                 checksum: Digest::SHA256.file(filepath).hexdigest)
    blob = ActiveStorage::Blob.create_before_direct_upload!(**params)
    if syscall_to_custom_uploader(blob)
      return blob
    else
      blob.purge
      fail "custom archive attachment failed, should it be retried?"
    end
  end

  # keeps consistency between the two Active Storage code paths (otherwise archives are not stored in '/archives'):
  # - create_and_upload: the blob is attached by Active Storage
  # - upload_with_chunking_wrapper: the blob is attached by the custom script
  def blob_default_params(filepath)
    {
      key: namespaced_object_key,
      filename: archive.filename(procedure),
      content_type: 'application/zip',
      metadata: { virus_scan_result: ActiveStorage::VirusScanner::SAFE }
    }
  end

  # explicitly memoized so the key stays consistent across calls (ex: retry)
  def namespaced_object_key
    @namespaced_object_key ||= "archives/#{Date.today.strftime("%Y-%m-%d")}/#{SecureRandom.uuid}"
  end

  def syscall_to_custom_uploader(blob)
    system(ENV.fetch('ACTIVE_STORAGE_BIG_FILE_UPLOADER_WITH_ENCRYPTION_PATH').to_s, filepath, blob.key, exception: true)
  end

  def initialize(procedure:, archive:, filepath:)
    @procedure = procedure
    @archive = archive
    @filepath = filepath
  end
end
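For orientation, a minimal usage sketch of this class; it mirrors the ProcedureArchiveService change in the next file, and the variable names are illustrative rather than taken from the diff:

```
# Illustrative: wrap an already-built zip file and attach the resulting blob
# to the Archive record; the upload strategy is chosen from the file size.
ArchiveUploader
  .new(procedure: procedure, archive: archive, filepath: zip_filepath)
  .upload
```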

View file

@@ -34,12 +34,9 @@ class ProcedureArchiveService
     end

     attachments = create_list_of_attachments(dossiers)
-    download_and_zip(attachments) do |zip_file|
-      archive.file.attach(
-        io: File.open(zip_file),
-        filename: archive.filename(@procedure),
-        metadata: { virus_scan_result: ActiveStorage::VirusScanner::SAFE }
-      )
+    download_and_zip(attachments) do |zip_filepath|
+      ArchiveUploader.new(procedure: @procedure, archive: archive, filepath: zip_filepath)
+        .upload
     end

     archive.make_available!
     InstructeurMailer.send_archive(instructeur, @procedure, archive).deliver_later

View file

@@ -99,3 +99,16 @@ MATOMO_IFRAME_URL="https://matomo.example.org/index.php?module=CoreAdminHome&act
 # Landing page sections
 # LANDING_TESTIMONIALS_ENABLED="enabled"
 # LANDING_USERS_ENABLED="enabled"
+
+# Archive creation options
+# when we create an archive of a Procedure, the worker uses this directory as the root in which archives are built (each archive is built inside a tmp dir within this directory)
+# ARCHIVE_CREATION_DIR='/tmp'
+# max parallel downloads when creating an archive
+# ARCHIVE_DOWNLOAD_MAX_PARALLEL=10
+
+# Options for uploading big encrypted archives
+# depending on your object storage backend (ie: aws::s3 / ovh::object_storage), encrypting files to guard against data breaches may require a custom upload strategy for big files
+# suggested value is 4.gigabytes (4294967296)
+# ACTIVE_STORAGE_FILE_SIZE_THRESHOLD_BEFORE_CUSTOM_UPLOAD=4294967296
+# a custom script handling the upload of big files
+# ACTIVE_STORAGE_BIG_FILE_UPLOADER_WITH_ENCRYPTION_PATH='/usr/local/bin/swift'
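A rough sketch of how these two upload-related variables are consumed; this paraphrases the ArchiveUploader class added in this commit, and `zip_filepath`, `object_key` and the `'archive.zip'` filename are illustrative:

```
# Illustrative paraphrase of the decision made in ArchiveUploader#create_and_upload_blob
threshold = ENV.fetch('ACTIVE_STORAGE_FILE_SIZE_THRESHOLD_BEFORE_CUSTOM_UPLOAD') { 4.gigabytes }.to_i
uploader_script = ENV.fetch('ACTIVE_STORAGE_BIG_FILE_UPLOADER_WITH_ENCRYPTION_PATH')

if File.size(zip_filepath) < threshold
  # small enough: regular Active Storage upload
  ActiveStorage::Blob.create_and_upload!(io: File.open(zip_filepath), filename: 'archive.zip')
else
  # too big to upload in one go: delegate to the encryption-aware wrapper script
  system(uploader_script, zip_filepath, object_key, exception: true)
end
```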

View file

@@ -0,0 +1,26 @@
# Mark existing migrations as safe
StrongMigrations.start_after = 20220315125851

# Set timeouts for migrations
# If you use PgBouncer in transaction mode, delete these lines and set timeouts on the database user
StrongMigrations.lock_timeout = 10.seconds
StrongMigrations.statement_timeout = 1.hour

# Analyze tables after indexes are added
# Outdated statistics can sometimes hurt performance
StrongMigrations.auto_analyze = true

# Set the version of the production database
# so the right checks are run in development
# StrongMigrations.target_version = 10

# Add custom checks
# StrongMigrations.add_check do |method, args|
#   if method == :add_index && args[0].to_s == "users"
#     stop! "No more indexes on the users table"
#   end
# end

# Make some operations safe by default
# See https://github.com/ankane/strong_migrations#safe-by-default
# StrongMigrations.safe_by_default = true
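For a sense of what the gem enforces once this initializer is loaded: on PostgreSQL it flags a plain `add_index` inside a transactional migration as unsafe and asks for the concurrent form. A minimal sketch, assuming Rails 6.1 migration syntax; the table and column names are illustrative, not taken from this changeset:

```
# Hypothetical migration: strong_migrations would block a bare
#   add_index :dossiers, :some_column
# and suggest the concurrent variant below.
class AddSomeColumnIndexToDossiers < ActiveRecord::Migration[6.1]
  # concurrent index builds cannot run inside a DDL transaction
  disable_ddl_transaction!

  def change
    add_index :dossiers, :some_column, algorithm: :concurrently
  end
end
```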

File diff suppressed because it is too large

View file

@@ -0,0 +1,85 @@
# Object Storage And Data Encryption

## Object Storage

By default, demarches-simplifiees.fr uses an [OVH Object Storage](https://www.ovhcloud.com/en/public-cloud/object-storage/) backend. The hard drives are encrypted at rest, but to protect user files even better, demarches-simplifiees.fr can also use an external encryption proxy that encrypts and decrypts files on the fly:

* Encryption is done via our [proxy](https://github.com/betagouv/ds_proxy) when the file is uploaded by a client.
* Decryption is done via the same proxy when the file is downloaded by a client.

### Object Storage limitation

As an S3-compatible object storage backend, OVH Object Storage suffers from the same limitations. One of them is that a file bigger than 5 GB must be chunked into segments (see the [documentation](https://docs.ovh.com/fr/storage/pcs/capabilities-and-limitations/#max_file_size-5368709122-5gb)). This process splits the file into segments and then reassembles it via a manifest. Unfortunately, encryption cannot work with this use case.

So we use a custom script that wraps two calls to our proxy in order to buffer all the chunks and encrypt/decrypt the whole file. Here is an example:
```
#!/usr/bin/env bash
# wrapper script to encrypt and upload an archive file received from the app
set -o errexit
set -o pipefail
set -o nounset

# params
# 1: filename
# 2: key
if ! [ "$#" -eq 2 ]; then
  echo "usage: $0 <filename> <key>"
  exit 1
fi

local_file_path=$1
remote_basename=$(basename "$local_file_path")
key=$2

# encrypt
curl -s -XPUT "http://ds_proxy_host:ds_proxy_port/local/encrypt/${remote_basename}" --data-binary @"${local_file_path}"

# get back encrypted file
encrypted_filename="${local_file_path}.enc"
curl -s "http://ds_proxy_host:ds_proxy_port/local/encrypt/${remote_basename}" -o "${encrypted_filename}"

# OVH openstack params
os_tenant_name=os_tenant_name
os_username=os_username
os_password=os_password
os_region_name=GRA
# auth = https://auth.cloud.ovh.net/v3/
# use haproxy proxy and not direct internet URL
os_auth_url="os_auth_url"
os_storage_url="os_storage_url"
container_name=container_name
expiring_delay="$((60 * 60 * 24 * 4))" # 4 days

# upload
/usr/local/bin/swift \
  --auth-version 3 \
  --os-auth-url "$os_auth_url" \
  --os-storage-url "$os_storage_url" \
  --os-region-name "$os_region_name" \
  --os-tenant-name "$os_tenant_name" \
  --os-username "$os_username" \
  --os-password "$os_password" \
  upload \
  --header "X-Delete-After: ${expiring_delay}" \
  --segment-size "$((3 * 1024 * 1024 * 1024))" \
  --header "Content-Disposition: filename=${remote_basename}" \
  --object-name "${key}" \
  "${container_name}" "${encrypted_filename}"
swift_exit_code=$?

# cleanup
rm "${encrypted_filename}"

# return swift return code
exit ${swift_exit_code}
```
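On the Rails side, the `ArchiveUploader` class added in this commit shells out to this wrapper, passing the local file path and the destination object key. A minimal sketch of that call (variable names illustrative):

```
# ACTIVE_STORAGE_BIG_FILE_UPLOADER_WITH_ENCRYPTION_PATH points at the wrapper above;
# argument 1 is the local zip path, argument 2 is the object key the blob was registered under.
system(
  ENV.fetch('ACTIVE_STORAGE_BIG_FILE_UPLOADER_WITH_ENCRYPTION_PATH'),
  zip_filepath,
  blob.key,
  exception: true
)
```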

View file

@@ -5,11 +5,7 @@ describe Database::MigrationHelpers do
   before(:all) do
     ActiveRecord::Migration.suppress_messages do
-      ActiveRecord::Migration.create_table "test_labels", force: true do |t|
-        t.string :label
-        t.integer :user_id
-      end
-      ActiveRecord::Migration.create_table "test_labels", force: true do |t|
+      ActiveRecord::Migration.create_table "test_labels" do |t|
         t.string :label
         t.integer :user_id
       end
@@ -103,13 +99,13 @@ describe Database::MigrationHelpers do
   before(:all) do
     ActiveRecord::Migration.suppress_messages do
-      ActiveRecord::Migration.create_table "test_physicians", force: true do |t|
+      ActiveRecord::Migration.create_table "test_physicians" do |t|
         t.string :name
       end
-      ActiveRecord::Migration.create_table "test_patients", force: true do |t|
+      ActiveRecord::Migration.create_table "test_patients" do |t|
         t.string :name
       end
-      ActiveRecord::Migration.create_table "test_appointments", id: false, force: true do |t|
+      ActiveRecord::Migration.create_table "test_appointments", id: false do |t|
         t.integer :test_physician_id
         t.integer :test_patient_id
         t.datetime :datetime

View file

@@ -0,0 +1,70 @@
describe ArchiveUploader do
  let(:procedure) { build(:procedure) }
  let(:archive) { create(:archive) }
  let(:file) { Tempfile.new }
  let(:fixture_blob) { ActiveStorage::Blob.create_before_direct_upload!(filename: File.basename(file.path), byte_size: file.size, checksum: 'osf') }

  let(:uploader) { ArchiveUploader.new(procedure: procedure, archive: archive, filepath: file.path) }

  describe '.upload' do
    context 'when active storage service is local' do
      it 'uploads with upload_with_active_storage' do
        expect(uploader).to receive(:active_storage_service_local?).and_return(true)
        expect(uploader).to receive(:upload_with_active_storage).and_return(fixture_blob)
        uploader.upload
      end

      it 'links the created blob as an attachment to the current archive instance' do
        expect { uploader.upload }
          .to change { ActiveStorage::Attachment.where(name: 'file', record_type: 'Archive', record_id: archive.id).count }.by(1)
      end
    end

    context 'when active storage service is not local' do
      before do
        expect(uploader).to receive(:active_storage_service_local?).and_return(false)
        expect(File).to receive(:size).with(file.path).and_return(filesize)
      end

      context 'when file is smaller than MAX_FILE_SIZE_FOR_BACKEND_BEFORE_CHUNKING' do
        let(:filesize) { ArchiveUploader::MAX_FILE_SIZE_FOR_BACKEND_BEFORE_CHUNKING - 1 }

        it 'uploads with upload_with_active_storage' do
          expect(uploader).to receive(:upload_with_active_storage).and_return(fixture_blob)
          uploader.upload
        end
      end

      context 'when file is bigger than MAX_FILE_SIZE_FOR_BACKEND_BEFORE_CHUNKING' do
        let(:filesize) { ArchiveUploader::MAX_FILE_SIZE_FOR_BACKEND_BEFORE_CHUNKING + 1 }

        it 'uploads with upload_with_chunking_wrapper' do
          expect(uploader).to receive(:upload_with_chunking_wrapper).and_return(fixture_blob)
          uploader.upload
        end

        it 'links the created blob as an attachment to the current archive instance' do
          expect(uploader).to receive(:upload_with_chunking_wrapper).and_return(fixture_blob)
          expect { uploader.upload }
            .to change { ActiveStorage::Attachment.where(name: 'file', record_type: 'Archive', record_id: archive.id).count }.by(1)
        end
      end
    end
  end

  describe '.upload_with_chunking_wrapper' do
    let(:fake_blob_checksum) { Digest::SHA256.file(file.path) }
    let(:fake_blob_bytesize) { 100.gigabytes }

    before do
      expect(uploader).to receive(:syscall_to_custom_uploader).and_return(true)
      expect(File).to receive(:size).with(file.path).and_return(fake_blob_bytesize)
      expect(Digest::SHA256).to receive(:file).with(file.path).and_return(double(hexdigest: fake_blob_checksum.hexdigest))
    end

    it 'creates a blob' do
      expect { uploader.send(:upload_with_chunking_wrapper) }
        .to change { ActiveStorage::Blob.where(checksum: fake_blob_checksum.hexdigest, byte_size: fake_blob_bytesize).count }.by(1)
    end
  end
end