Revert "clean(delayed_jobs): remove dependencies and all occurences"
This reverts commit 90ca937b7131575816d72e35e2c48f9d82e9a5e6. Re-revert this commit when it is time to remove all delayed_job occurrences again; for now we keep this dependency for our instances for at least a year.
parent 5eec93bc8c
commit 02590b3a73
17 changed files with 247 additions and 10 deletions
Gemfile (4 changes)

@@ -26,6 +26,9 @@ gem 'chunky_png'
gem 'clamav-client', require: 'clamav/client'
gem 'daemons'
gem 'deep_cloneable' # Enable deep clone of active record models
gem 'delayed_cron_job', require: false # Cron jobs
gem 'delayed_job_active_record'
gem 'delayed_job_web'
gem 'devise'
gem 'devise-i18n'
gem 'devise-two-factor'

@@ -86,6 +89,7 @@ gem 'rexml' # add missing gem due to ruby3 (https://github.com/Shopify/bootsnap/
gem 'rqrcode'
gem 'saml_idp'
gem 'sassc-rails' # Use SCSS for stylesheets
gem 'sentry-delayed_job'
gem 'sentry-rails'
gem 'sentry-ruby'
gem 'sentry-sidekiq'
Gemfile.lock (27 changes)

@@ -191,6 +191,18 @@ GEM
    date (3.3.4)
    deep_cloneable (3.2.0)
      activerecord (>= 3.1.0, < 8)
    delayed_cron_job (0.9.0)
      fugit (>= 1.5)
    delayed_job (4.1.11)
      activesupport (>= 3.0, < 8.0)
    delayed_job_active_record (4.1.8)
      activerecord (>= 3.0, < 8.0)
      delayed_job (>= 3.0, < 5)
    delayed_job_web (1.4.4)
      activerecord (> 3.0.0)
      delayed_job (> 2.0.3)
      rack-protection (>= 1.5.5)
      sinatra (>= 1.4.4)
    descendants_tracker (0.0.4)
      thread_safe (~> 0.3, >= 0.3.1)
    devise (4.9.4)

@@ -436,6 +448,8 @@ GEM
    minitest (5.25.1)
    msgpack (1.7.2)
    multi_json (1.15.0)
    mustermann (3.0.0)
      ruby2_keywords (~> 0.0.1)
    net-http (0.4.1)
      uri
    net-imap (0.4.12)

@@ -665,6 +679,7 @@ GEM
    ruby-progressbar (1.13.0)
    ruby-vips (2.2.0)
      ffi (~> 1.12)
    ruby2_keywords (0.0.5)
    rubyzip (2.3.2)
    saml_idp (0.16.0)
      activesupport (>= 5.2)

@@ -699,6 +714,9 @@ GEM
      rexml (~> 3.2, >= 3.2.5)
      rubyzip (>= 1.2.2, < 3.0)
      websocket (~> 1.0)
    sentry-delayed_job (5.17.3)
      delayed_job (>= 4.0)
      sentry-ruby (~> 5.17.3)
    sentry-rails (5.17.3)
      railties (>= 5.0)
      sentry-ruby (~> 5.17.3)

@@ -737,6 +755,11 @@ GEM
    simplecov_json_formatter (0.1.4)
    simpleidn (0.2.1)
      unf (~> 0.1.4)
    sinatra (3.2.0)
      mustermann (~> 3.0)
      rack (~> 2.2, >= 2.2.4)
      rack-protection (= 3.2.0)
      tilt (~> 2.0)
    skylight (6.0.4)
      activesupport (>= 5.2.0)
    smart_properties (1.17.0)

@@ -891,6 +914,9 @@ DEPENDENCIES
  clamav-client
  daemons
  deep_cloneable
  delayed_cron_job
  delayed_job_active_record
  delayed_job_web
  devise
  devise-i18n
  devise-two-factor

@@ -974,6 +1000,7 @@ DEPENDENCIES
  scss_lint
  selenium-devtools
  selenium-webdriver
  sentry-delayed_job
  sentry-rails
  sentry-ruby
  sentry-sidekiq
@@ -33,12 +33,11 @@ Vous souhaitez y apporter des changements ou des améliorations ? Lisez notre [
</policymap>
```

We are migrating from `delayed_job` to `sidekiq` for asynchronous job processing.
To run sidekiq, you will need:

- redis

#### Credits and licenses

- lightgallery: a license was purchased to support the project, but it is not required when the library is used in an open source application.

#### Development
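Not part of the diff above: since the README now points sidekiq users at redis, here is a minimal initializer sketch. The file path `config/initializers/sidekiq.rb` and the `REDIS_URL` variable are assumptions, not taken from this commit.

```ruby
# config/initializers/sidekiq.rb (hypothetical, not included in this commit)
require "sidekiq"

# Point both the Sidekiq server and client at the redis instance the README asks for.
redis_config = { url: ENV.fetch("REDIS_URL", "redis://localhost:6379/0") }

Sidekiq.configure_server { |config| config.redis = redis_config }
Sidekiq.configure_client { |config| config.redis = redis_config }
```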
@@ -38,7 +38,11 @@ class Cron::CronJob < ApplicationJob
  end

  def enqueued_cron_job
    sidekiq_cron_job
    if queue_adapter_name == "sidekiq"
      sidekiq_cron_job
    else
      delayed_job
    end
  end

  def sidekiq_cron_job
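The `delayed_job` branch above is defined outside this hunk, so its body is not shown here. A sketch of the kind of lookup it could perform is below; this is purely an illustrative assumption, reusing the `Delayed::Job` cron columns queried by the after_party tasks later in this commit, not the actual `Cron::CronJob` implementation.

```ruby
# Hypothetical sketch of the delayed_job finder used by enqueued_cron_job:
# locate the cron entry whose serialized handler references this job class.
def delayed_job
  Delayed::Job
    .where.not(cron: nil)
    .find_by("handler LIKE ?", "%#{self.class.name}%")
end
```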
app/jobs/cron/release_crashed_export_job.rb (new file, 46 lines)

@@ -0,0 +1,46 @@
# frozen_string_literal: true

class Cron::ReleaseCrashedExportJob < Cron::CronJob
  self.schedule_expression = "every 10 minute"
  SECSCAN_LIMIT = 20_000

  def perform(*args)
    return if !performable?
    export_jobs = jobs_for_current_host

    return if export_jobs.empty?

    host_pids = Sys::ProcTable.ps.map(&:pid)
    export_jobs.each do |job|
      _, pid = hostname_and_pid(job.locked_by)

      reset(job:) if host_pids.exclude?(pid.to_i)
    end
  end

  def reset(job:)
    job.locked_by = nil
    job.locked_at = nil
    job.attempts += 1
    job.save!
  end

  def hostname_and_pid(worker_name)
    matches = /host:(?<host>.*) pid:(?<pid>\d+)/.match(worker_name)
    [matches[:host], matches[:pid]]
  end

  def jobs_for_current_host
    Delayed::Job.where("locked_by like ?", "%#{whoami}%")
      .where(queue: ExportJob.queue_name)
  end

  def whoami
    me, _ = hostname_and_pid(Delayed::Worker.new.name)
    me
  end

  def performable?
    Delayed::Job.count < SECSCAN_LIMIT
  end
end
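For reference, the `locked_by` format the regexp above expects matches the one used in the spec at the end of this commit. A hypothetical console call (the hostname and pid values are made up):

```ruby
job = Cron::ReleaseCrashedExportJob.new
job.hostname_and_pid("delayed_job.33 host:worker-01 pid:1252488")
# => ["worker-01", "1252488"]
```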
@@ -23,6 +23,7 @@ as defined by the routes in the `admin/` namespace

<hr />

<%= link_to "Delayed Jobs", manager_delayed_job_path, class: "navigation__link" %>
<%= link_to "Sidekiq", manager_sidekiq_web_path, class: "navigation__link" %>
<%= link_to "Maintenance Tasks", manager_maintenance_tasks_path, class: "navigation__link" %>
<%= link_to "Features", manager_flipper_path, class: "navigation__link" %>
bin/delayed_job (new executable file, 5 lines)

@@ -0,0 +1,5 @@
#!/usr/bin/env ruby

require File.expand_path(File.join(File.dirname(__FILE__), '..', 'config', 'environment'))
require 'delayed/command'
Delayed::Command.new(ARGV).daemonize
@@ -100,6 +100,18 @@ namespace :service do
      #{echo_cmd %[test -f #{webserver_file_path} && sudo systemctl reload nginx]}
    }
  end

  desc "Restart delayed_job"
  task :restart_delayed_job do
    worker_file_path = File.join(deploy_to, 'shared', SHARED_WORKER_FILE_NAME)

    command %{
      echo "-----> Restarting delayed_job service"
      #{echo_cmd %[test -f #{worker_file_path} && echo 'it is a worker marchine, restarting delayed_job']}
      #{echo_cmd %[test -f #{worker_file_path} && sudo systemctl restart delayed_job]}
      #{echo_cmd %[test -f #{worker_file_path} || echo "it is not a worker marchine, #{worker_file_path} is absent"]}
    }
  end
end

desc "Deploys the current version to the server."

@@ -123,6 +135,7 @@ task :deploy do
  on :launch do
    invoke :'service:restart_puma'
    invoke :'service:reload_nginx'
    invoke :'service:restart_delayed_job'
    invoke :'deploy:cleanup'
  end
end
@@ -111,8 +111,8 @@ Rails.application.configure do
  # Annotate rendered view with file names.
  # config.action_view.annotate_rendered_view_with_filenames = true

  # We use the async adapter by default, but sidekiq can be set using
  # RAILS_QUEUE_ADAPTER=sidekiq bin/rails server
  # We use the async adapter by default, but delayed_job can be set using
  # RAILS_QUEUE_ADAPTER=delayed_job bin/rails server
  config.active_job.queue_adapter = ENV.fetch('RAILS_QUEUE_ADAPTER', 'async').to_sym

  # Use an evented file watcher to asynchronously detect changes in source code,
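Not part of the commit: a quick sketch of how the adapter selection above could be checked from a Rails console. The `:delayed_job` values assume the process was started with `RAILS_QUEUE_ADAPTER=delayed_job`; method names are those of recent Rails versions.

```ruby
# Hypothetical Rails console check; results depend on your environment.
ENV["RAILS_QUEUE_ADAPTER"]                         # => "delayed_job" when set as above, else nil
Rails.application.config.active_job.queue_adapter  # => :delayed_job (or :async by default)
ActiveJob::Base.queue_adapter_name                 # => "delayed_job"
```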
config/initializers/delayed_job.rb (new file, 6 lines)

@@ -0,0 +1,6 @@
# frozen_string_literal: true

# Set max_run_time at the highest job duration we want,
# then at job level we'll decrease this value to a lower value
# except for ExportJob.
Delayed::Worker.max_run_time = 16.hours # same as Export::MAX_DUREE_GENERATION but we can't yet use this constant here
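The comment above refers to delayed_job's convention of lowering the limit per job. A minimal sketch of that convention for a plain Delayed::Job payload follows; `ShortLivedTask` is a hypothetical class, delayed_job only honours per-job values that are lower than `Delayed::Worker.max_run_time`, and ActiveJob-wrapped jobs may need a different hook.

```ruby
# Hypothetical plain Delayed::Job payload, not part of this commit.
class ShortLivedTask
  def perform
    # work that should be killed well before the 16-hour ceiling
  end

  # delayed_job reads this from the payload object and uses it instead of
  # Delayed::Worker.max_run_time, as long as it is lower.
  def max_run_time
    1.hour
  end
end

# Delayed::Job.enqueue(ShortLivedTask.new)
```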
@@ -22,10 +22,27 @@ Sentry.init do |config|
    # transaction_context is the transaction object in hash form
    # keep in mind that sampling happens right after the transaction is initialized
    # for example, at the beginning of the request
    if sampling_context[:transaction_context].dig(:env, "REQUEST_METHOD") == "GET"
      0.001
    else
      0.01
    transaction_context = sampling_context[:transaction_context]

    # transaction_context helps you sample transactions with more sophistication
    # for example, you can provide different sample rates based on the operation or name
    case transaction_context[:op]
    when /delayed_job/
      contexts = Sentry.get_current_scope.contexts
      job_class = contexts.dig(:"Active-Job", :job_class)
      attempts = contexts.dig(:"Delayed-Job", :attempts)
      max_attempts = job_class.safe_constantize&.new&.max_attempts rescue 25

      # Don't trace on all attempts
      [0, 2, 5, 10, 20, max_attempts].include?(attempts)
    else # rails requests
      if sampling_context.dig(:env, "REQUEST_METHOD") == "GET"
        0.001
      else
        0.01
      end
    end
  end

  config.delayed_job.report_after_job_retries = false # don't wait for all attempts before reporting
end
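A short illustration of what the sampler above returns, assuming Sentry's `traces_sampler` may return either a boolean (trace / don't trace) or a float sample rate; the attempt numbers are made up.

```ruby
# Illustration only, not part of the commit.
# delayed_job transaction, attempt 5 of a job whose max_attempts is 25:
[0, 2, 5, 10, 20, 25].include?(5)   # => true  (trace this attempt)
# delayed_job transaction, attempt 7 of the same job:
[0, 2, 5, 10, 20, 25].include?(7)   # => false (skip tracing)
# plain Rails requests: GET sampled at a 0.1% rate, everything else at 1%.
```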
@@ -107,6 +107,7 @@ Rails.application.routes.draw do

  authenticate :super_admin do
    mount Flipper::UI.app(-> { Flipper.instance }) => "/features", as: :flipper
    match "/delayed_job" => DelayedJobWeb, :anchor => false, :via => [:get, :post]
    mount MaintenanceTasks::Engine => "/maintenance_tasks"
    mount Sidekiq::Web => "/sidekiq"
  end
@@ -37,6 +37,6 @@ task :rollback do
  branch = ENV.fetch('BRANCH')

  domains.each do |domain|
    sh "mina rollback service:restart_puma service:reload_nginx domain=#{domain} branch=#{branch}"
    sh "mina rollback service:restart_puma service:reload_nginx service:restart_delayed_job domain=#{domain} branch=#{branch}"
  end
end
@@ -0,0 +1,16 @@
# frozen_string_literal: true

namespace :after_party do
  desc 'Deployment task: remove_old_cron_job_from_delayed_job_table'
  task remove_old_cron_job_from_delayed_job_table: :environment do
    puts "Running deploy task 'remove_old_cron_job_from_delayed_job_table'"

    cron = Delayed::Job.where.not(cron: nil)
      .where("handler LIKE ?", "%UpdateAdministrateurUsageStatisticsJob%")
      .first
    cron.destroy if cron

    AfterParty::TaskRecord
      .create version: AfterParty::TaskRecorder.new(__FILE__).timestamp
  end
end
@@ -0,0 +1,16 @@
# frozen_string_literal: true

namespace :after_party do
  desc 'Deployment task: remove_old_find_dubious_procedures_job_from_delayed_job_table'
  task remove_old_dubious_proc_job_from_delayed_job_table: :environment do
    puts "Running deploy task 'remove_old_dubious_proc_job_from_delayed_job_table'"

    cron = Delayed::Job.where.not(cron: nil)
      .where("handler LIKE ?", "%FindDubiousProceduresJob%")
      .first
    cron.destroy if cron

    AfterParty::TaskRecord
      .create version: AfterParty::TaskRecorder.new(__FILE__).timestamp
  end
end
@@ -0,0 +1,27 @@
# frozen_string_literal: true

namespace :after_party do
  desc 'Deployment task: destroy_dossier_transfer_without_email'
  task destroy_dossier_transfer_without_email: :environment do
    puts "Running deploy task 'destroy_dossier_transfer_without_email'"

    invalid_dossiers = DossierTransfer.where(email: "")

    progress = ProgressReport.new(invalid_dossiers.count)

    invalid_dossiers.find_each do |dossier_transfer|
      puts "Destroy dossier transfer #{dossier_transfer.id}"
      dossier_transfer.destroy_and_nullify

      job = Delayed::Job.where("handler LIKE ALL(ARRAY[?, ?])", "%ActionMailer::MailDeliveryJob%", "%aj_globalid: gid://tps/DossierTransfer/#{dossier_transfer.id}\n%").first
      job.destroy if job

      progress.inc
    end

    # Update task as completed. If you remove the line below, the task will
    # run with every deploy (or every time you call after_party:run).
    AfterParty::TaskRecord
      .create version: AfterParty::TaskRecorder.new(__FILE__).timestamp
  end
end
spec/jobs/cron/release_crashed_export_job_spec.rb (new file, 55 lines)

@@ -0,0 +1,55 @@
# frozen_string_literal: true

describe Cron::ReleaseCrashedExportJob do
  let(:handler) { "whocares" }

  def locked_by(hostname)
    "delayed_job.33 host:#{hostname} pid:1252488"
  end

  describe '.perform' do
    subject { described_class.new.perform }
    let!(:job) { Delayed::Job.create!(handler:, queue: ExportJob.queue_name, locked_by: locked_by(Socket.gethostname)) }

    it 'releases lock' do
      expect { subject }.to change { job.reload.locked_by }.from(anything).to(nil)
    end
    it 'increases attempts' do
      expect { subject }.to change { job.reload.attempts }.by(1)
    end
  end

  describe '.hostname_and_pid' do
    subject { described_class.new.hostname_and_pid(Delayed::Worker.new.name) }
    it 'extract hostname and pid from worker.name' do
      hostname, pid = subject

      expect(hostname).to eq(Socket.gethostname)
      expect(pid).to eq(Process.pid.to_s)
    end
  end

  describe 'whoami' do
    subject { described_class.new.whoami }
    it { is_expected.to eq(Socket.gethostname) }
  end

  describe 'jobs_for_current_host' do
    subject { described_class.new.jobs_for_current_host }

    context 'when jobs run an another host' do
      let!(:job) { Delayed::Job.create!(handler:, queue: :default, locked_by: locked_by('spec1.prod')) }
      it { is_expected.to be_empty }
    end

    context 'when jobs run an same host with default queue' do
      let!(:job) { Delayed::Job.create!(handler:, queue: :default, locked_by: locked_by(Socket.gethostname)) }
      it { is_expected.to be_empty }
    end

    context 'when jobs run an same host with exports queue' do
      let!(:job) { Delayed::Job.create!(handler:, queue: ExportJob.queue_name, locked_by: locked_by(Socket.gethostname)) }
      it { is_expected.to include(job) }
    end
  end
end