
gitlab.com/gitlab-org/gitlab-foss.git
author     Kamil Trzcinski <ayufan@ayufan.eu>              2015-12-16 18:29:53 +0300
committer  James Edwards-Jones <jedwardsjones@gitlab.com>  2017-02-01 01:50:40 +0300
commit     d28f1a7f4aa4bdf664e04a43022667e4e7637e73 (patch)
tree       54bebb663237646cf17bba7e049a84e1b73ced46 /app/workers
parent     adc1a9abb5adbf746b492938cb11576753edcc7e (diff)
Split PagesWorker
Diffstat (limited to 'app/workers')
-rw-r--r--  app/workers/pages_worker.rb | 101
1 file changed, 61 insertions(+), 40 deletions(-)
diff --git a/app/workers/pages_worker.rb b/app/workers/pages_worker.rb
index 59f4b4f16f4..c34259c15f1 100644
--- a/app/workers/pages_worker.rb
+++ b/app/workers/pages_worker.rb
@@ -12,62 +12,83 @@ class PagesWorker
     return unless valid?
     # Create status notifying the deployment of pages
-    @status = GenericCommitStatus.new(
-      project: project,
-      commit: build.commit,
-      user: build.user,
-      ref: build.ref,
-      stage: 'deploy',
-      name: 'pages:deploy'
-    )
+    @status = create_status
     @status.run!
-
-    FileUtils.mkdir_p(tmp_path)
-
-    # Calculate dd parameters: we limit the size of pages
-    max_size = current_application_settings.max_pages_size.megabytes
-    max_size ||= MAX_SIZE
-    blocks = 1 + max_size / BLOCK_SIZE
+    raise 'pages are outdated' unless latest?
     # Create temporary directory in which we will extract the artifacts
-    Dir.mktmpdir(nil, tmp_path) do |temp_path|
-      # We manually extract the archive and limit the archive size with dd
-      results = Open3.pipeline(%W(gunzip -c #{artifacts}),
-                               %W(dd bs=#{BLOCK_SIZE} count=#{blocks}),
-                               %W(tar -x -C #{temp_path} public/),
-                               err: '/dev/null')
-      return unless results.compact.all?(&:success?)
+    Dir.mktmpdir(nil, tmp_path) do |archive_path|
+      results = extract_archive(archive_path)
+      raise 'pages failed to extract' unless results.all?(&:success?)
       # Check if we did extract public directory
-      temp_public_path = File.join(temp_path, 'public')
-      return unless Dir.exists?(temp_public_path)
+      archive_public_path = File.join(archive_path, 'public')
+      raise 'pages miss the public folder' unless Dir.exists?(archive_public_path)
+      raise 'pages are outdated' unless latest?
+      deploy_page!(archive_public_path)
-      FileUtils.mkdir_p(pages_path)
+      @status.success
+    end
+  rescue => e
+    fail(e.message, !latest?)
+  end
-      # Ignore deployment if the HEAD changed when we were extracting the archive
-      return unless valid?
+  private
-      # Do atomic move of pages
-      # Move and removal may not be atomic, but they are significantly faster than extracting and removal
-      # 1. We move deployed public to previous public path (file removal is slow)
-      # 2. We move temporary public to be deployed public
-      # 3. We remove previous public path
-      FileUtils.move(public_path, previous_public_path, force: true)
-      FileUtils.move(temp_public_path, public_path)
-      FileUtils.rm_r(previous_public_path, force: true)
+  def create_status
+    GenericCommitStatus.new(
+      project: project,
+      commit: build.commit,
+      user: build.user,
+      ref: build.ref,
+      stage: 'deploy',
+      name: 'pages:deploy'
+    )
+  end
-      @status.success
-    end
+  def extract_archive(temp_path)
+    results = Open3.pipeline(%W(gunzip -c #{artifacts}),
+                             %W(dd bs=#{BLOCK_SIZE} count=#{blocks}),
+                             %W(tar -x -C #{temp_path} public/),
+                             err: '/dev/null')
+    results.compact
+  end
+
+  def deploy_page!(archive_public_path)
+    # Do atomic move of pages
+    # Move and removal may not be atomic, but they are significantly faster than extracting and removal
+    # 1. We move deployed public to previous public path (file removal is slow)
+    # 2. We move temporary public to be deployed public
+    # 3. We remove previous public path
+    FileUtils.mkdir_p(pages_path)
+    FileUtils.move(public_path, previous_public_path, force: true)
+    FileUtils.move(archive_public_path, public_path)
   ensure
-    @status.drop if @status && @status.active?
+    FileUtils.rm_r(previous_public_path, force: true)
   end
-  private
+  def fail(message, allow_failure = true)
+    @status.allow_failure = allow_failure
+    @status.description = message
+    @status.drop
+  end
   def valid?
+    build && build.artifacts_file?
+  end
+
+  def latest?
     # check if sha for the ref is still the most recent one
     # this helps in case when multiple deployments happen
-    build && build.artifacts_file? && sha == latest_sha
+    sha == latest_sha
+  end
+
+  def blocks
+    # Calculate dd parameters: we limit the size of pages
+    max_size = current_application_settings.max_pages_size.megabytes
+    max_size ||= MAX_SIZE
+    blocks = 1 + max_size / BLOCK_SIZE
+    blocks
   end
   def build