Commit bf306df3 authored by Sean Arnold

Fix Time.now => Time.current

- Update usage
- Update specs
parent 90051967
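For context on why this swap matters (it is the kind of thing the `Rails/TimeZone` RuboCop cop flags): `Time.now` returns a plain `Time` in the server's system time zone, while `Time.current` returns an `ActiveSupport::TimeWithZone` in the application's configured `Time.zone`. Both name the same instant when called together, but they diverge in zone-sensitive operations such as parsing bare strings, formatting, and computing day boundaries. A minimal sketch of the difference, assuming only that ActiveSupport is loaded:

```ruby
require 'active_support/all'

Time.zone = 'UTC' # the application zone (config.time_zone in a Rails app)

Time.now.class      # => Time — rendered in the server's system zone
Time.current.class  # => ActiveSupport::TimeWithZone — respects Time.zone
Time.current.zone   # => "UTC"
```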
@@ -73,7 +73,7 @@ module Reenqueuer
# end
#
def ensure_minimum_duration(minimum_duration)
- start_time = Time.now
+ start_time = Time.current
result = yield
@@ -95,7 +95,7 @@ module Reenqueuer
end
def elapsed_time(start_time)
- Time.now - start_time
+ Time.current - start_time
end
end
end
@@ -62,7 +62,7 @@ module Gitlab
end
def build_label_attrs(issue_id, label_id)
- time = Time.now
+ time = Time.current
{
label_id: label_id,
target_id: issue_id,
@@ -80,7 +80,7 @@ class ProcessCommitWorker
# manually parse these values.
hash.each do |key, value|
if key.to_s.end_with?(date_suffix) && value.is_a?(String)
- hash[key] = Time.parse(value)
+ hash[key] = Time.zone.parse(value)
end
end
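The `ProcessCommitWorker` hunk above is the same fix applied to parsing: `Time.parse` interprets a timestamp string without an offset in the server's system zone, while `Time.zone.parse` interprets it in the configured application zone. A small sketch, under the same assumptions as above:

```ruby
require 'active_support/all'

Time.zone = 'UTC'

Time.parse('2020-05-01 12:00')      # Time in the host's zone; the instant shifts with TZ
Time.zone.parse('2020-05-01 12:00') # => Fri, 01 May 2020 12:00:00 UTC +00:00
```

Strings that already carry an offset parse to the same instant either way; the difference only bites for bare timestamps like the one above.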
@@ -34,7 +34,7 @@ module RepositoryCheck
end
def perform_repository_checks
- start = Time.now
+ start = Time.current
# This loop will break after a little more than one hour ('a little
# more' because `git fsck` may take a few minutes), or if it runs out of
@@ -42,7 +42,7 @@ module RepositoryCheck
# RepositoryCheckWorker each hour so that as long as there are repositories to
# check, only one (or two) will be checked at a time.
project_ids.each do |project_id|
- break if Time.now - start >= RUN_TIME
+ break if Time.current - start >= RUN_TIME
next unless try_obtain_lease_for_project(project_id)
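An aside that goes beyond the scope of this commit: the two `RepositoryCheck` hunks use wall-clock subtraction to bound a loop's runtime. Moving to `Time.current` keeps both operands on the same clock, which is all this fix needs; for pure duration measurement Ruby also offers a monotonic clock that is immune to NTP and zone adjustments. A sketch, where `check_next_repository` is a hypothetical stand-in for the loop body:

```ruby
start = Process.clock_gettime(Process::CLOCK_MONOTONIC)
loop do
  break if Process.clock_gettime(Process::CLOCK_MONOTONIC) - start >= RUN_TIME

  check_next_repository # hypothetical unit of work
end
```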
@@ -17,7 +17,7 @@ module RepositoryCheck
def update_repository_check_status(project, healthy)
project.update_columns(
last_repository_check_failed: !healthy,
- last_repository_check_at: Time.now
+ last_repository_check_at: Time.current
)
end
@@ -49,8 +49,8 @@ module Geo
job_id = Geo::ProjectSyncWorker.perform_async(
project_id,
- sync_repository: registry.repository_sync_due?(Time.now),
- sync_wiki: registry.wiki_sync_due?(Time.now)
+ sync_repository: registry.repository_sync_due?(Time.current),
+ sync_wiki: registry.wiki_sync_due?(Time.current)
)
{ project_id: project_id, job_id: job_id } if job_id
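In the `Geo::ProjectSyncWorker` hunk, `Time.current` is evaluated twice, so the repository and wiki due-checks see instants a few microseconds apart. This commit does not change the shape of the call, but a single capture would make both checks agree exactly; a hypothetical variant:

```ruby
now = Time.current
job_id = Geo::ProjectSyncWorker.perform_async(
  project_id,
  sync_repository: registry.repository_sync_due?(now),
  sync_wiki: registry.wiki_sync_due?(now)
)
```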
@@ -30,7 +30,7 @@ module Geo
# remaining jobs, excluding ones in progress.
# 5. Quit when we have scheduled all jobs or exceeded MAX_RUNTIME.
def perform
- @start_time = Time.now.utc
+ @start_time = Time.current.utc
@loops = 0
# Prevent multiple Sidekiq workers from attempting to schedule jobs
@@ -65,7 +65,7 @@ module Geo
log_error(err.message)
raise err
ensure
- duration = Time.now.utc - start_time
+ duration = Time.current.utc - start_time
log_info('Finished scheduler', total_loops: loops, duration: duration, reason: reason)
end
end
@@ -108,7 +108,7 @@ module Geo
end
def over_time?
- (Time.now.utc - start_time) >= run_time
+ (Time.current.utc - start_time) >= run_time
end
def should_apply_backoff?
@@ -198,7 +198,7 @@ module Geo
def node_enabled?
# Only check every minute to avoid polling the DB excessively
unless @last_enabled_check.present? && @last_enabled_check > 1.minute.ago
- @last_enabled_check = Time.now
+ @last_enabled_check = Time.current
clear_memoization(:current_node_enabled)
end
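A detail worth noting in the `node_enabled?` hunk: `1.minute.ago` is ActiveSupport shorthand built on `Time.current`, so the old code compared a system-clock `Time.now` against a zone-aware value. The comparison itself resolves to absolute instants either way; the change makes both sides come from the same source, which is the consistency this commit is after:

```ruby
require 'active_support/all'
Time.zone = 'UTC'

1.minute.ago.class # => ActiveSupport::TimeWithZone (derived from Time.current)
```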
@@ -38,7 +38,7 @@ class UpdateAllMirrorsWorker # rubocop:disable Scalability/IdempotentWorker
# Ignore mirrors that become due for scheduling once work begins, so we
# can't end up in an infinite loop
- now = Time.now
+ now = Time.current
last = nil
scheduled = 0
@@ -73,8 +73,8 @@ class UpdateAllMirrorsWorker # rubocop:disable Scalability/IdempotentWorker
if scheduled > 0
# Wait for all ProjectImportScheduleWorker jobs to complete
- deadline = Time.now + SCHEDULE_WAIT_TIMEOUT
- sleep 1 while ProjectImportScheduleWorker.queue_size > 0 && Time.now < deadline
+ deadline = Time.current + SCHEDULE_WAIT_TIMEOUT
+ sleep 1 while ProjectImportScheduleWorker.queue_size > 0 && Time.current < deadline
end
scheduled
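The context line kept above ('Ignore mirrors that become due for scheduling once work begins…') explains why `now` is captured once instead of calling `Time.current` inside the loop: scheduling takes time, so a moving cutoff could keep admitting newly due mirrors indefinitely. A contrived sketch of the hazard, where `due_mirrors` and `schedule` are hypothetical helpers:

```ruby
# Moving cutoff: the due set can grow as fast as it is drained.
until due_mirrors(before: Time.current).empty?
  schedule(due_mirrors(before: Time.current).first)
end

# Snapshot cutoff: the run is bounded by the mirrors due at start.
now = Time.current
due_mirrors(before: now).each { |mirror| schedule(mirror) }
```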
@@ -11,7 +11,7 @@ RSpec.describe Ci::BatchResetMinutesWorker do
id: 1,
shared_runners_minutes_limit: 100,
extra_shared_runners_minutes_limit: 50,
- last_ci_minutes_notification_at: Time.now,
+ last_ci_minutes_notification_at: Time.current,
last_ci_minutes_usage_notification_level: 30)
end
@@ -20,7 +20,7 @@ RSpec.describe Ci::BatchResetMinutesWorker do
id: 10,
shared_runners_minutes_limit: 100,
extra_shared_runners_minutes_limit: 50,
- last_ci_minutes_notification_at: Time.now,
+ last_ci_minutes_notification_at: Time.current,
last_ci_minutes_usage_notification_level: 30)
end
@@ -36,7 +36,7 @@ RSpec.describe ClearSharedRunnersMinutesWorker do
it 'resets timer' do
subject
- expect(statistics.reload.shared_runners_seconds_last_reset).to be_like_time(Time.now)
+ expect(statistics.reload.shared_runners_seconds_last_reset).to be_like_time(Time.current)
end
context 'when there are namespaces that were not reset after the reset steps' do
@@ -68,7 +68,7 @@ RSpec.describe ClearSharedRunnersMinutesWorker do
it 'resets timer' do
subject
- expect(statistics.reload.shared_runners_seconds_last_reset).to be_like_time(Time.now)
+ expect(statistics.reload.shared_runners_seconds_last_reset).to be_like_time(Time.current)
end
end
@@ -118,7 +118,7 @@ RSpec.describe ClearSharedRunnersMinutesWorker do
[:last_ci_minutes_notification_at, :last_ci_minutes_usage_notification_level].each do |attr|
context "when #{attr} is present" do
before do
- namespace.update_attribute(attr, Time.now)
+ namespace.update_attribute(attr, Time.current)
end
it 'nullifies the field' do
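A note on the matcher in the `ClearSharedRunnersMinutesWorker` specs: `be_like_time` is a GitLab spec helper which, if memory serves, asserts that two timestamps are within a second of each other, so it is indifferent to the `Time` vs `ActiveSupport::TimeWithZone` class change. Approximately:

```ruby
# Assumed approximation of the custom matcher, not its actual source.
expect(statistics.reload.shared_runners_seconds_last_reset)
  .to be_within(1.second).of(Time.current)
```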
@@ -12,7 +12,7 @@ describe ClusterUpdateAppWorker do
subject { described_class.new }
around do |example|
- Timecop.freeze(Time.now) { example.run }
+ Timecop.freeze(Time.current) { example.run }
end
before do
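`Timecop.freeze(Time.current)` pins the clock for each example. Timecop patches `Time.now`, and `Time.current` is derived from it, so both return the frozen instant while the block runs; that is what makes the `last_update_started_at` comparisons in these specs deterministic. A sketch:

```ruby
require 'timecop'

Timecop.freeze(Time.current) do
  frozen = Time.current
  sleep 0.01
  Time.current == frozen # => true: the clock does not advance while frozen
end
```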
@@ -22,11 +22,11 @@ describe ClusterUpdateAppWorker do
describe '#perform' do
context 'when the application last_update_started_at is higher than the time the job was scheduled in' do
it 'does nothing' do
- application = create(:clusters_applications_prometheus, :updated, last_update_started_at: Time.now)
+ application = create(:clusters_applications_prometheus, :updated, last_update_started_at: Time.current)
expect(prometheus_update_service).not_to receive(:execute)
- expect(subject.perform(application.name, application.id, project.id, Time.now - 5.minutes)).to be_nil
+ expect(subject.perform(application.name, application.id, project.id, Time.current - 5.minutes)).to be_nil
end
end
@@ -34,7 +34,7 @@ describe ClusterUpdateAppWorker do
it 'returns nil' do
application = create(:clusters_applications_prometheus, :updating)
- expect(subject.perform(application.name, application.id, project.id, Time.now)).to be_nil
+ expect(subject.perform(application.name, application.id, project.id, Time.current)).to be_nil
end
end
@@ -43,7 +43,7 @@ describe ClusterUpdateAppWorker do
expect(prometheus_update_service).to receive(:execute)
- subject.perform(application.name, application.id, project.id, Time.now)
+ subject.perform(application.name, application.id, project.id, Time.current)
end
context 'with exclusive lease' do
@@ -60,7 +60,7 @@ describe ClusterUpdateAppWorker do
it 'does not allow same app to be updated concurrently by same project' do
expect(Clusters::Applications::PrometheusUpdateService).not_to receive(:new)
- subject.perform(application.name, application.id, project.id, Time.now)
+ subject.perform(application.name, application.id, project.id, Time.current)
end
it 'does not allow same app to be updated concurrently by different project', :aggregate_failures do
@@ -68,7 +68,7 @@ describe ClusterUpdateAppWorker do
expect(Clusters::Applications::PrometheusUpdateService).not_to receive(:new)
- subject.perform(application.name, application.id, project1.id, Time.now)
+ subject.perform(application.name, application.id, project1.id, Time.current)
end
it 'allows different app to be updated concurrently by same project' do
@@ -80,7 +80,7 @@ describe ClusterUpdateAppWorker do
expect(Clusters::Applications::PrometheusUpdateService).to receive(:new)
.with(application2, project)
- subject.perform(application2.name, application2.id, project.id, Time.now)
+ subject.perform(application2.name, application2.id, project.id, Time.current)
end
it 'allows different app to be updated by different project', :aggregate_failures do
@@ -94,7 +94,7 @@ describe ClusterUpdateAppWorker do
expect(Clusters::Applications::PrometheusUpdateService).to receive(:new)
.with(application2, project2)
- subject.perform(application2.name, application2.id, project2.id, Time.now)
+ subject.perform(application2.name, application2.id, project2.id, Time.current)
end
end
end
@@ -32,7 +32,7 @@ describe ExpireBuildInstanceArtifactsWorker do
context 'with not yet expired artifacts' do
let_it_be(:build) do
- create(:ci_build, :artifacts, artifacts_expire_at: Time.now + 7.days)
+ create(:ci_build, :artifacts, artifacts_expire_at: Time.current + 7.days)
end
it 'does not expire' do
@@ -18,7 +18,7 @@ describe PipelineMetricsWorker do
ref: 'master',
sha: project.repository.commit('master').id,
started_at: 1.hour.ago,
- finished_at: Time.now)
+ finished_at: Time.current)
end
let(:status) { 'pending' }
@@ -33,7 +33,7 @@ describe PipelineScheduleWorker do
expect(Ci::Pipeline.last).to be_schedule
pipeline_schedule.reload
- expect(pipeline_schedule.next_run_at).to be > Time.now
+ expect(pipeline_schedule.next_run_at).to be > Time.current
expect(pipeline_schedule).to eq(project.ci_pipelines.last.pipeline_schedule)
expect(pipeline_schedule).to be_active
end
@@ -200,7 +200,7 @@ describe ProcessCommitWorker do
it 'parses date strings into Time instances' do
commit = worker.build_commit(project,
id: '123',
- authored_date: Time.now.to_s)
+ authored_date: Time.current.to_s)
expect(commit.authored_date).to be_an_instance_of(Time)
end
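Why the `ProcessCommitWorker` spec above keeps passing after the parser change: `Time.current.to_s` serializes with an explicit offset (for example `2020-05-01 12:00:00 UTC`), so the worker's new `Time.zone.parse` recovers the same instant regardless of the host zone:

```ruby
require 'active_support/all'
Time.zone = 'UTC'

serialized = Time.current.to_s  # => "2020-05-01 12:00:00 UTC"-style string
Time.zone.parse(serialized)     # same instant; the offset travels with the string
```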
@@ -7,7 +7,7 @@ describe RepositoryCheck::ClearWorker do
project = create(:project)
project.update_columns(
last_repository_check_failed: true,
- last_repository_check_at: Time.now
+ last_repository_check_at: Time.current
)
described_class.new.perform
@@ -6,10 +6,10 @@ describe RepositoryUpdateRemoteMirrorWorker, :clean_gitlab_redis_shared_state do
subject { described_class.new }
let(:remote_mirror) { create(:remote_mirror) }
- let(:scheduled_time) { Time.now - 5.minutes }
+ let(:scheduled_time) { Time.current - 5.minutes }
around do |example|
- Timecop.freeze(Time.now) { example.run }
+ Timecop.freeze(Time.current) { example.run }
end
def expect_mirror_service_to_return(mirror, result, tries = 0)
@@ -26,7 +26,7 @@
end
it 'does not do anything if the mirror was already updated' do
- remote_mirror.update(last_update_started_at: Time.now, update_status: :finished)
+ remote_mirror.update(last_update_started_at: Time.current, update_status: :finished)
expect(Projects::UpdateRemoteMirrorService).not_to receive(:new)
@@ -48,7 +48,7 @@ describe RepositoryUpdateRemoteMirrorWorker, :clean_gitlab_redis_shared_state do
expect_next_instance_of(Projects::UpdateRemoteMirrorService) do |service|
expect(service).to receive(:execute).with(remote_mirror, 1).and_raise('Unexpected!')
end
- expect { subject.perform(remote_mirror.id, Time.now, 1) }.to raise_error('Unexpected!')
+ expect { subject.perform(remote_mirror.id, Time.current, 1) }.to raise_error('Unexpected!')
lease = Gitlab::ExclusiveLease.new("#{described_class.name}:#{remote_mirror.id}", timeout: 1.second)