Commit dcbebfa9 authored by GitLab Bot

Automatic merge of gitlab-org/gitlab-ce master

parents 18e6aae4 ac2971a2
import Vue from 'vue';
import { __ } from '../locale';
import createFlash from '../flash';
import axios from '../lib/utils/axios_utils';
import DivergenceGraph from './components/divergence_graph.vue';
export default () => {
  document.querySelectorAll('.js-branch-divergence-graph').forEach(el => {
    const { distance, aheadCount, behindCount, defaultBranch, maxCommits } = el.dataset;

    return new Vue({
      el,
      render(h) {
        return h(DivergenceGraph, {
          props: {
            defaultBranch,
            distance: distance ? parseInt(distance, 10) : null,
            aheadCount: parseInt(aheadCount, 10),
            behindCount: parseInt(behindCount, 10),
            maxCommits: parseInt(maxCommits, 10),
          },
        });
      },
    });
  });
};
export function createGraphVueApp(el, data, maxCommits) {
  return new Vue({
    el,
    render(h) {
      return h(DivergenceGraph, {
        props: {
          defaultBranch: 'master',
          distance: data.distance ? parseInt(data.distance, 10) : null,
          aheadCount: parseInt(data.ahead, 10),
          behindCount: parseInt(data.behind, 10),
          maxCommits,
        },
      });
    },
  });
}

export default endpoint => {
  const names = [...document.querySelectorAll('.js-branch-item')].map(
    ({ dataset }) => dataset.name,
  );

  return axios
    .get(endpoint, {
      params: { names },
    })
    .then(({ data }) => {
      const maxCommits = Object.entries(data).reduce((acc, [, val]) => {
        const max = Math.max(...Object.values(val));
        return max > acc ? max : acc;
      }, 100);

      Object.entries(data).forEach(([branchName, val]) => {
        const el = document.querySelector(`.js-branch-${branchName} .js-branch-divergence-graph`);

        if (!el) return;

        createGraphVueApp(el, val, maxCommits);
      });
    })
    .catch(() =>
      createFlash(__('Error fetching diverging counts for branches. Please try again.')),
    );
};
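For illustration, the maxCommits scale computed above can be reproduced in isolation against a hypothetical payload (branch names and counts here are invented; the real response is keyed by branch name with ahead/behind counts, as the Jest spec further down shows):

// Hypothetical payload from the diverging-counts endpoint.
const data = {
  master: { ahead: 0, behind: 0 },
  'my-feature': { ahead: 3, behind: 120 },
};

// Same reduce as in the .then() handler above: take the largest ahead/behind
// count across all branches, but never let the scale drop below 100.
const maxCommits = Object.entries(data).reduce((acc, [, val]) => {
  const max = Math.max(...Object.values(val));
  return max > acc ? max : acc;
}, 100);

console.log(maxCommits); // 120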
@@ -5,5 +5,5 @@ import initDiverganceGraph from '~/branches/divergence_graph';
document.addEventListener('DOMContentLoaded', () => {
AjaxLoadingSpinner.init();
new DeleteModal(); // eslint-disable-line no-new
initDiverganceGraph();
initDiverganceGraph(document.querySelector('.js-branch-list').dataset.divergingCountsEndpoint);
});
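The initializer above assumes markup along these lines (a minimal sketch in the style of the Jest fixture further down; the endpoint path, branch name, and exact nesting are illustrative): a .js-branch-list container exposing the JSON endpoint through a data attribute, and one .js-branch-item per branch carrying its name plus an empty .js-branch-divergence-graph placeholder for the Vue app to mount into, matching the Haml rendered below.

// Minimal DOM sketch assumed by the initializer above.
document.body.innerHTML = `
  <div class="js-branch-list" data-diverging-counts-endpoint="/-/diverging_counts">
    <ul>
      <li class="branch-item js-branch-item js-branch-master" data-name="master">
        <div class="js-branch-divergence-graph"></div>
      </li>
    </ul>
  </div>
`;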
- merged = local_assigns.fetch(:merged, false)
- commit = @repository.commit(branch.dereferenced_target)
- diverging_commit_counts = Branches::DivergingCommitCountsService.new(@repository).call(branch)
- number_commits_distance = diverging_commit_counts[:distance]
- number_commits_behind = diverging_commit_counts[:behind]
- number_commits_ahead = diverging_commit_counts[:ahead]
- merge_project = merge_request_source_project_for_project(@project)
%li{ class: "branch-item js-branch-#{branch.name}" }
%li{ class: "branch-item js-branch-item js-branch-#{branch.name}", data: { name: branch.name } }
.branch-info
.branch-title
= sprite_icon('fork', size: 12)
@@ -30,7 +26,7 @@
= s_('Branches|Can’t find HEAD commit for this branch')
- if branch.name != @repository.root_ref
.js-branch-divergence-graph{ data: { distance: number_commits_distance, ahead_count: number_commits_ahead, behind_count: number_commits_behind, default_branch: @repository.root_ref, max_commits: @max_commits } }
.js-branch-divergence-graph
.controls.d-none.d-md-block<
- if merge_project && create_mr_button?(@repository.root_ref, branch.name)
@@ -47,6 +47,7 @@
= render_if_exists 'projects/commits/mirror_status'
.js-branch-list{ data: { diverging_counts_endpoint: diverging_commit_counts_namespace_project_branches_path(@project.namespace, @project, format: :json) } }
- if can?(current_user, :admin_project, @project)
- project_settings_link = link_to s_('Branches|project settings'), project_protected_branches_path(@project)
.row-content-block
@@ -43,14 +43,21 @@ if !Rails.env.test? && Gitlab::Metrics.prometheus_metrics_enabled?
end
end
Gitlab::Cluster::LifecycleEvents.on_master_restart do
def cleanup_prometheus_multiproc_dir
# The following is necessary to ensure stale Prometheus metrics don't
# accumulate over time. It needs to be done in this hook as opposed to
# inside an init script to ensure metrics files aren't deleted after new
# unicorn workers start after a SIGUSR2 is received.
prometheus_multiproc_dir = ENV['prometheus_multiproc_dir']
if prometheus_multiproc_dir
old_metrics = Dir[File.join(prometheus_multiproc_dir, '*.db')]
if dir = ::Prometheus::Client.configuration.multiprocess_files_dir
old_metrics = Dir[File.join(dir, '*.db')]
FileUtils.rm_rf(old_metrics)
end
end
Gitlab::Cluster::LifecycleEvents.on_master_start do
cleanup_prometheus_multiproc_dir
end
Gitlab::Cluster::LifecycleEvents.on_master_restart do
cleanup_prometheus_multiproc_dir
end
@@ -88,9 +88,21 @@ before_exec do |server|
Gitlab::Cluster::LifecycleEvents.do_master_restart
end
run_once = true
before_fork do |server, worker|
# Signal application hooks that we're about to fork
Gitlab::Cluster::LifecycleEvents.do_before_fork
if run_once
# There is a difference between Puma and Unicorn:
# - Puma calls before_fork once when booting up master process
# - Unicorn runs before_fork whenever a new worker is spawned
# To unify this behavior we call before_fork only once (we use
# this callback for deleting Prometheus files so for our purposes
# it makes sense to align behavior with Puma)
run_once = false
# Signal application hooks that we're about to fork
Gitlab::Cluster::LifecycleEvents.do_before_fork
end
# The following is only recommended for memory/DB-constrained
# installations. It is not needed if your system can house
@@ -21,9 +21,21 @@ before_exec do |server|
Gitlab::Cluster::LifecycleEvents.do_master_restart
end
run_once = true
before_fork do |server, worker|
# Signal application hooks that we're about to fork
Gitlab::Cluster::LifecycleEvents.do_before_fork
if run_once
# There is a difference between Puma and Unicorn:
# - Puma calls before_fork once when booting up master process
# - Unicorn runs before_fork whenever a new worker is spawned
# To unify this behavior we call before_fork only once (we use
# this callback for deleting Prometheus files so for our purposes
# it makes sense to align behavior with Puma)
run_once = false
# Signal application hooks that we're about to fork
Gitlab::Cluster::LifecycleEvents.do_before_fork
end
# The following is only recommended for memory/DB-constrained
# installations. It is not needed if your system can house
@@ -327,7 +327,7 @@ However, if a spec makes direct Redis calls, it should mark itself with the
`:clean_gitlab_redis_queues` traits as appropriate.
Sidekiq jobs are typically not run in specs, but this behaviour can be altered
in each spec through the use of `Sidekiq::Testing.inline!` blocks. Any spec that
in each spec through the use of `perform_enqueued_jobs` blocks. Any spec that
causes Sidekiq jobs to be pushed to Redis should use the `:sidekiq` trait, to
ensure that they are removed once the spec completes.
@@ -11,6 +11,9 @@ module Gitlab
# We have three lifecycle events.
#
# - before_fork (only in forking processes)
# In forking processes (Unicorn and Puma in multiprocess mode) this
# will be called exactly once, on startup, before the workers are
# forked. This will be called in the parent process.
# - worker_start
# - before_master_restart (only in forking processes)
#
@@ -5191,6 +5191,9 @@ msgstr ""
msgid "Error fetching contributors data."
msgstr ""
msgid "Error fetching diverging counts for branches. Please try again."
msgstr ""
msgid "Error fetching labels."
msgstr ""
import MockAdapter from 'axios-mock-adapter';
import axios from '~/lib/utils/axios_utils';
import init from '~/branches/divergence_graph';
describe('Divergence graph', () => {
  let mock;

  beforeEach(() => {
    mock = new MockAdapter(axios);

    mock.onGet('/-/diverging_counts').reply(200, {
      master: { ahead: 1, behind: 1 },
    });

    jest.spyOn(axios, 'get');

    document.body.innerHTML = `
      <div class="js-branch-item" data-name="master"></div>
    `;
  });

  afterEach(() => {
    mock.restore();
  });

  it('calls axios get with list of branch names', () =>
    init('/-/diverging_counts').then(() => {
      expect(axios.get).toHaveBeenCalledWith('/-/diverging_counts', {
        params: { names: ['master'] },
      });
    }));
});