Commit 58ceab72 authored by Douwe Maan

Merge branch '119-remove-gitlab-reference-counter' into 'master'

Remove direct redis integration

Closes #119

See merge request gitlab-org/gitlab-shell!181
parents fa2b35a7 d1c01fe8
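The net effect of this MR: gitlab-shell no longer opens its own Redis connection anywhere; the hooks and the check task go through the internal GitLab API via GitlabNet instead. A minimal sketch of the pre-receive side, assuming only the GitlabNet#pre_receive call visible in the hunks below (the surrounding script, the exit handling, and the 'project-1' identifier taken from the specs are illustrative only):

require_relative 'lib/gitlab_net'

gl_repository = 'project-1' # identifier in the format used by the specs below

# Ask gitlab-rails, over the internal API, to increase the reference counter.
# Before this MR the NotFound fallback wrote to Redis via GitlabReferenceCounter.
result = GitlabNet.new.pre_receive(gl_repository)
exit 1 unless result['reference_counter_increased']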
REDIS_RB_VERSION=v3.3.3
REDIS_RB_VENDOR_DIR=lib/vendor/redis
PWD=`pwd`
all:
update-redis:
rm -rf $(REDIS_RB_VENDOR_DIR)
git clone -b $(REDIS_RB_VERSION) https://github.com/redis/redis-rb.git $(REDIS_RB_VENDOR_DIR)
rm -rf $(REDIS_RB_VENDOR_DIR)/.git
.PHONY: update-redis
@@ -3,17 +3,6 @@
require_relative '../lib/gitlab_init'
require_relative '../lib/gitlab_net'
def ping_redis
print "Send ping to redis server: "
if GitlabNet.new.redis_client.ping
print 'OK'
else
abort 'FAILED'
end
puts "\n"
end
#
# GitLab shell check task
#
@@ -30,16 +19,12 @@ begin
check_values = JSON.parse(resp.body)
if check_values.key?('redis')
print 'Redis available via internal API: '
if check_values['redis']
puts 'OK'
else
abort 'FAILED'
end
else
ping_redis
end
rescue GitlabNet::ApiUnreachableError
abort "FAILED: Failed to connect to internal API"
end
@@ -35,23 +35,6 @@ auth_file: "/home/git/.ssh/authorized_keys"
# Default is hooks in the gitlab-shell directory.
# custom_hooks_dir: "/home/git/gitlab-shell/hooks"
# Redis settings used for pushing commit notices to gitlab
redis:
# host: 127.0.0.1
# port: 6379
# pass: redispass # Allows you to specify the password for Redis
database: 0
socket: /var/run/redis/redis.sock # Comment out this line if you want to use TCP or Sentinel
namespace: resque:gitlab
# sentinels:
# -
# host: 127.0.0.1
# port: 26380
# -
# host: 127.0.0.1
# port: 26381
# Log file.
# Default is gitlab-shell.log in the root directory.
# log_file: "/home/git/gitlab-shell/gitlab-shell.log"
@@ -13,8 +13,6 @@ def increase_reference_counter(gl_repository, repo_path)
result = GitlabNet.new.pre_receive(gl_repository)
result['reference_counter_increased']
rescue GitlabNet::NotFound
GitlabReferenceCounter.new(repo_path).increase
end
require_relative '../lib/gitlab_custom_hook'
@@ -34,14 +34,6 @@ class GitlabConfig
@config['http_settings'] ||= {}
end
def redis
@config['redis'] ||= {}
end
def redis_namespace
redis['namespace'] || 'resque:gitlab'
end
def log_file
@config['log_file'] ||= File.join(ROOT_PATH, 'gitlab-shell.log')
end
@@ -5,7 +5,6 @@ require 'json'
require_relative 'gitlab_config'
require_relative 'gitlab_logger'
require_relative 'gitlab_access'
require_relative 'gitlab_redis'
require_relative 'gitlab_lfs_authentication'
require_relative 'httpunix'
@@ -140,30 +139,6 @@ class GitlabNet
JSON.parse(resp.body) if resp.code == '200'
end
def redis_client
redis_config = config.redis
database = redis_config['database'] || 0
params = {
host: redis_config['host'] || '127.0.0.1',
port: redis_config['port'] || 6379,
db: database
}
if redis_config.has_key?('sentinels')
params[:sentinels] = redis_config['sentinels']
.select { |s| s['host'] && s['port'] }
.map { |s| { host: s['host'], port: s['port'] } }
end
if redis_config.has_key?("socket")
params = { path: redis_config['socket'], db: database }
elsif redis_config.has_key?("pass")
params[:password] = redis_config['pass']
end
Redis.new(params)
end
protected
def sanitize_path(repo)
require_relative 'gitlab_init'
require_relative 'gitlab_net'
require_relative 'gitlab_reference_counter'
require_relative 'gitlab_metrics'
require 'json'
require 'base64'
@@ -32,8 +31,6 @@ class GitlabPostReceive
response['reference_counter_decreased']
rescue GitlabNet::ApiUnreachableError
false
rescue GitlabNet::NotFound
fallback_post_receive
end
protected
@@ -95,55 +92,4 @@ class GitlabPostReceive
puts
puts "=" * total_width
end
def update_redis
# Encode changes as base64 so we don't run into trouble with non-UTF-8 input.
changes = Base64.encode64(@changes)
# TODO: Change to `@gl_repository` in next release.
# See https://gitlab.com/gitlab-org/gitlab-shell/merge_requests/130#note_28747613
project_identifier = @gl_repository || @repo_path
queue = "#{config.redis_namespace}:queue:post_receive"
msg = JSON.dump({
'class' => 'PostReceive',
'args' => [project_identifier, @actor, changes],
'jid' => @jid,
'enqueued_at' => Time.now.to_f
})
begin
GitlabNet.new.redis_client.rpush(queue, msg)
true
rescue => e
$stderr.puts "GitLab: An unexpected error occurred in writing to Redis: #{e}"
false
end
end
private
def fallback_post_receive
result = update_redis
begin
broadcast_message = GitlabMetrics.measure("broadcast-message") do
api.broadcast_message
end
if broadcast_message.has_key?("message")
print_broadcast_message(broadcast_message["message"])
end
merge_request_urls = GitlabMetrics.measure("merge-request-urls") do
api.merge_request_urls(@gl_repository, @repo_path, @changes)
end
print_merge_request_links(merge_request_urls)
api.notify_post_receive(gl_repository, repo_path)
rescue GitlabNet::ApiUnreachableError
nil
end
result && GitlabReferenceCounter.new(repo_path).decrease
end
end
$:.unshift(File.expand_path(File.join(File.dirname(__FILE__), 'vendor/redis/lib')))
require 'redis'
require_relative 'gitlab_init'
require_relative 'gitlab_net'
class GitlabReferenceCounter
REFERENCE_EXPIRE_TIME = 600
attr_reader :path, :key
def initialize(path)
@path = path
@key = "git-receive-pack-reference-counter:#{path}"
end
def value
(redis_client.get(key) || 0).to_i
end
def increase
redis_cmd do
redis_client.incr(key)
redis_client.expire(key, REFERENCE_EXPIRE_TIME)
end
end
def decrease
redis_cmd do
current_value = redis_client.decr(key)
if current_value < 0
$logger.warn "Reference counter for #{path} decreased when its value was less than 1. Resetting the counter."
redis_client.del(key)
end
end
end
private
def redis_client
@redis_client ||= GitlabNet.new.redis_client
end
def redis_cmd
begin
yield
true
rescue => e
message = "GitLab: An unexpected error occurred in writing to Redis: #{e}"
$stderr.puts message
$logger.error message
false
end
end
end
require "redis/connection/registry"
# If a connection driver was required before this file, the array
# Redis::Connection.drivers will contain one or more classes. The last driver
# in this array will be used as default driver. If this array is empty, we load
# the plain Ruby driver as our default. Another driver can be required at a
# later point in time, causing it to be the last element of the #drivers array
# and therefore be chosen by default.
require "redis/connection/ruby" if Redis::Connection.drivers.empty?
\ No newline at end of file
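As the comment above explains, the last required driver becomes the default; a hedged illustration of that behaviour in plain redis-rb 3.x (not part of this MR, and assuming the hiredis gem is installed):

require "redis/connection/hiredis"  # appends Redis::Connection::Hiredis to the registry
require "redis"                      # skips the pure-Ruby driver, since the registry is no longer empty

Redis::Connection.drivers.last       # => Redis::Connection::Hiredis, picked up as the default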
class Redis
module Connection
module CommandHelper
COMMAND_DELIMITER = "\r\n"
def build_command(args)
command = [nil]
args.each do |i|
if i.is_a? Array
i.each do |j|
j = j.to_s
command << "$#{j.bytesize}"
command << j
end
else
i = i.to_s
command << "$#{i.bytesize}"
command << i
end
end
command[0] = "*#{(command.length - 1) / 2}"
# Trailing delimiter
command << ""
command.join(COMMAND_DELIMITER)
end
protected
if defined?(Encoding::default_external)
def encode(string)
string.force_encoding(Encoding::default_external)
end
else
def encode(string)
string
end
end
end
end
end
require "redis/connection/registry"
require "redis/errors"
require "hiredis/connection"
require "timeout"
class Redis
module Connection
class Hiredis
def self.connect(config)
connection = ::Hiredis::Connection.new
connect_timeout = (config.fetch(:connect_timeout, 0) * 1_000_000).to_i
if config[:scheme] == "unix"
connection.connect_unix(config[:path], connect_timeout)
elsif config[:scheme] == "rediss" || config[:ssl]
raise NotImplementedError, "SSL not supported by hiredis driver"
else
connection.connect(config[:host], config[:port], connect_timeout)
end
instance = new(connection)
instance.timeout = config[:read_timeout]
instance
rescue Errno::ETIMEDOUT
raise TimeoutError
end
def initialize(connection)
@connection = connection
end
def connected?
@connection && @connection.connected?
end
def timeout=(timeout)
# Hiredis works with microsecond timeouts
@connection.timeout = Integer(timeout * 1_000_000)
end
def disconnect
@connection.disconnect
@connection = nil
end
def write(command)
@connection.write(command.flatten(1))
rescue Errno::EAGAIN
raise TimeoutError
end
def read
reply = @connection.read
reply = CommandError.new(reply.message) if reply.is_a?(RuntimeError)
reply
rescue Errno::EAGAIN
raise TimeoutError
rescue RuntimeError => err
raise ProtocolError.new(err.message)
end
end
end
end
Redis::Connection.drivers << Redis::Connection::Hiredis
class Redis
module Connection
# Store a list of loaded connection drivers in the Connection module.
# Redis::Client uses the last required driver by default, and will be aware
# of the loaded connection drivers if the user chooses to override the
# default connection driver.
def self.drivers
@drivers ||= []
end
end
end
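The registry also backs the per-client override mentioned in the comment above; a hedged example, again plain redis-rb usage rather than anything gitlab-shell does:

require "redis"

# :driver is resolved against Redis::Connection (e.g. :ruby -> Redis::Connection::Ruby),
# overriding whichever driver happens to sit last in the registry.
redis = Redis.new(host: "127.0.0.1", port: 6379, driver: :ruby)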
require "redis/connection/command_helper"
require "redis/connection/registry"
require "redis/errors"
require "em-synchrony"
require "hiredis/reader"
class Redis
module Connection
class RedisClient < EventMachine::Connection
include EventMachine::Deferrable
attr_accessor :timeout
def post_init
@req = nil
@connected = false
@reader = ::Hiredis::Reader.new
end
def connection_completed
@connected = true
succeed
end
def connected?
@connected
end
def receive_data(data)
@reader.feed(data)
loop do
begin
reply = @reader.gets
rescue RuntimeError => err
@req.fail [:error, ProtocolError.new(err.message)]
break
end
break if reply == false
reply = CommandError.new(reply.message) if reply.is_a?(RuntimeError)
@req.succeed [:reply, reply]
end
end
def read
@req = EventMachine::DefaultDeferrable.new
if @timeout > 0
@req.timeout(@timeout, :timeout)
end
EventMachine::Synchrony.sync @req
end
def send(data)
callback { send_data data }
end
def unbind
@connected = false
if @req
@req.fail [:error, Errno::ECONNRESET]
@req = nil
else
fail
end
end
end
class Synchrony
include Redis::Connection::CommandHelper
def self.connect(config)
if config[:scheme] == "unix"
conn = EventMachine.connect_unix_domain(config[:path], RedisClient)
elsif config[:scheme] == "rediss" || config[:ssl]
raise NotImplementedError, "SSL not supported by synchrony driver"
else
conn = EventMachine.connect(config[:host], config[:port], RedisClient) do |c|
c.pending_connect_timeout = [config[:connect_timeout], 0.1].max
end
end
fiber = Fiber.current
conn.callback { fiber.resume }
conn.errback { fiber.resume :refused }
raise Errno::ECONNREFUSED if Fiber.yield == :refused
instance = new(conn)
instance.timeout = config[:read_timeout]
instance
end
def initialize(connection)
@connection = connection
end
def connected?
@connection && @connection.connected?
end
def timeout=(timeout)
@connection.timeout = timeout
end
def disconnect
@connection.close_connection
@connection = nil
end
def write(command)
@connection.send(build_command(command))
end
def read
type, payload = @connection.read
if type == :reply
payload
elsif type == :error
raise payload
elsif type == :timeout
raise TimeoutError
else
raise "Unknown type #{type.inspect}"
end
end
end
end
end
Redis::Connection.drivers << Redis::Connection::Synchrony
class Redis
# Base error for all redis-rb errors.
class BaseError < RuntimeError
end
# Raised by the connection when a protocol error occurs.
class ProtocolError < BaseError
def initialize(reply_type)
super(<<-EOS.gsub(/(?:^|\n)\s*/, " "))
Got '#{reply_type}' as initial reply byte.
If you're in a forking environment, such as Unicorn, you need to
connect to Redis after forking.
EOS
end
end
# Raised by the client when command execution returns an error reply.
class CommandError < BaseError
end
# Base error for connection related errors.
class BaseConnectionError < BaseError
end
# Raised when connection to a Redis server cannot be made.
class CannotConnectError < BaseConnectionError
end
# Raised when connection to a Redis server is lost.
class ConnectionError < BaseConnectionError
end
# Raised when performing I/O times out.
class TimeoutError < BaseConnectionError
end
# Raised when the connection was inherited by a child process.
class InheritedError < BaseConnectionError
end
end
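The ProtocolError message above describes the usual forking-server gotcha; a hedged sketch of the standard remedy (only Unicorn's after_fork hook is real API here, the $redis global and connection options are illustrative):

# config/unicorn.rb
after_fork do |_server, _worker|
  # Re-establish the Redis connection in each worker instead of inheriting the
  # parent's socket, which is what leads to ProtocolError / InheritedError.
  $redis = Redis.new(host: "127.0.0.1", port: 6379)
end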
require 'zlib'
class Redis
class HashRing
POINTS_PER_SERVER = 160 # this is the default in libmemcached
attr_reader :ring, :sorted_keys, :replicas, :nodes
# nodes is a list of objects that have a proper to_s representation.
# replicas indicates how many virtual points should be used pr. node,
# replicas are required to improve the distribution.
def initialize(nodes=[], replicas=POINTS_PER_SERVER)
@replicas = replicas
@ring = {}
@nodes = []
@sorted_keys = []
nodes.each do |node|
add_node(node)
end
end
# Adds a `node` to the hash ring (including a number of replicas).
def add_node(node)
@nodes << node
@replicas.times do |i|
key = Zlib.crc32("#{node.id}:#{i}")
raise "Node ID collision" if @ring.has_key?(key)
@ring[key] = node
@sorted_keys << key
end
@sorted_keys.sort!
end
def remove_node(node)
@nodes.reject!{|n| n.id == node.id}
@replicas.times do |i|
key = Zlib.crc32("#{node.id}:#{i}")
@ring.delete(key)
@sorted_keys.reject! {|k| k == key}
end
end
# get the node in the hash ring for this key
def get_node(key)
get_node_pos(key)[0]
end
def get_node_pos(key)
return [nil,nil] if @ring.size == 0
crc = Zlib.crc32(key)
idx = HashRing.binary_search(@sorted_keys, crc)
return [@ring[@sorted_keys[idx]], idx]
end
def iter_nodes(key)
return [nil,nil] if @ring.size == 0
_, pos = get_node_pos(key)
@ring.size.times do |n|
yield @ring[@sorted_keys[(pos+n) % @ring.size]]
end
end
class << self
# gem install RubyInline to use this code
# Native extension to perform the binary search within the hashring.
# There's a pure ruby version below so this is purely optional
# for performance. In testing 20k gets and sets, the native
# binary search shaved about 12% off the runtime (9sec -> 8sec).
begin
require 'inline'
inline do |builder|
builder.c <<-EOM
int binary_search(VALUE ary, unsigned int r) {
int upper = RARRAY_LEN(ary) - 1;
int lower = 0;
int idx = 0;
while (lower <= upper) {
idx = (lower + upper) / 2;
VALUE continuumValue = RARRAY_PTR(ary)[idx];
unsigned int l = NUM2UINT(continuumValue);
if (l == r) {
return idx;
}
else if (l > r) {
upper = idx - 1;
}
else {
lower = idx + 1;
}
}
if (upper < 0) {
upper = RARRAY_LEN(ary) - 1;
}
return upper;
}
EOM
end
rescue Exception
# Find the closest index in HashRing with value <= the given value
def binary_search(ary, value, &block)
upper = ary.size - 1
lower = 0
idx = 0
while(lower <= upper) do
idx = (lower + upper) / 2
comp = ary[idx] <=> value
if comp == 0
return idx
elsif comp > 0
upper = idx - 1
else
lower = idx + 1
end
end
if upper < 0
upper = ary.size - 1
end
return upper
end
end
end
end
end
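The comments above describe the consistent-hashing scheme; a hedged usage sketch of the vendored class (node objects only need to respond to #id, so the Struct and key names are illustrative):

Node = Struct.new(:id)

ring = Redis::HashRing.new([Node.new("redis-1"), Node.new("redis-2"), Node.new("redis-3")])

ring.get_node("session:42")           # node whose ring point follows CRC32("session:42")
ring.remove_node(Node.new("redis-2")) # drops all replica points for that node id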
class Redis
unless defined?(::BasicObject)
class BasicObject
instance_methods.each { |meth| undef_method(meth) unless meth =~ /\A(__|instance_eval)/ }
end
end
class Pipeline
attr_accessor :db
attr :futures
def initialize
@with_reconnect = true
@shutdown = false
@futures = []
end
def with_reconnect?
@with_reconnect
end
def without_reconnect?
!@with_reconnect
end
def shutdown?
@shutdown
end
def call(command, &block)
# A pipeline that contains a shutdown should not raise ECONNRESET when
# the connection is gone.
@shutdown = true if command.first == :shutdown
future = Future.new(command, block)
@futures << future
future
end
def call_pipeline(pipeline)
@shutdown = true if pipeline.shutdown?
@futures.concat(pipeline.futures)
@db = pipeline.db
nil
end
def commands
@futures.map { |f| f._command }
end
def with_reconnect(val=true)
@with_reconnect = false unless val
yield
end
def without_reconnect(&blk)
with_reconnect(false, &blk)
end
def finish(replies, &blk)
if blk
futures.each_with_index.map do |future, i|
future._set(blk.call(replies[i]))
end
else
futures.each_with_index.map do |future, i|
future._set(replies[i])
end
end
end
class Multi < self
def finish(replies)
exec = replies.last
return if exec.nil? # The transaction failed because of WATCH.
# EXEC command failed.
raise exec if exec.is_a?(CommandError)
if exec.size < futures.size
# Some command wasn't recognized by Redis.
raise replies.detect { |r| r.is_a?(CommandError) }
end
super(exec) do |reply|
# Because an EXEC returns nested replies, hiredis won't be able to
# convert an error reply to a CommandError instance itself. This is
# specific to MULTI/EXEC, so we solve this here.
reply.is_a?(::RuntimeError) ? CommandError.new(reply.message) : reply
end
end
def commands
[[:multi]] + super + [[:exec]]
end
end
end
class FutureNotReady < RuntimeError
def initialize
super("Value will be available once the pipeline executes.")
end
end
class Future < BasicObject
FutureNotReady = ::Redis::FutureNotReady.new
def initialize(command, transformation)
@command = command
@transformation = transformation
@object = FutureNotReady
end
def inspect
"<Redis::Future #{@command.inspect}>"
end
def _set(object)
@object = @transformation ? @transformation.call(object) : object
value
end
def _command
@command
end
def value
::Kernel.raise(@object) if @object.kind_of?(::RuntimeError)
@object
end
def is_a?(other)
self.class.ancestors.include?(other)
end
def class
Future
end
end
end
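The Future machinery above is what Redis#pipelined (and Multi, for MULTI/EXEC) hands back to callers; a hedged redis-rb 3.x usage sketch with illustrative key names:

redis = Redis.new

incremented = nil
redis.pipelined do
  redis.set("counter", 0)
  incremented = redis.incr("counter") # a Redis::Future at this point, not 1
end

incremented.value # => 1 once the pipeline has been flushed; before that it raises FutureNotReady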
class Redis
class SubscribedClient
def initialize(client)
@client = client
end
def call(command)
@client.process([command])
end
def subscribe(*channels, &block)
subscription("subscribe", "unsubscribe", channels, block)
end
def subscribe_with_timeout(timeout, *channels, &block)
subscription("subscribe", "unsubscribe", channels, block, timeout)
end
def psubscribe(*channels, &block)
subscription("psubscribe", "punsubscribe", channels, block)
end
def psubscribe_with_timeout(timeout, *channels, &block)
subscription("psubscribe", "punsubscribe", channels, block, timeout)
end
def unsubscribe(*channels)
call([:unsubscribe, *channels])
end
def punsubscribe(*channels)
call([:punsubscribe, *channels])
end
protected
def subscription(start, stop, channels, block, timeout = 0)
sub = Subscription.new(&block)
unsubscribed = false
begin
@client.call_loop([start, *channels], timeout) do |line|
type, *rest = line
sub.callbacks[type].call(*rest)
unsubscribed = type == stop && rest.last == 0
break if unsubscribed
end
ensure
# No need to unsubscribe here. The real client closes the connection
# whenever an exception is raised (see #ensure_connected).
end
end
end
class Subscription
attr :callbacks
def initialize
@callbacks = Hash.new do |hash, key|
hash[key] = lambda { |*_| }
end
yield(self)
end
def subscribe(&block)
@callbacks["subscribe"] = block
end
def unsubscribe(&block)
@callbacks["unsubscribe"] = block
end
def message(&block)
@callbacks["message"] = block
end
def psubscribe(&block)
@callbacks["psubscribe"] = block
end
def punsubscribe(&block)
@callbacks["punsubscribe"] = block
end
def pmessage(&block)
@callbacks["pmessage"] = block
end
end
end
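A hedged sketch of how these callbacks are wired up through Redis#subscribe in redis-rb 3.x (channel and message contents are illustrative):

redis = Redis.new

redis.subscribe("notifications") do |on|
  on.subscribe   { |channel, total| puts "joined #{channel} (#{total} subscriptions)" }
  on.message do |channel, message|
    puts "#{channel}: #{message}"
    redis.unsubscribe if message == "exit" # ends the blocking subscribe loop
  end
  on.unsubscribe { |channel, total| puts "left #{channel}" }
end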
class Redis
VERSION = "3.3.3"
end
redis:
host: 127.0.1.1
port: 6378
pass: secure
database: 1
socket: /var/run/redis/redis.sock
namespace: my:gitlab
sentinels:
-
host: 127.0.0.1
port: 26380
@@ -4,21 +4,6 @@ require_relative '../lib/gitlab_config'
describe GitlabConfig do
let(:config) { GitlabConfig.new }
describe :redis do
before do
config_file = File.read('spec/fixtures/gitlab_config_redis.yml')
config.instance_variable_set(:@config, YAML.load(config_file))
end
it { config.redis['host'].should eq('127.0.1.1') }
it { config.redis['port'].should eq(6378) }
it { config.redis['database'].should eq(1) }
it { config.redis['namespace'].should eq('my:gitlab') }
it { config.redis['socket'].should eq('/var/run/redis/redis.sock') }
it { config.redis['pass'].should eq('secure') }
it { config.redis['sentinels'].should eq([{ 'host' => '127.0.0.1', 'port' => 26380 }]) }
end
describe :gitlab_url do
let(:url) { 'http://test.com' }
subject { config.gitlab_url }
@@ -434,60 +434,4 @@ describe GitlabNet, vcr: true do
store.should_receive(:add_path).with('test_path')
end
end
describe '#redis_client' do
let(:config) { double('config') }
context "with empty redis config" do
it 'returns default parameters' do
allow(gitlab_net).to receive(:config).and_return(config)
allow(config).to receive(:redis).and_return( {} )
expect_any_instance_of(Redis).to receive(:initialize).with({ host: '127.0.0.1',
port: 6379,
db: 0 })
gitlab_net.redis_client
end
end
context "with password" do
it 'uses the specified host, port, and password' do
allow(gitlab_net).to receive(:config).and_return(config)
allow(config).to receive(:redis).and_return( { 'host' => 'localhost', 'port' => 1123, 'pass' => 'secret' } )
expect_any_instance_of(Redis).to receive(:initialize).with({ host: 'localhost',
port: 1123,
db: 0,
password: 'secret'})
gitlab_net.redis_client
end
end
context "with sentinels" do
it 'uses the specified sentinels' do
allow(gitlab_net).to receive(:config).and_return(config)
allow(config).to receive(:redis).and_return({ 'host' => 'localhost', 'port' => 1123,
'sentinels' => [{'host' => '127.0.0.1', 'port' => 26380}] })
expect_any_instance_of(Redis).to receive(:initialize).with({ host: 'localhost',
port: 1123,
db: 0,
sentinels: [{host: '127.0.0.1', port: 26380}] })
gitlab_net.redis_client
end
end
context "with redis socket" do
let(:socket) { '/tmp/redis.socket' }
it 'uses the socket' do
allow(gitlab_net).to receive(:config).and_return(config)
allow(config).to receive(:redis).and_return( { 'socket' => socket })
expect_any_instance_of(Redis).to receive(:initialize).with({ path: socket, db: 0 })
gitlab_net.redis_client
end
end
end
end
@@ -13,7 +13,6 @@ describe GitlabPostReceive do
let(:gl_repository) { "project-1" }
let(:gitlab_post_receive) { GitlabPostReceive.new(gl_repository, repo_path, actor, wrongly_encoded_changes) }
let(:broadcast_message) { "test " * 10 + "message " * 10 }
let(:redis_client) { double('redis_client') }
let(:enqueued_at) { Time.new(2016, 6, 23, 6, 59) }
let(:new_merge_request_urls) do
[{
@@ -36,151 +35,6 @@ describe GitlabPostReceive do
end
describe "#exec" do
context 'when the new post_receive API endpoint is not available' do
before do
GitlabNet.any_instance.stub(broadcast_message: { })
GitlabNet.any_instance.stub(:merge_request_urls).with(gl_repository, repo_path, wrongly_encoded_changes) { [] }
GitlabNet.any_instance.stub(notify_post_receive: true)
allow_any_instance_of(GitlabNet).to receive(:post_receive).and_raise(GitlabNet::NotFound)
allow_any_instance_of(GitlabNet).to receive(:redis_client).and_return(redis_client)
allow_any_instance_of(GitlabReferenceCounter).to receive(:redis_client).and_return(redis_client)
allow(redis_client).to receive(:get).and_return(1)
allow(redis_client).to receive(:incr).and_return(true)
allow(redis_client).to receive(:decr).and_return(0)
allow(redis_client).to receive(:rpush).and_return(true)
expect(Time).to receive(:now).and_return(enqueued_at)
end
context 'Without broadcast message' do
context 'pushing new branch' do
before do
GitlabNet.any_instance.stub(:merge_request_urls).with(gl_repository, repo_path, wrongly_encoded_changes) do
new_merge_request_urls
end
end
it "prints the new merge request url" do
assert_new_mr_printed(gitlab_post_receive)
gitlab_post_receive.exec
end
end
context 'pushing existing branch with merge request created' do
before do
GitlabNet.any_instance.stub(:merge_request_urls).with(gl_repository, repo_path, wrongly_encoded_changes) do
existing_merge_request_urls
end
end
it "prints the view merge request url" do
assert_existing_mr_printed(gitlab_post_receive)
gitlab_post_receive.exec
end
end
end
context 'show broadcast message and merge request link' do
before do
GitlabNet.any_instance.stub(:merge_request_urls).with(gl_repository, repo_path, wrongly_encoded_changes) do
new_merge_request_urls
end
GitlabNet.any_instance.stub(broadcast_message: { "message" => broadcast_message })
end
it 'prints the broadcast message and the new merge request link' do
assert_broadcast_message_printed(gitlab_post_receive)
assert_new_mr_printed(gitlab_post_receive)
gitlab_post_receive.exec
end
end
context 'Sidekiq jobs' do
it "pushes a Sidekiq job onto the queue" do
expect(redis_client).to receive(:rpush).with(
'resque:gitlab:queue:post_receive',
%Q/{"class":"PostReceive","args":["#{gl_repository}","#{actor}",#{base64_changes.inspect}],"jid":"#{gitlab_post_receive.jid}","enqueued_at":#{enqueued_at.to_f}}/
).and_return(true)
gitlab_post_receive.exec
end
context 'when gl_repository is nil' do
let(:gl_repository) { nil }
it "pushes a Sidekiq job with the repository path" do
expect(redis_client).to receive(:rpush).with(
'resque:gitlab:queue:post_receive',
%Q/{"class":"PostReceive","args":["#{repo_path}","#{actor}",#{base64_changes.inspect}],"jid":"#{gitlab_post_receive.jid}","enqueued_at":#{enqueued_at.to_f}}/
).and_return(true)
gitlab_post_receive.exec
end
end
end
context 'reference counter' do
it 'decreases the reference counter for the project' do
expect_any_instance_of(GitlabReferenceCounter).to receive(:decrease).and_return(true)
gitlab_post_receive.exec
end
context "when the redis command succeeds" do
before do
allow(redis_client).to receive(:decr).and_return(0)
end
it "returns true" do
expect(gitlab_post_receive.exec).to eq(true)
end
end
context "when the redis command fails" do
before do
allow(redis_client).to receive(:decr).and_raise('Fail')
end
it "returns false" do
expect(gitlab_post_receive.exec).to eq(false)
end
end
end
context 'post_receive notification' do
it 'calls the api to notify the execution of the hook' do
expect_any_instance_of(GitlabNet).to receive(:notify_post_receive).
with(gl_repository, repo_path)
gitlab_post_receive.exec
end
end
context "when the redis command succeeds" do
before do
allow(redis_client).to receive(:rpush).and_return(true)
end
it "returns true" do
expect(gitlab_post_receive.exec).to eq(true)
end
end
context "when the redis command fails" do
before do
allow(redis_client).to receive(:rpush).and_raise('Fail')
end
it "returns false" do
expect(gitlab_post_receive.exec).to eq(false)
end
end
end
context 'when the new post_receive API endpoint is available' do
let(:response) { { 'reference_counter_decreased' => true } }
it 'calls the api to notify the execution of the hook' do
@@ -231,7 +85,6 @@ describe GitlabPostReceive do
end
end
end
end
private