Remove resque_rescheduler as it was killing our app servers (receiving http requests).

Revert "retry less"

This reverts commit 7784156e3b.

Revert "supress resque retries"

This reverts commit 80cdcd4edd.

Revert "Retry federation if remote pod is down"

This reverts commit bcbc86e502.

Conflicts:

	app/models/jobs/http_multi.rb
Author: danielgrippi
Date:   2011-11-07 12:37:54 -08:00
Parent: 9d91b70f9d
Commit: 6f7c9e9301

9 changed files with 30 additions and 66 deletions

View file

@@ -71,8 +71,6 @@ gem 'ruby-oembed'
 gem 'resque', '1.10.0'
 gem 'resque-ensure-connected'
 gem 'resque-timeout', '1.0.0'
-gem 'resque-scheduler'
-gem 'resque-retry'
 gem 'SystemTimer', '1.2.1', :platforms => :ruby_18

 # reporting

View file

@@ -375,13 +375,6 @@ GEM
     resque-ensure-connected (0.1.0)
       activerecord (>= 2.3.5)
       resque (~> 1.10.0)
-    resque-retry (0.1.0)
-      resque (>= 1.8.0)
-      resque-scheduler (>= 1.8.0)
-    resque-scheduler (1.9.9)
-      redis (>= 2.0.1)
-      resque (>= 1.8.0)
-      rufus-scheduler
     resque-timeout (1.0.0)
       resque (~> 1.0)
     rest-client (1.6.1)
@@ -423,8 +416,6 @@ GEM
       archive-tar-minitar (>= 0.5.2)
     rubyntlm (0.1.1)
     rubyzip (0.9.4)
-    rufus-scheduler (2.0.11)
-      tzinfo (>= 0.3.23)
     sass (3.1.7)
     selenium-webdriver (2.7.0)
       childprocess (>= 0.2.1)
@@ -536,8 +527,6 @@ DEPENDENCIES
   redcarpet (= 2.0.0b5)
   resque (= 1.10.0)
   resque-ensure-connected
-  resque-retry
-  resque-scheduler
   resque-timeout (= 1.0.0)
   rest-client (= 1.6.1)
   roxml!

View file

@@ -3,24 +3,16 @@
 # the COPYRIGHT file.

 require 'uri'
-require 'resque-retry'
 require File.join(Rails.root, 'lib/hydra_wrapper')

 module Jobs
   class HttpMulti < Base
-    extend Resque::Plugins::ExponentialBackoff
     @queue = :http

-    @backoff_strategy = [10.minutes,
-                         3.hours,
-                         12.hours,
-                         2.days]
-
-    def self.args_for_retry(user_id, encoded_object_xml, person_ids, dispatcher_class_as_string)
-      [user_id, encoded_object_xml, @failed_people, dispatcher_class_as_string]
-    end
+    MAX_RETRIES = 3

-    def self.perform(user_id, encoded_object_xml, person_ids, dispatcher_class_as_string)
+    def self.perform(user_id, encoded_object_xml, person_ids, dispatcher_class_as_string, retry_count=0)
       user = User.find(user_id)
       people = Person.where(:id => person_ids)
@@ -30,16 +22,17 @@ module Jobs
       hydra.enqueue_batch
       hydra.run

-      @failed_people = hydra.failed_people
-      unless @failed_people.empty?
-        if self.retry_limit_reached?
-          msg = "event=http_multi_abandon sender_id=#{user_id} failed_recipient_ids='[#{@failed_people.join(', ')}]'"
-          Rails.logger.info(msg)
+      unless hydra.failed_people.empty?
+        if retry_count < MAX_RETRIES
+          Resque.enqueue(Jobs::HttpMulti, user_id, encoded_object_xml, hydra.failed_people, dispatcher_class_as_string, retry_count + 1)
         else
-          raise 'retry'
+          Rails.logger.info("event=http_multi_abandon sender_id=#{user_id} failed_recipient_ids='[#{person_ids.join(', ')}] '")
         end
       end
     end
   end
 end
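
Read together, the two hunks above leave Jobs::HttpMulti roughly as follows. This is a reconstruction from this diff alone, not the complete app/models/jobs/http_multi.rb; the hydra construction between the hunks is elided behind a comment.

# Rough reconstruction from the two hunks above -- not the full file.
module Jobs
  class HttpMulti < Base
    @queue = :http

    MAX_RETRIES = 3

    def self.perform(user_id, encoded_object_xml, person_ids, dispatcher_class_as_string, retry_count=0)
      user = User.find(user_id)
      people = Person.where(:id => person_ids)
      # ... build the hydra batch of deliveries for `people` (lib/hydra_wrapper) ...
      hydra.enqueue_batch
      hydra.run

      unless hydra.failed_people.empty?
        if retry_count < MAX_RETRIES
          # Re-enqueue only the recipients that failed, bumping the counter.
          Resque.enqueue(Jobs::HttpMulti, user_id, encoded_object_xml,
                         hydra.failed_people, dispatcher_class_as_string, retry_count + 1)
        else
          Rails.logger.info("event=http_multi_abandon sender_id=#{user_id} failed_recipient_ids='[#{person_ids.join(', ')}]'")
        end
      end
    end
  end
end

The bounded counter replaces resque-retry's exponential backoff: after three re-enqueues the failure is logged and abandoned instead of being retried on the removed 10-minutes-to-2-days schedule.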

View file

@@ -6,11 +6,7 @@ require File.join(Rails.root, 'lib/postzord/receiver/public')

 module Jobs
   class ReceiveUnencryptedSalmon < Base
-    extend Resque::Plugins::ExponentialBackoff
     @queue = :receive

-    @backoff_strategy = [20.minutes,
-                         1.day]
-
     def self.perform(xml)
       receiver = Postzord::Receiver::Public.new(xml)

View file

@@ -1,8 +1,4 @@
 require 'resque'
-require 'resque_scheduler'
-require 'resque/scheduler'
-require 'resque-retry'
-require 'resque/failure/redis'

 Resque::Plugins::Timeout.timeout = 300
@@ -13,9 +9,6 @@ if !AppConfig.single_process_mode?
   elsif AppConfig[:redis_url]
     Resque.redis = Redis.new(:host => AppConfig[:redis_url], :port => 6379)
   end
-
-  Resque::Failure::MultipleWithRetrySuppression.classes = [Resque::Failure::Redis]
-  Resque::Failure.backend = Resque::Failure::MultipleWithRetrySuppression
 end

 if AppConfig.single_process_mode?

View file

@@ -22,14 +22,11 @@ module ResqueJobLogging
         backtrace = application_trace(error)
         log_string << "app_backtrace='#{backtrace.join(";")}' "
         notify_hoptoad(error, args) if AppConfig[:hoptoad_api_key].present?
-        do_log = !self.respond_to?('retry_limit_reached?') || self.retry_limit_reached?
       else
         log_string += "status=complete "
-        do_log = true
       end

-      Rails.logger.info(log_string) if do_log
+      Rails.logger.info(log_string)

       raise error if error
     end

View file

@@ -1,12 +1,8 @@
 require 'resque/tasks'
-require 'resque_scheduler/tasks'

 task "resque:setup" do
   require File.join(File.dirname(__FILE__), '..', '..', 'config', 'environment')
   Rails.logger.info("event=resque_setup rails_env=#{Rails.env}")
-
-  require 'resque_scheduler'
-  require 'resque/scheduler'
 end

 desc "Alias for resque:work (To run workers on Heroku)"

View file

@@ -28,18 +28,29 @@ describe Jobs::HttpMulti do
     Typhoeus::Hydra.stub!(:new).and_return(@hydra)

     people_ids = @people.map{ |p| p.id }
-    lambda {
-      Jobs::HttpMulti.perform(bob.id, @post_xml, people_ids, "Postzord::Dispatcher::Private")
-    }.should_not raise_error
+    Jobs::HttpMulti.perform(bob.id, @post_xml, people_ids, "Postzord::Dispatcher::Private")
   end

   it 'retries' do
     person = @people[0]
     @hydra.stub(:post, person.receive_url).and_return(@failed_response)
     Typhoeus::Hydra.stub!(:new).and_return(@hydra)

-    lambda {
-      Jobs::HttpMulti.perform(bob.id, @post_xml, [person.id], "Postzord::Dispatcher::Private")
-    }.should raise_error /retry/
+    Resque.should_receive(:enqueue).with(Jobs::HttpMulti, bob.id, @post_xml, [person.id], anything, 1).once
+    Jobs::HttpMulti.perform(bob.id, @post_xml, [person.id], "Postzord::Dispatcher::Private")
   end

+  it 'max retries' do
+    person = @people[0]
+    @hydra.stub(:post, person.receive_url).and_return(@failed_response)
+    Typhoeus::Hydra.stub!(:new).and_return(@hydra)
+
+    Resque.should_not_receive(:enqueue)
+    Jobs::HttpMulti.perform(bob.id, @post_xml, [person.id], "Postzord::Dispatcher::Private", 3)
+  end
+
   it 'generates encrypted xml for people' do
@@ -65,12 +76,7 @@ describe Jobs::HttpMulti do
     Typhoeus::Hydra.stub!(:new).and_return(@hydra)

-    begin
-      Jobs::HttpMulti.perform(bob.id, @post_xml, [person.id], "Postzord::Dispatcher::Private")
-    rescue RuntimeError => e
-      e.message == 'retry'
-    end
+    Jobs::HttpMulti.perform(bob.id, @post_xml, [person.id], "Postzord::Dispatcher::Private")

     person.reload
     person.url.should == "https://remote.net/"
   end

View file

@@ -1,11 +1,7 @@
 module Resque
   def enqueue(klass, *args)
     if $process_queue
-      begin
-        klass.send(:perform, *args)
-      rescue RuntimeError => e
-        e.message == 'retry'
-      end
+      klass.send(:perform, *args)
     else
       true
     end
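
The stub above runs a job inline (klass.send(:perform, *args)) whenever the global $process_queue flag is truthy, and otherwise just returns true. A hedged usage sketch; the with_inline_jobs helper is illustrative and not part of this commit:

# Illustrative only: toggle the $process_queue flag that the patched
# Resque.enqueue above checks, so a job runs synchronously inside a spec.
def with_inline_jobs
  $process_queue = true
  yield
ensure
  $process_queue = false
end

# e.g. inside an RSpec example:
#   with_inline_jobs do
#     Resque.enqueue(Jobs::HttpMulti, bob.id, @post_xml, people_ids, "Postzord::Dispatcher::Private")
#   end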