Merge pull request #2632 from torrancew/remove_chef

Remove chef
Daniel Grippi 2012-01-11 02:30:02 -08:00
commit c47ebd5a18
23 changed files with 0 additions and 3307 deletions

View file

@@ -27,10 +27,6 @@ gem 'twitter', '2.0.2'
gem 'messagebus_ruby_api', '1.0.1'

group :production do # we don't install these on travis to speed up test runs
  # chef
  gem 'chef', '~> 0.10.4', :require => false
  gem 'ohai', '~> 0.6.10', :require => false

  # reporting
  gem 'hoptoad_notifier'
  gem 'newrelic_rpm'
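
The removed lines are exactly the chef block inside the production group; a sketch of the group after this change, inferred only from the context lines above:

# Gemfile, production group after the removal (inferred)
group :production do # we don't install these on travis to speed up test runs
  # reporting
  gem 'hoptoad_notifier'
  gem 'newrelic_rpm'
end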

View file

@@ -87,7 +87,6 @@ GEM
    arel (2.0.10)
    bcrypt-ruby (2.1.4)
    builder (2.1.2)
    bunny (0.7.8)
    capistrano (2.9.0)
      highline
      net-scp (>= 1.0.0)
@@ -107,22 +106,6 @@ GEM
    carrierwave (0.5.8)
      activesupport (~> 3.0)
    cgi_multipart_eof_fix (2.5.0)
    chef (0.10.8)
      bunny (>= 0.6.0)
      erubis
      highline
      json (>= 1.4.4, <= 1.6.1)
      mixlib-authentication (>= 1.1.0)
      mixlib-cli (>= 1.1.0)
      mixlib-config (>= 1.1.2)
      mixlib-log (>= 1.3.0)
      moneta
      net-ssh (~> 2.1.3)
      net-ssh-multi (~> 1.1.0)
      ohai (>= 0.6.0)
      rest-client (>= 1.0.4, < 1.7.0)
      treetop (~> 1.4.9)
      uuidtools
    childprocess (0.2.5)
      ffi (~> 1.0.6)
    chronic (0.6.6)
@@ -243,16 +226,10 @@ GEM
    mime-types (1.17.2)
    mini_magick (3.3)
      subexec (~> 0.1.0)
    mixlib-authentication (1.1.4)
      mixlib-log
    mixlib-cli (1.2.2)
    mixlib-config (1.1.2)
    mixlib-log (1.3.0)
    mobile-fu (0.3.0)
      rack-mobile-detect
      rails
    mock_redis (0.3.0)
    moneta (0.6.0)
    mongrel (1.1.5)
      cgi_multipart_eof_fix (>= 2.4)
      daemons (>= 1.0.3)
@@ -268,9 +245,6 @@ GEM
    net-ssh (2.1.4)
    net-ssh-gateway (1.1.0)
      net-ssh (>= 1.99.1)
    net-ssh-multi (1.1)
      net-ssh (>= 2.1.4)
      net-ssh-gateway (>= 0.99.0)
    newrelic_rpm (3.3.1)
    nokogiri (1.5.0)
    oauth (0.4.5)
@@ -280,12 +254,6 @@ GEM
    oauth2-provider (0.0.19)
      activesupport (~> 3.0)
      addressable (~> 2.2)
    ohai (0.6.10)
      mixlib-cli
      mixlib-config
      mixlib-log
      systemu (~> 2.2.0)
      yajl-ruby
    omniauth (1.0.1)
      hashie (~> 1.2)
      rack
@@ -399,7 +367,6 @@ GEM
      tilt (>= 1.2.2, < 2.0)
    sqlite3 (1.3.5)
    subexec (0.1.0)
    systemu (2.2.0)
    term-ansicolor (1.0.7)
    thin (1.3.1)
      daemons (>= 1.0.9)
@@ -419,7 +386,6 @@ GEM
    typhoeus (0.3.3)
      mime-types
    tzinfo (0.3.31)
    uuidtools (2.1.2)
    vegas (0.1.8)
      rack (>= 1.0.0)
    warden (1.0.6)
@@ -433,7 +399,6 @@ GEM
    will_paginate (3.0.2)
    xpath (0.1.4)
      nokogiri (~> 1.3)
    yajl-ruby (1.1.0)
    yard (0.7.4)
    yui-compressor (0.9.6)
      POpen4 (>= 0.1.4)
@@ -453,7 +418,6 @@ DEPENDENCIES
  capistrano_colors
  capybara (~> 1.1.2)
  carrierwave (= 0.5.8)
  chef (~> 0.10.4)
  client_side_validations
  cucumber-api-steps (= 0.6)
  cucumber-rails (= 1.2.1)
@@ -490,7 +454,6 @@ DEPENDENCIES
  newrelic_rpm
  nokogiri (~> 1.5.0)
  oauth2-provider (= 0.0.19)
  ohai (~> 0.6.10)
  omniauth (= 1.0.1)
  omniauth-facebook
  omniauth-tumblr

View file

@@ -1 +0,0 @@
{"run_list": ["recipe[diaspora::bootstrap]"]}

View file

@@ -1,4 +0,0 @@
#statistics
42 15 * * * cd /usr/local/app/diaspora && exec /usr/local/bin/ruby /usr/local/bin/bundle exec rake --trace statistics:users_splunk &> /usr/local/app/diaspora/log/stats.log
42 15 * * * cd /usr/local/app/diaspora && exec /usr/local/bin/ruby /usr/local/bin/bundle exec rake --trace statistics:content_splunk &> /usr/local/app/diaspora/log/stats.log
0 * * * * cd /usr/local/app/diaspora && exec /usr/local/bin/ruby /usr/local/bin/bundle exec rake --trace backup:mysql

View file

@@ -1,3 +0,0 @@
<cross-domain-policy>
  <allow-access-from domain="*" to-ports="*" />
</cross-domain-policy>

View file

@@ -1,25 +0,0 @@
# Firewall configuration, manually edited AGAINST ALL REASON
*filter
:INPUT ACCEPT [0:0]
:FORWARD ACCEPT [0:0]
:OUTPUT ACCEPT [0:0]
:RH-Firewall-1-INPUT - [0:0]
-A INPUT -j RH-Firewall-1-INPUT
-A FORWARD -j RH-Firewall-1-INPUT
-A RH-Firewall-1-INPUT -i lo -j ACCEPT
-A RH-Firewall-1-INPUT -p icmp --icmp-type any -j ACCEPT
-A RH-Firewall-1-INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
#SSH
-A RH-Firewall-1-INPUT -m state --state NEW -m tcp -p tcp --dport 22 -j ACCEPT
#HTTP
-A RH-Firewall-1-INPUT -m state --state NEW -m tcp -p tcp --dport 80 -j ACCEPT
#HTTPS
-A RH-Firewall-1-INPUT -m state --state NEW -m tcp -p tcp --dport 443 -j ACCEPT
#Resque-Web
-A RH-Firewall-1-INPUT -m state --state NEW -m tcp -p tcp --dport 7894 -j ACCEPT
#Websocket
-A RH-Firewall-1-INPUT -m state --state NEW -m tcp -p tcp --dport 8080 -j ACCEPT
#Crossdomain policy file for Flash sockets
-A RH-Firewall-1-INPUT -m state --state NEW -m tcp -p tcp --dport 843 -j ACCEPT
-A RH-Firewall-1-INPUT -j REJECT --reject-with icmp-host-prohibited
COMMIT

View file

@@ -1,312 +0,0 @@
# Redis configuration file example
# Note on units: when memory size is needed, it is possible to specify
# it in the usual form of 1k 5GB 4M and so forth:
#
# 1k => 1000 bytes
# 1kb => 1024 bytes
# 1m => 1000000 bytes
# 1mb => 1024*1024 bytes
# 1g => 1000000000 bytes
# 1gb => 1024*1024*1024 bytes
#
# units are case insensitive so 1GB 1Gb 1gB are all the same.
# By default Redis does not run as a daemon. Use 'yes' if you need it.
# Note that Redis will write a pid file in /usr/local/var/run/redis.pid when daemonized.
daemonize no
# When running daemonized, Redis writes a pid file in /usr/local/var/run/redis.pid by
# default. You can specify a custom pid file location here.
pidfile /usr/local/var/run/redis.pid
# Accept connections on the specified port, default is 6379
port 6379
# If you want you can bind a single interface, if the bind option is not
# specified all the interfaces will listen for incoming connections.
#
# bind 127.0.0.1
# Close the connection after a client is idle for N seconds (0 to disable)
timeout 300
# Set server verbosity to 'debug'
# it can be one of:
# debug (a lot of information, useful for development/testing)
# verbose (lots of rarely useful info, but not a mess like the debug level)
# notice (moderately verbose, what you want in production probably)
# warning (only very important / critical messages are logged)
loglevel verbose
# Specify the log file name. Also 'stdout' can be used to force
# Redis to log on the standard output. Note that if you use standard
# output for logging but daemonize, logs will be sent to /dev/null
logfile stdout
# Set the number of databases. The default database is DB 0, you can select
# a different one on a per-connection basis using SELECT <dbid> where
# dbid is a number between 0 and 'databases'-1
databases 16
################################ SNAPSHOTTING #################################
#
# Save the DB on disk:
#
# save <seconds> <changes>
#
# Will save the DB if both the given number of seconds and the given
# number of write operations against the DB occurred.
#
# In the example below the behaviour will be to save:
# after 900 sec (15 min) if at least 1 key changed
# after 300 sec (5 min) if at least 10 keys changed
# after 60 sec if at least 10000 keys changed
#
# Note: you can disable saving entirely by commenting out all the "save" lines.
save 900 1
save 300 10
save 60 10000
# Compress string objects using LZF when dumping .rdb databases?
# By default that's set to 'yes' as it's almost always a win.
# If you want to save some CPU in the saving child set it to 'no' but
# the dataset will likely be bigger if you have compressible values or keys.
rdbcompression yes
# The filename where to dump the DB
dbfilename dump.rdb
# The working directory.
#
# The DB will be written inside this directory, with the filename specified
# above using the 'dbfilename' configuration directive.
#
# Also the Append Only File will be created inside this directory.
#
# Note that you must specify a directory here, not a file name.
dir /usr/local/var/db/redis/
################################# REPLICATION #################################
# Master-Slave replication. Use slaveof to make a Redis instance a copy of
# another Redis server. Note that the configuration is local to the slave
# so for example it is possible to configure the slave to save the DB with a
# different interval, or to listen to another port, and so on.
#
# slaveof <masterip> <masterport>
# If the master is password protected (using the "requirepass" configuration
# directive below) it is possible to tell the slave to authenticate before
# starting the replication synchronization process, otherwise the master will
# refuse the slave request.
#
# masterauth <master-password>
################################## SECURITY ###################################
# Require clients to issue AUTH <PASSWORD> before processing any other
# commands. This might be useful in environments in which you do not trust
# others with access to the host running redis-server.
#
# This should stay commented out for backward compatibility and because most
# people do not need auth (e.g. they run their own servers).
#
# Warning: since Redis is pretty fast an outside user can try up to
# 150k passwords per second against a good box. This means that you should
# use a very strong password otherwise it will be very easy to break.
#
# requirepass foobared
################################### LIMITS ####################################
# Set the max number of connected clients at the same time. By default there
# is no limit, and it's up to the number of file descriptors the Redis process
# is able to open. The special value '0' means no limits.
# Once the limit is reached Redis will close all the new connections sending
# an error 'max number of clients reached'.
#
# maxclients 128
# Don't use more memory than the specified amount of bytes.
# When the memory limit is reached Redis will try to remove keys with an
# EXPIRE set. It will try to start freeing keys that are going to expire
# in little time and preserve keys with a longer time to live.
# Redis will also try to remove objects from free lists if possible.
#
# If all this fails, Redis will start to reply with errors to commands
# that will use more memory, like SET, LPUSH, and so on, and will continue
# to reply to most read-only commands like GET.
#
# WARNING: maxmemory can be a good idea mainly if you want to use Redis as a
# 'state' server or cache, not as a real DB. When Redis is used as a real
# database the memory usage will grow over the weeks, it will be obvious if
# it is going to use too much memory in the long run, and you'll have the time
# to upgrade. With maxmemory after the limit is reached you'll start to get
# errors for write operations, and this may even lead to DB inconsistency.
#
# maxmemory <bytes>
############################## APPEND ONLY MODE ###############################
# By default Redis asynchronously dumps the dataset on disk. If you can live
# with the idea that the latest records will be lost if something like a crash
# happens this is the preferred way to run Redis. If instead you care a lot
# about your data and don't want even a single record to get lost, you should
# enable the append only mode: when this mode is enabled Redis will append
# every write operation received in the file appendonly.aof. This file will
# be read on startup in order to rebuild the full dataset in memory.
#
# Note that you can have both the async dumps and the append only file if you
# like (you have to comment the "save" statements above to disable the dumps).
# Still if append only mode is enabled Redis will load the data from the
# log file at startup ignoring the dump.rdb file.
#
# IMPORTANT: See the BGREWRITEAOF command for how to rewrite the append
# log file in background when it gets too big.
appendonly no
# The name of the append only file (default: "appendonly.aof")
# appendfilename appendonly.aof
# The fsync() call tells the Operating System to actually write data on disk
# instead of waiting for more data in the output buffer. Some OS will really flush
# data on disk, some other OS will just try to do it ASAP.
#
# Redis supports three different modes:
#
# no: don't fsync, just let the OS flush the data when it wants. Faster.
# always: fsync after every write to the append only log. Slow, Safest.
# everysec: fsync only if one second passed since the last fsync. Compromise.
#
# The default is "everysec" that's usually the right compromise between
# speed and data safety. It's up to you to understand if you can relax this to
# "no" that will will let the operating system flush the output buffer when
# it wants, for better performances (but if you can live with the idea of
# some data loss consider the default persistence mode that's snapshotting),
# or on the contrary, use "always" that's very slow but a bit safer than
# everysec.
#
# If unsure, use "everysec".
# appendfsync always
appendfsync everysec
# appendfsync no
################################ VIRTUAL MEMORY ###############################
# Virtual Memory allows Redis to work with datasets bigger than the actual
# amount of RAM needed to hold the whole dataset in memory.
# In order to do so, frequently used keys are kept in memory while the other keys
# are swapped into a swap file, similarly to what operating systems do
# with memory pages.
#
# To enable VM just set 'vm-enabled' to yes, and set the following three
# VM parameters accordingly to your needs.
vm-enabled no
# vm-enabled yes
# This is the path of the Redis swap file. As you can guess, swap files
# can't be shared by different Redis instances, so make sure to use a swap
# file for every redis process you are running. Redis will complain if the
# swap file is already in use.
#
# The best kind of storage for the Redis swap file (that's accessed at random)
# is a Solid State Disk (SSD).
#
# *** WARNING *** if you are using a shared hosting the default of putting
# the swap file under /tmp is not secure. Create a dir with access granted
# only to Redis user and configure Redis to create the swap file there.
vm-swap-file /tmp/redis.swap
# vm-max-memory configures the VM to use at max the specified amount of
# RAM. Everything that does not fit will be swapped on disk *if* possible, that
# is, if there is still enough contiguous space in the swap file.
#
# With vm-max-memory 0 the system will swap everything it can. Not a good
# default, just specify the max amount of RAM you can in bytes, but it's
# better to leave some margin. For instance specify an amount of RAM
# that's more or less between 60 and 80% of your free RAM.
vm-max-memory 0
# The Redis swap file is split into pages. An object can be saved using multiple
# contiguous pages, but pages can't be shared between different objects.
# So if your page is too big, small objects swapped out on disk will waste
# a lot of space. If your page is too small, there is less space in the swap
# file (assuming you configured the same number of total swap file pages).
#
# If you use a lot of small objects, use a page size of 64 or 32 bytes.
# If you use a lot of big objects, use a bigger page size.
# If unsure, use the default :)
vm-page-size 32
# Number of total memory pages in the swap file.
# Given that the page table (a bitmap of free/used pages) is taken in memory,
# every 8 pages on disk will consume 1 byte of RAM.
#
# The total swap size is vm-page-size * vm-pages
#
# With the default of 32-bytes memory pages and 134217728 pages Redis will
# use a 4 GB swap file, that will use 16 MB of RAM for the page table.
#
# It's better to use the smallest acceptable value for your application,
# but the default is large in order to work in most conditions.
vm-pages 134217728
# Max number of VM I/O threads running at the same time.
# These threads are used to read/write data from/to the swap file; since they
# also encode and decode objects from disk to memory or the reverse, a bigger
# number of threads can help with big objects even if they can't help with
# I/O itself, as the physical device may not be able to cope with many
# read/write operations at the same time.
#
# The special value of 0 turns off threaded I/O and enables the blocking
# Virtual Memory implementation.
vm-max-threads 4
############################### ADVANCED CONFIG ###############################
# Glue small output buffers together in order to send small replies in a
# single TCP packet. Uses a bit more CPU but most of the time it is a win
# in terms of number of queries per second. Use 'yes' if unsure.
glueoutputbuf yes
# Hashes are encoded in a special way (much more memory efficient) when they
# have at most a given number of elements, and the biggest element does not
# exceed a given threshold. You can configure these limits with the following
# configuration directives.
hash-max-zipmap-entries 64
hash-max-zipmap-value 512
# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
# order to help rehashing the main Redis hash table (the one mapping top-level
# keys to values). The hash table implementation redis uses (see dict.c)
# performs a lazy rehashing: the more operations you run against a hash table
# that is rehashing, the more rehashing "steps" are performed, so if the
# server is idle the rehashing is never complete and some more memory is used
# by the hash table.
#
# The default is to use this millisecond 10 times every second in order to
# actively rehash the main dictionaries, freeing memory when possible.
#
# If unsure:
# use "activerehashing no" if you have hard latency requirements and it is
# not a good thing in your environment that Redis can reply from time to time
# to queries with 2 milliseconds delay.
#
# use "activerehashing yes" if you don't have such hard requirements but
# want to free memory asap when possible.
activerehashing yes
################################## INCLUDES ###################################
# Include one or more other config files here. This is useful if you
# have a standard template that goes to all Redis servers but also need
# to customize a few per-server settings. Include files can include
# other files, so use this wisely.
#
# include /path/to/local.conf
# include /path/to/other.conf
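
The vm-pages comment above quotes a couple of derived numbers; a quick arithmetic check of that sizing, in Ruby for convenience:

# Swap-file sizing from the VIRTUAL MEMORY section above.
page_size = 32            # vm-page-size, in bytes
pages     = 134_217_728   # vm-pages

swap_bytes       = page_size * pages   # total swap file size
page_table_bytes = pages / 8           # 1 bit of RAM per page on disk

puts "swap file:  #{swap_bytes / 1024 ** 3} GiB"        # => 4 GiB
puts "page table: #{page_table_bytes / 1024 ** 2} MiB"  # => 16 MiB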

File diff suppressed because it is too large

View file

@@ -1,9 +0,0 @@
thins:
- port: '3001'
- port: '3002'
- port: '3003'
url: 'joindiaspora.com'
cert_location: '/usr/local/nginx/conf/diaspora.crt'
key_location: '/usr/local/nginx/conf/diaspora.key'
s3_bucket: "https://joindiaspora.s3.amazonaws.com"
s3_path: "/uploads/images/"

View file

@@ -1 +0,0 @@
{"run_list": ["recipe[diaspora::main]"]}

View file

@@ -1,10 +0,0 @@
cron "user stats" do
  minute 42
  hour 15
  command "cd /usr/local/app/diaspora && exec /usr/local/bin/ruby /usr/local/bin/bundle exec rake --trace statistics:users_splunk &> /usr/local/app/diaspora/log/stats.log"
end

cron "backup mysql" do
  minute 0
  command "cd /usr/local/app/diaspora && exec /usr/local/bin/ruby /usr/local/bin/bundle exec rake --trace backup:mysql"
end
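
These two resources mirror the raw crontab deleted earlier in this diff; Chef's cron resource fills any schedule field you leave unset with "*", so "backup mysql" runs at the top of every hour. A hypothetical helper (not the Chef API) that makes the mapping explicit:

# Sketch: how a cron resource's schedule maps onto a crontab line,
# with unset fields defaulting to "*".
def cron_line(command, fields = {})
  schedule = [:minute, :hour, :day, :month, :weekday].map { |f| fields.fetch(f, '*') }
  "#{schedule.join(' ')} #{command}"
end

puts cron_line('bundle exec rake statistics:users_splunk', :minute => 42, :hour => 15)
# => "42 15 * * * bundle exec rake statistics:users_splunk"
puts cron_line('bundle exec rake backup:mysql', :minute => 0)
# => "0 * * * * bundle exec rake backup:mysql"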

View file

@@ -1,58 +0,0 @@
common_pkgs = [
  "cpio",
  "gcc-c++",
  "htop",
  "psmisc",
  "screen",
  "bzip2"
]

dev_pkgs = value_for_platform(
  "debian" => {
    "default" => [
      "libxml2-dev",
      "libxslt1-dev",
      "libsqlite3-dev",
      "libmysqlclient-dev",
      "libssl-dev",
      "libcurl4-openssl-dev"
    ]
  },
  "centos" => {
    "default" => [
      "libxml2-devel",
      "libxslt-devel",
      "openssl-devel"
    ]
  }
)

execute "apt-get update" do
  action :nothing
end.run_action(:run) if platform?("debian")

common_pkgs.each do |pkg|
  package pkg
end

dev_pkgs.each do |pkg|
  package pkg
end

def harden_ruby(ruby_string)
  Dir.glob("/usr/local/rvm/wrappers/#{ruby_string}/*").each do |file|
    link "/usr/local/bin/#{file.split('/').last}" do
      to file
    end
  end

  Dir.glob("/usr/local/rvm/gems/#{ruby_string}/bin/*").each do |file|
    link "/usr/local/bin/#{file.split('/').last}" do
      to file
    end
  end
end

harden_ruby("ree-1.8.7-2010.02")

include_recipe "diaspora::java"

View file

@@ -1,51 +0,0 @@
curl = 'curl-7.21.4'

execute 'download curl' do
  command "mkdir -p /tmp/install && cd /tmp/install/ && wget http://curl.download.nextag.com/download/#{curl}.tar.gz"
  not_if do
    File.exists?("/tmp/install/#{curl}.tar.gz")
  end
end

execute "unzip curl" do
  command "cd /tmp/install && tar -xvf #{curl}.tar.gz"
  not_if do
    File.exists?("/tmp/install/#{curl}/README")
  end
end

execute "configure curl" do
  command "cd /tmp/install/#{curl} && ./configure --with-ssl"
  not_if do
    File.exists?('/usr/local/lib/libcurl.so.4')
  end
end

execute "compile curl" do
  command "cd /tmp/install/#{curl} && make"
  not_if do
    File.exists?('/usr/local/lib/libcurl.so.4')
  end
end

execute "install curl" do
  command "cd /tmp/install/#{curl} && make install"
  not_if do
    File.exists?('/usr/local/lib/libcurl.so.4')
  end
end

execute 'update dynamic loader cache for curl' do
  command "echo '/usr/local/lib' >> /etc/ld.so.conf"
  not_if "grep /usr/local/lib /etc/ld.so.conf"
end

execute 'run dynamic linker' do
  command '/sbin/ldconfig'
end

execute 'rebundle' do
  command 'bundle install'
end

include_recipe "diaspora::startcom_bundle"

View file

@@ -1,70 +0,0 @@
execute "get the daemontools repo" do
  command "mkdir -p /package/admin && cd /package/admin && git clone git://github.com/MikeSofaer/daemontools.git daemontools-0.76 || true"
end

execute "compile daemontools" do
  command "cd /package/admin/daemontools-0.76 && ./package/install"
end

execute "mysql run" do
  command "mkdir -p /service/mysql && echo '#!/bin/sh' > /service/mysql/run && echo 'exec /usr/libexec/mysqld --datadir=/var/lib/mysql --socket=/var/lib/mysql/mysql.sock --log-error=/var/log/mysqld.log --pid-file=/var/run/mysqld/mysqld.pid --user=mysql' >> /service/mysql/run"
end

execute "executable" do
  command "chmod -R 755 /service/mysql"
end

config = YAML.load_file("/usr/local/app/diaspora/chef/cookbooks/diaspora/files/default/thins.yml")
config['thins'].each do |thin|
  port = thin["port"]
  dir = "/service/thin_#{port}"

  flags = []
  flags << "-c /usr/local/app/diaspora" # directory to run from
  flags << "-e production"              # run in production mode
  flags << "-p #{port}"                 # port for this thin instance

  execute "thin run" do
    command "mkdir -p #{dir} && echo '#!/bin/sh' > #{dir}/run && echo 'exec /usr/local/bin/ruby /usr/local/bin/thin start #{flags.join(" ")}' >> #{dir}/run"
  end

  execute "executable" do
    command "chmod -R 755 " + dir
  end
end

execute "websocket run" do
  command "mkdir -p /service/websocket && echo '#!/bin/sh' > /service/websocket/run && echo 'cd /usr/local/app/diaspora && RAILS_ENV=production exec /usr/local/bin/ruby /usr/local/app/diaspora/script/websocket_server.rb' >> /service/websocket/run"
end

execute "executable" do
  command "chmod -R 755 /service/websocket"
end

execute "redis run" do
  command "mkdir -p /service/redis && echo '#!/bin/sh' > /service/redis/run && echo 'cd /usr/sbin/ && exec /usr/sbin/redis-server /usr/local/etc/redis.conf' >> /service/redis/run"
end

execute "executable" do
  command "chmod -R 755 /service/redis"
end

execute "nginx run" do
  command "mkdir -p /service/nginx && echo '#!/bin/sh' > /service/nginx/run && echo 'exec /usr/local/nginx/sbin/nginx' >> /service/nginx/run"
end

execute "executable" do
  command "chmod -R 755 /service/nginx"
end

execute "resque worker run" do
  command "mkdir -p /service/resque_worker && echo '#!/bin/sh' > /service/resque_worker/run && echo 'cd /usr/local/app/diaspora && RAILS_ENV=production QUEUES=socket_webfinger,receive,receive_salmon,mail,http HOME=/usr/local/app/diaspora exec /usr/local/bin/rake resque:work' >> /service/resque_worker/run"
end

execute "executable" do
  command "chmod -R 755 /service/resque_worker"
end

execute "resque web run" do
  command "mkdir -p /service/resque_web && echo '#!/bin/sh' > /service/resque_web/run && echo 'RAILS_ENV=production HOME=/usr/local/app/diaspora exec resque-web -F' >> /service/resque_web/run"
end

execute "executable" do
  command "chmod -R 755 /service/resque_web"
end
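
Each pair of resources above writes a daemontools run script and then marks the /service entry executable. For a thin on port 3001 (the first entry in the thins.yml deleted earlier), the generated script comes out roughly as below; the Ruby just restates what the echo pipeline builds:

# Approximate content of /service/thin_3001/run as assembled above
# (the recipe then chmods the directory 755 so svscan can start it).
run_script = <<SH
#!/bin/sh
exec /usr/local/bin/ruby /usr/local/bin/thin start -c /usr/local/app/diaspora -e production -p 3001
SH
File.open("/service/thin_3001/run", "w") { |f| f.write(run_script) }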

View file

@@ -1,8 +0,0 @@
include_recipe "diaspora::image_magick"
include_recipe "diaspora::mysql"
include_recipe "diaspora::iptables"
include_recipe "diaspora::daemontools"
include_recipe "diaspora::backup"
include_recipe "diaspora::nginx"
include_recipe "diaspora::redis"
include_recipe "diaspora::curl" if platform?("centos")

View file

@@ -1,12 +0,0 @@
package "imagemagick" do
  case node['platform']
  when "debian"
    package_name "imagemagick"
  when "centos"
    package_name "ImageMagick"
  end
end

if platform?("debian")
  package "libmagick9-dev"
end

View file

@@ -1,12 +0,0 @@
if platform?("centos")
  cookbook_file "/etc/sysconfig/iptables" do
    source "iptables"
    notifies :run, "execute[restart iptables]", :immediately
  end

  execute "restart iptables" do
    command "/etc/init.d/iptables restart"
    action :nothing # run only when notified by the cookbook_file above
  end
end

View file

@@ -1 +0,0 @@
package "java"

View file

@@ -1,16 +0,0 @@
mysql_pkgs = value_for_platform(
  "debian" => { "default" => %w[mysql-server libmysqlclient-dev libmysql-ruby] },
  "centos" => { "default" => %w[mysql mysql-server mysql-devel] }
)

# install the platform-specific packages
mysql_pkgs.each do |pkg|
  package pkg
end

if platform?("centos")
  execute "start mysql service to create the system tables" do
    command "service mysqld start"
  end

  execute "stop service again" do
    command "service mysqld stop"
  end
end
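
value_for_platform above picks the package list matching the node's platform, falling back to the "default" version key. A simplified stand-in (ignoring the per-version keys the real Chef helper also supports) behaves like this:

# Simplified model of the lookup, not the real Chef helper:
def pick_for_platform(mapping, platform)
  (mapping[platform] || {})['default'] || []
end

pkgs = pick_for_platform(
  { "centos" => { "default" => %w[mysql mysql-server mysql-devel] } },
  "centos"
)
pkgs.each { |pkg| puts "would install #{pkg}" }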

View file

@@ -1,34 +0,0 @@
execute "Get nginx from nginx web site" do
  command "mkdir -p /tmp/install && curl http://sysoev.ru/nginx/nginx-0.8.53.tar.gz > /tmp/install/nginx-0.8.53.tar.gz"
end

execute "unzip nginx" do
  command "cd /tmp/install && tar -xvf nginx-0.8.53.tar.gz"
end

execute "configure nginx" do
  command "cd /tmp/install/nginx-0.8.53 && ./configure --with-http_ssl_module"
end

execute "compile nginx" do
  command "cd /tmp/install/nginx-0.8.53 && make"
end

execute "install nginx" do
  command "cd /tmp/install/nginx-0.8.53 && make install"
end

cookbook_file "/usr/local/nginx/html/crossdomain.xml" do
  source "crossdomain.xml"
end

execute "change crossdomain.xml permissions" do
  command "chmod 755 /usr/local/nginx/html/crossdomain.xml"
end

config = YAML.load_file("/usr/local/app/diaspora/chef/cookbooks/diaspora/files/default/thins.yml")

template "/usr/local/nginx/conf/nginx.conf" do
  source "nginx.conf.erb"
  variables :ports => config['thins'].map{ |thin| "#{thin["port"]}" },
            :url => config['url'],
            :cert_location => config['cert_location'],
            :key_location => config['key_location'],
            :s3_bucket => config['s3_bucket'],
            :s3_path => config['s3_path']
end

View file

@@ -1,29 +0,0 @@
case node['platform']
when "debian"
  remote_file "#{Chef::Config[:file_cache_path]}/redis-server_2.2.2-1_amd64.deb" do
    source "http://ftp.us.debian.org/debian/pool/main/r/redis/redis-server_2.2.2-1_amd64.deb"
  end

  dpkg_package "redis-server" do
    source "#{Chef::Config[:file_cache_path]}/redis-server_2.2.2-1_amd64.deb"
  end
when "centos"
  execute "refresh yum" do
    command "yum update -y"
  end

  package "redis"
end

cookbook_file "/usr/local/etc/redis.conf" do
  source "redis.conf"
  mode 0755
end

directory "/usr/local/var/db/redis" do
  recursive true
end

View file

@@ -1,19 +0,0 @@
cookbook_file '/etc/pki/tls/certs/startcom-cert.crt' do
  source 'startcom-bundle.crt'
end

execute 'back up cert bundle' do
  command 'mv /etc/pki/tls/certs/ca-bundle.crt /etc/pki/tls/certs/ca-bundle.crt.bak'
end

execute 'add startcom cert' do
  command 'cat /etc/pki/tls/certs/ca-bundle.crt.bak /etc/pki/tls/certs/startcom-cert.crt > /etc/pki/tls/certs/ca-bundle.crt'
  not_if "cat /etc/pki/tls/certs/ca-bundle.crt | grep '#{<<LINE_FROM_CERT
J/eUsTc9t8eR9+IB7P2UieHMbtM21goZea7XNIJl/3xCu7bdC6Y0r0tg/n9DSQaL
jEO4VvLZfyFDF+qnSJUBdXXqK6VDleoVhJ0IjSZuVZur3NI50jEdYOKszFZFJPUc
VKvuht2WMoX4TE/olXa2Bd02I2e8/xXiyjvmpuw77CYRNEiN9oArGiMC64ocOnYq
e1YWHHIqs6rjYKUAnwSb4m8eFFhbpWyLWDzDuk46XPfhlis+7we8pOVdzE2fDeHc
qrvhbhrsj+G2TE15cl0XNQsd18FH2pYk4NByqFpfZi0Q3C8qE64m/gocGczQPguc
LINE_FROM_CERT
}'"
end

View file

@@ -1,113 +0,0 @@
# Copyright (c) 2010-2011, Diaspora Inc. This file is
# licensed under the Affero General Public License version 3 or later. See
# the COPYRIGHT file.

worker_processes 1;
daemon off;

events {
  worker_connections 8192;
}

http {
  include mime.types;
  default_type application/octet-stream;

  log_format splunky '$msec code=$status url=$uri bytes=$body_bytes_sent ms=$request_time';
  access_log /usr/local/nginx/logs/access.log splunky;

  sendfile on;
  keepalive_timeout 65;

  gzip on;
  gzip_http_version 1.0;
  gzip_comp_level 2;
  gzip_proxied any;
  gzip_buffers 16 8k;
  gzip_types text/plain text/css application/x-javascript text/xml application/xml application/xml+rss text/javascript;
  gzip_disable "MSIE [1-6]\.(?!.*SV1)";

  upstream thin_cluster {
    <% @ports.each do |port| %>
    server <%= "localhost:#{port}" %>;
    <% end %>
  }

  server {
    listen 843;
    location / {
      rewrite ^(.*)$ /crossdomain.xml;
    }
    error_page 400 /crossdomain.xml;
    location = /crossdomain.xml {
      root html;
    }
  }

  server {
    listen 80;
    server_name <%= @url %> www.<%= @url %>;
    rewrite ^(.*) https://<%= @url %>$1 permanent;
  }

  server {
    listen 443;
    server_name <%= @url %> www.<%= @url %>;
    root /usr/local/app/diaspora/public;

    ssl on;
    ssl_certificate <%= @cert_location %>;
    ssl_certificate_key <%= @key_location %>;

    location /assets {
      expires 1d;
      add_header Cache-Control public;
    }

    location /uploads/images {
      expires 5d;
      add_header Cache-Control public;
    }

    location / {
      proxy_set_header X-Real-IP $remote_addr;
      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
      proxy_set_header X-Forwarded-Proto https;
      proxy_set_header Host $http_host;
      proxy_redirect off;

      client_max_body_size 4M;
      client_body_buffer_size 128K;

      if (-f $request_filename/index.html) {
        rewrite (.*) $1/index.html break;
      }
      if (-f $request_filename.html) {
        rewrite (.*) $1.html break;
      }
      if (!-f $request_filename) {
        proxy_pass http://thin_cluster;
        break;
      }
      <% unless @s3_bucket.blank? || @s3_path.blank? %>
      <%= "rewrite ^/uploads/images/(.*)$ #{@s3_bucket}#{@s3_path}$1 permanent;" %>
      <% end %>
    }

    error_page 500 502 503 504 /50x.html;
    location = /50x.html {
      root html;
    }
  }
}
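
The piece of this template most tightly coupled to the rest of the change is the thin_cluster upstream, generated from the ports in thins.yml. A minimal ERB rendering check (ports from the thins.yml deleted earlier; the harness is assumed and uses a local variable where the template uses @ports):

require 'erb'

snippet = <<TEMPLATE
upstream thin_cluster {
<% ports.each do |port| %>  server localhost:<%= port %>;
<% end %>}
TEMPLATE

ports = %w[3001 3002 3003]
puts ERB.new(snippet).result(binding)
# upstream thin_cluster {
#   server localhost:3001;
#   server localhost:3002;
#   server localhost:3003;
# }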