diff --git a/pillars/roles/aurpkgs/icinga.sls b/pillars/roles/aurpkgs/icinga.sls index c238114..9a65732 100644 --- a/pillars/roles/aurpkgs/icinga.sls +++ b/pillars/roles/aurpkgs/icinga.sls @@ -2,4 +2,3 @@ aur: pkgs: icinga2: [] icingaweb2: [] - icingaweb2-module-director: [] diff --git a/pillars/roles/database/icinga.sls b/pillars/roles/database/icinga.sls index 04371ff..fb2dda9 100644 --- a/pillars/roles/database/icinga.sls +++ b/pillars/roles/database/icinga.sls @@ -11,7 +11,3 @@ database: icinga: host: icinga.actcur.com grant: all privileges - icinga2_director: - icinga: - host: icinga.actcur.com - grant: all privileges diff --git a/pillars/roles/firewalld/nrpe.sls b/pillars/roles/firewalld/nrpe.sls new file mode 100644 index 0000000..ace1cbe --- /dev/null +++ b/pillars/roles/firewalld/nrpe.sls @@ -0,0 +1,4 @@ +firewalld: + 70_internal: + port: + 5666/tcp: [] diff --git a/pillars/roles/git/lightbooks.sls b/pillars/roles/git/lightbooks.sls new file mode 100644 index 0000000..dc97db0 --- /dev/null +++ b/pillars/roles/git/lightbooks.sls @@ -0,0 +1,17 @@ +git: + lightbooks: + repo: "ssh://gogs@git.actcur.com:5022/actcur/lightbooks.git" + path: "/usr/share/webapps/lightbooks" + branch: "master" + key: "git_actcur" + force: true + email: "actcur@actcur.com" + name: "Actaeus Curabitur" + lightbooks.dev: + repo: "ssh://gogs@git.actcur.com:5022/actcur/lightbooks.git" + path: "/usr/share/webapps/lightbooks-dev" + branch: "dev" + key: "git_actcur" + force: true + email: "actcur@actcur.com" + name: "Actaeus Curabitur" diff --git a/pillars/roles/git/portal.sls b/pillars/roles/git/portal.sls index 9fbc59d..5c9cccf 100644 --- a/pillars/roles/git/portal.sls +++ b/pillars/roles/git/portal.sls @@ -1,5 +1,5 @@ git: - tmux: + tmux-root: repo: "ssh://gogs@git.actcur.com:5022/actcur/tmux.git" path: "/root/tmux" branch: "master" @@ -7,6 +7,14 @@ git: force: true email: "actcur@actcur.com" name: "Actaeus Curabitur" + tmux-ejparker: + repo: "ssh://gogs@git.actcur.com:5022/actcur/tmux.git" + path: "/ejparker/tmux" + branch: "master" + key: "git_actcur" + force: true + email: "actcur@actcur.com" + name: "Actaeus Curabitur" web: repo: "ssh://gogs@git.actcur.com:5022/actcur/portal.git" path: "/srv/http/portal" diff --git a/pillars/roles/git/ytdownloader.sls b/pillars/roles/git/ytdownloader.sls new file mode 100644 index 0000000..2f2f60b --- /dev/null +++ b/pillars/roles/git/ytdownloader.sls @@ -0,0 +1,9 @@ +git: + ytdownloader: + repo: "ssh://gogs@git.actcur.com:5022/actcur/ytdownloader.git" + path: "/root/scripts/ytdownloader" + branch: "master" + key: "git_actcur" + force: true + email: "actcur@actcur.com" + name: "Actaeus Curabitur" diff --git a/pillars/roles/init.sls b/pillars/roles/init.sls index 661e271..316be6b 100644 --- a/pillars/roles/init.sls +++ b/pillars/roles/init.sls @@ -8,3 +8,4 @@ include: - roles.backup - roles.ca - roles.database + - roles.services diff --git a/pillars/roles/nginx/deluge.sls b/pillars/roles/nginx/deluge.sls index c23e575..b8f3e13 100644 --- a/pillars/roles/nginx/deluge.sls +++ b/pillars/roles/nginx/deluge.sls @@ -6,7 +6,7 @@ nginx: prot: http portal: - Video: + Media: deluge: name: Torrents summary: Deluge Torrent Server diff --git a/pillars/roles/nginx/jackett.sls b/pillars/roles/nginx/jackett.sls index 8739e1a..e8ed345 100644 --- a/pillars/roles/nginx/jackett.sls +++ b/pillars/roles/nginx/jackett.sls @@ -6,7 +6,7 @@ nginx: prot: http portal: - Video: + Media: jackett: name: Torrent Indexers summary: Jackett Server diff --git a/pillars/roles/nginx/lightbooks.sls 
b/pillars/roles/nginx/lightbooks.sls new file mode 100644 index 0000000..9d10acf --- /dev/null +++ b/pillars/roles/nginx/lightbooks.sls @@ -0,0 +1,24 @@ +nginx: + books: + auth: 2fa + https: + port: 8000 + prot: http + books.dev: + auth: 2fa + https: + port: 8080 + prot: http + default: no + +portal: + Media: + books: + name: Books and Podcasts + summary: LightBooks Server + public: false + Dev: + books.dev: + name: Books and Podcasts - Dev + summary: LightBooks Server + public: false diff --git a/pillars/roles/nginx/ombi.sls b/pillars/roles/nginx/ombi.sls index dfab622..88d5d37 100644 --- a/pillars/roles/nginx/ombi.sls +++ b/pillars/roles/nginx/ombi.sls @@ -6,7 +6,7 @@ nginx: prot: http portal: - Video: + Media: ombi: name: TV/Movie Requests summary: OMBI Plex Requests Server diff --git a/pillars/roles/nginx/plexmediaserver.sls b/pillars/roles/nginx/plexmediaserver.sls index afb6737..f223a09 100644 --- a/pillars/roles/nginx/plexmediaserver.sls +++ b/pillars/roles/nginx/plexmediaserver.sls @@ -6,7 +6,7 @@ nginx: prot: http portal: - Video: + Media: plex: name: Plex summary: Plex Media Server diff --git a/pillars/roles/nginx/radarr.sls b/pillars/roles/nginx/radarr.sls index feb0f1c..fe98491 100644 --- a/pillars/roles/nginx/radarr.sls +++ b/pillars/roles/nginx/radarr.sls @@ -6,7 +6,7 @@ nginx: prot: http portal: - Video: + Media: radarr: name: Movie Downloader summary: Radarr Server diff --git a/pillars/roles/nginx/sonarr.sls b/pillars/roles/nginx/sonarr.sls index b8309ff..cf2ef8c 100644 --- a/pillars/roles/nginx/sonarr.sls +++ b/pillars/roles/nginx/sonarr.sls @@ -6,7 +6,7 @@ nginx: prot: http portal: - Video: + Media: sonarr: name: TV Show Downloader summary: Sonarr Server diff --git a/pillars/roles/services/aurrepo.sls b/pillars/roles/services/aurrepo.sls new file mode 100644 index 0000000..313499a --- /dev/null +++ b/pillars/roles/services/aurrepo.sls @@ -0,0 +1,3 @@ +services: + aurrepo: + updateaur.timer: [] diff --git a/pillars/roles/services/authelia.sls b/pillars/roles/services/authelia.sls new file mode 100644 index 0000000..5461e2e --- /dev/null +++ b/pillars/roles/services/authelia.sls @@ -0,0 +1,5 @@ +services: + authelia: + mongodb: [] + redis: [] + authelia: [] diff --git a/pillars/roles/services/backup.sls b/pillars/roles/services/backup.sls new file mode 100644 index 0000000..4c6525b --- /dev/null +++ b/pillars/roles/services/backup.sls @@ -0,0 +1,3 @@ +services: + backup: + backup.timer: [] diff --git a/pillars/roles/services/certbot.sls b/pillars/roles/services/certbot.sls new file mode 100644 index 0000000..6c61f7e --- /dev/null +++ b/pillars/roles/services/certbot.sls @@ -0,0 +1,3 @@ +services: + certbot: + certbot.timer: [] diff --git a/pillars/roles/services/core.sls b/pillars/roles/services/core.sls new file mode 100644 index 0000000..4d2a4a7 --- /dev/null +++ b/pillars/roles/services/core.sls @@ -0,0 +1,4 @@ +services: + core: + firewalld: [] + sshd: [] diff --git a/pillars/roles/services/deluge.sls b/pillars/roles/services/deluge.sls new file mode 100644 index 0000000..59652c2 --- /dev/null +++ b/pillars/roles/services/deluge.sls @@ -0,0 +1,4 @@ +services: + deluge: + deluged: [] + deluge-web: [] diff --git a/pillars/roles/services/freeipa-server.sls b/pillars/roles/services/freeipa-server.sls new file mode 100644 index 0000000..65034a5 --- /dev/null +++ b/pillars/roles/services/freeipa-server.sls @@ -0,0 +1,3 @@ +services: + freeipa-server: + httpd: [] diff --git a/pillars/roles/services/git.sls b/pillars/roles/services/git.sls new file mode 100644 index 
0000000..abfb353 --- /dev/null +++ b/pillars/roles/services/git.sls @@ -0,0 +1,3 @@ +services: + git: + gogs: [] diff --git a/pillars/roles/services/icinga.sls b/pillars/roles/services/icinga.sls new file mode 100644 index 0000000..015dcb0 --- /dev/null +++ b/pillars/roles/services/icinga.sls @@ -0,0 +1,4 @@ +services: + icinga: + icinga2: [] + php-fpm: [] diff --git a/pillars/roles/services/init.sls b/pillars/roles/services/init.sls new file mode 100644 index 0000000..a545ecc --- /dev/null +++ b/pillars/roles/services/init.sls @@ -0,0 +1,18 @@ +# On a minion with the icinga role, pull in every pillar file under +# roles/services so the monitoring server sees the full service catalog; +# all other minions get only the empty roles.services.none include. +{% set states = salt['cp.list_states'](saltenv) %} +include: + - roles.services.none +{%- if grains['roles'] is defined -%} + {%- if grains['roles'] is not none -%} + {%- if 'icinga' in grains['roles'] -%} + {%- for state in states %} + {%- if state.startswith("pillars.roles.services.") -%} + {%- set role = state.split('.')[3] %} + - roles.services.{{ role }} + {%- endif -%} + {%- endfor -%} + {%- endif -%} + {%- endif -%} +{%- endif -%} diff --git a/pillars/roles/services/lightbooks.sls b/pillars/roles/services/lightbooks.sls new file mode 100644 index 0000000..619d778 --- /dev/null +++ b/pillars/roles/services/lightbooks.sls @@ -0,0 +1,3 @@ +services: + lightbooks: + php-fpm: [] diff --git a/pillars/roles/services/mirrorlist.sls b/pillars/roles/services/mirrorlist.sls new file mode 100644 index 0000000..a56336f --- /dev/null +++ b/pillars/roles/services/mirrorlist.sls @@ -0,0 +1,3 @@ +services: + mirrorlist: + getmirrors.timer: [] diff --git a/pillars/roles/services/mysql.sls b/pillars/roles/services/mysql.sls new file mode 100644 index 0000000..45566df --- /dev/null +++ b/pillars/roles/services/mysql.sls @@ -0,0 +1,4 @@ +services: + mysql: + mysqld: [] + dumpdb.timer: [] diff --git a/pillars/roles/services/nginx-proxy.sls b/pillars/roles/services/nginx-proxy.sls new file mode 100644 index 0000000..9e2f79b --- /dev/null +++ b/pillars/roles/services/nginx-proxy.sls @@ -0,0 +1,3 @@ +services: + nginx-proxy: + nginx: [] diff --git a/pillars/roles/services/none.sls b/pillars/roles/services/none.sls new file mode 100644 index 0000000..e69de29 diff --git a/pillars/roles/services/ombi.sls b/pillars/roles/services/ombi.sls new file mode 100644 index 0000000..8bf1c02 --- /dev/null +++ b/pillars/roles/services/ombi.sls @@ -0,0 +1,3 @@ +services: + ombi: + ombi: [] diff --git a/pillars/roles/services/pass.sls b/pillars/roles/services/pass.sls new file mode 100644 index 0000000..2e37e5c --- /dev/null +++ b/pillars/roles/services/pass.sls @@ -0,0 +1,3 @@ +services: + pass: + php-fpm: [] diff --git a/pillars/roles/services/pkg-cache.sls b/pillars/roles/services/pkg-cache.sls new file mode 100644 index 0000000..a707634 --- /dev/null +++ b/pillars/roles/services/pkg-cache.sls @@ -0,0 +1,3 @@ +services: + pkg-cache: + nginx: [] diff --git a/pillars/roles/services/plexmediaserver.sls b/pillars/roles/services/plexmediaserver.sls new file mode 100644 index 0000000..0f73668 --- /dev/null +++ b/pillars/roles/services/plexmediaserver.sls @@ -0,0 +1,3 @@ +services: + plexmediaserver: + plexmediaserver: [] diff --git a/pillars/roles/services/saltmaster.sls b/pillars/roles/services/saltmaster.sls new file mode 100644 index 0000000..82cb9f1 --- /dev/null +++ b/pillars/roles/services/saltmaster.sls @@ -0,0 +1,3 @@ +services: + saltmaster: + salt-master: [] diff --git a/pillars/roles/services/saltminion.sls b/pillars/roles/services/saltminion.sls new file mode 100644 index 0000000..40fcc45 --- /dev/null +++ b/pillars/roles/services/saltminion.sls @@ -0,0 +1,4 @@ +services: + 
saltminion: + salt-minion: [] + highstate.timer: [] diff --git a/pillars/roles/services/sshserver.sls b/pillars/roles/services/sshserver.sls new file mode 100644 index 0000000..419db53 --- /dev/null +++ b/pillars/roles/services/sshserver.sls @@ -0,0 +1,3 @@ +services: + sshserver: + sshd: [] diff --git a/pillars/roles/services/ytdownloader.sls b/pillars/roles/services/ytdownloader.sls new file mode 100644 index 0000000..b018bf5 --- /dev/null +++ b/pillars/roles/services/ytdownloader.sls @@ -0,0 +1,3 @@ +services: + ytdownloader: + ytdownloader.timer: [] diff --git a/pillars/servers/env/server/centipa.sls b/pillars/servers/env/server/books.sls similarity index 100% rename from pillars/servers/env/server/centipa.sls rename to pillars/servers/env/server/books.sls diff --git a/pillars/servers/env/server/debianipa.sls b/pillars/servers/env/server/debianipa.sls deleted file mode 100644 index 2fdef9a..0000000 --- a/pillars/servers/env/server/debianipa.sls +++ /dev/null @@ -1 +0,0 @@ -env: prod diff --git a/pillars/servers/env/server/ipatest.sls b/pillars/servers/env/server/ipatest.sls deleted file mode 100644 index 2fdef9a..0000000 --- a/pillars/servers/env/server/ipatest.sls +++ /dev/null @@ -1 +0,0 @@ -env: prod diff --git a/pillars/servers/maintainer/server/centipa.sls b/pillars/servers/maintainer/server/books.sls similarity index 100% rename from pillars/servers/maintainer/server/centipa.sls rename to pillars/servers/maintainer/server/books.sls diff --git a/pillars/servers/maintainer/server/debianipa.sls b/pillars/servers/maintainer/server/debianipa.sls deleted file mode 100644 index c0b416d..0000000 --- a/pillars/servers/maintainer/server/debianipa.sls +++ /dev/null @@ -1,3 +0,0 @@ -maintainer: - - masaufuku - diff --git a/pillars/servers/maintainer/server/ipatest.sls b/pillars/servers/maintainer/server/ipatest.sls deleted file mode 100644 index c0b416d..0000000 --- a/pillars/servers/maintainer/server/ipatest.sls +++ /dev/null @@ -1,3 +0,0 @@ -maintainer: - - masaufuku - diff --git a/pillars/servers/roles/server/archtest.sls b/pillars/servers/roles/server/archtest.sls index 07c13df..0a9b10c 100644 --- a/pillars/servers/roles/server/archtest.sls +++ b/pillars/servers/roles/server/archtest.sls @@ -2,4 +2,5 @@ grains: roles: - server - ssh + - nrpe - saltminion diff --git a/pillars/servers/roles/server/authelia.sls b/pillars/servers/roles/server/authelia.sls index 289ba0f..16f313e 100644 --- a/pillars/servers/roles/server/authelia.sls +++ b/pillars/servers/roles/server/authelia.sls @@ -2,6 +2,7 @@ grains: roles: - server - ssh + - nrpe - saltminion - authelia - nginx-proxy diff --git a/pillars/servers/roles/server/baikal.sls b/pillars/servers/roles/server/baikal.sls index 6d93df7..1c789f6 100644 --- a/pillars/servers/roles/server/baikal.sls +++ b/pillars/servers/roles/server/baikal.sls @@ -2,6 +2,7 @@ grains: roles: - server - ssh + - nrpe - saltminion - nginx-proxy - baikal diff --git a/pillars/servers/roles/server/base b/pillars/servers/roles/server/base index 07c13df..0a9b10c 100644 --- a/pillars/servers/roles/server/base +++ b/pillars/servers/roles/server/base @@ -2,4 +2,5 @@ grains: roles: - server - ssh + - nrpe - saltminion diff --git a/pillars/servers/roles/server/centipa.sls b/pillars/servers/roles/server/books.sls similarity index 55% rename from pillars/servers/roles/server/centipa.sls rename to pillars/servers/roles/server/books.sls index 07c13df..cb2aabe 100644 --- a/pillars/servers/roles/server/centipa.sls +++ b/pillars/servers/roles/server/books.sls @@ -2,4 +2,7 @@ grains: roles: 
- server - ssh + - nrpe - saltminion + - lightbooks + - nginx-proxy diff --git a/pillars/servers/roles/server/ca.sls b/pillars/servers/roles/server/ca.sls index 22ce80c..f2560f0 100644 --- a/pillars/servers/roles/server/ca.sls +++ b/pillars/servers/roles/server/ca.sls @@ -2,5 +2,6 @@ grains: roles: - server - ssh + - nrpe - saltminion - ca diff --git a/pillars/servers/roles/server/debianipa.sls b/pillars/servers/roles/server/debianipa.sls deleted file mode 100644 index 3148d3f..0000000 --- a/pillars/servers/roles/server/debianipa.sls +++ /dev/null @@ -1,6 +0,0 @@ -grains: - roles: - - server - - ssh - - saltminion - - freeipa_server diff --git a/pillars/servers/roles/server/deluge.sls b/pillars/servers/roles/server/deluge.sls index a7f99d2..47dfb7a 100644 --- a/pillars/servers/roles/server/deluge.sls +++ b/pillars/servers/roles/server/deluge.sls @@ -2,6 +2,7 @@ grains: roles: - server - ssh + - nrpe - saltminion - deluge - nginx-proxy diff --git a/pillars/servers/roles/server/git.sls b/pillars/servers/roles/server/git.sls index 639313a..6ec96d3 100644 --- a/pillars/servers/roles/server/git.sls +++ b/pillars/servers/roles/server/git.sls @@ -2,7 +2,7 @@ grains: roles: - server - ssh + - nrpe - saltminion - git - nginx-proxy - - nfs diff --git a/pillars/servers/roles/server/host.sls b/pillars/servers/roles/server/host.sls index 6b696ea..113dc5c 100644 --- a/pillars/servers/roles/server/host.sls +++ b/pillars/servers/roles/server/host.sls @@ -2,5 +2,6 @@ grains: roles: - server - ssh + - nrpe - saltminion - backup diff --git a/pillars/servers/roles/server/icinga.sls b/pillars/servers/roles/server/icinga.sls index 50f2d69..18d2e44 100644 --- a/pillars/servers/roles/server/icinga.sls +++ b/pillars/servers/roles/server/icinga.sls @@ -2,6 +2,7 @@ grains: roles: - server - ssh + - nrpe - saltminion - icinga - nginx-proxy diff --git a/pillars/servers/roles/server/ipa.sls b/pillars/servers/roles/server/ipa.sls index 6eed884..a57b175 100644 --- a/pillars/servers/roles/server/ipa.sls +++ b/pillars/servers/roles/server/ipa.sls @@ -2,5 +2,6 @@ grains: roles: - server - ssh + - nrpe - saltminion - freeipa-server diff --git a/pillars/servers/roles/server/ipatest.sls b/pillars/servers/roles/server/ipatest.sls deleted file mode 100644 index 07c13df..0000000 --- a/pillars/servers/roles/server/ipatest.sls +++ /dev/null @@ -1,5 +0,0 @@ -grains: - roles: - - server - - ssh - - saltminion diff --git a/pillars/servers/roles/server/jackett.sls b/pillars/servers/roles/server/jackett.sls index d129ec2..63910f5 100644 --- a/pillars/servers/roles/server/jackett.sls +++ b/pillars/servers/roles/server/jackett.sls @@ -2,6 +2,7 @@ grains: roles: - server - ssh + - nrpe - saltminion - nginx-proxy - jackett diff --git a/pillars/servers/roles/server/ombi.sls b/pillars/servers/roles/server/ombi.sls index 6669635..6feac81 100644 --- a/pillars/servers/roles/server/ombi.sls +++ b/pillars/servers/roles/server/ombi.sls @@ -2,6 +2,7 @@ grains: roles: - server - ssh + - nrpe - saltminion - nginx-proxy - ombi diff --git a/pillars/servers/roles/server/pass.sls b/pillars/servers/roles/server/pass.sls index d401198..3c0c344 100644 --- a/pillars/servers/roles/server/pass.sls +++ b/pillars/servers/roles/server/pass.sls @@ -2,6 +2,7 @@ grains: roles: - server - ssh + - nrpe - saltminion - nginx-proxy - pass diff --git a/pillars/servers/roles/server/pkg.sls b/pillars/servers/roles/server/pkg.sls index 90032c3..7fd25e3 100644 --- a/pillars/servers/roles/server/pkg.sls +++ b/pillars/servers/roles/server/pkg.sls @@ -2,6 +2,7 @@ grains: roles: - 
server - ssh + - nrpe - saltminion - pkg-cache - aurrepo diff --git a/pillars/servers/roles/server/plex.sls b/pillars/servers/roles/server/plex.sls index 89e86b0..4b5cb1f 100644 --- a/pillars/servers/roles/server/plex.sls +++ b/pillars/servers/roles/server/plex.sls @@ -2,6 +2,7 @@ grains: roles: - server - ssh + - nrpe - saltminion - nginx-proxy - plexmediaserver diff --git a/pillars/servers/roles/server/portal.sls b/pillars/servers/roles/server/portal.sls index 6c96879..4d08cb2 100644 --- a/pillars/servers/roles/server/portal.sls +++ b/pillars/servers/roles/server/portal.sls @@ -2,6 +2,7 @@ grains: roles: - server - ssh + - nrpe - saltminion - portal - nginx-proxy diff --git a/pillars/servers/roles/server/radarr.sls b/pillars/servers/roles/server/radarr.sls index 1623dc7..58a9526 100644 --- a/pillars/servers/roles/server/radarr.sls +++ b/pillars/servers/roles/server/radarr.sls @@ -2,6 +2,7 @@ grains: roles: - server - ssh + - nrpe - saltminion - nginx-proxy - radarr diff --git a/pillars/servers/roles/server/salt.sls b/pillars/servers/roles/server/salt.sls index eda4276..759911f 100644 --- a/pillars/servers/roles/server/salt.sls +++ b/pillars/servers/roles/server/salt.sls @@ -2,6 +2,7 @@ grains: roles: - server - ssh + - nrpe - saltminion - saltmaster - mirrorlist diff --git a/pillars/servers/roles/server/sonarr.sls b/pillars/servers/roles/server/sonarr.sls index aeb4863..8a66905 100644 --- a/pillars/servers/roles/server/sonarr.sls +++ b/pillars/servers/roles/server/sonarr.sls @@ -2,8 +2,8 @@ grains: roles: - server - ssh + - nrpe - saltminion - nginx-proxy - sonarr - - nfs - ytdownloader diff --git a/pillars/servers/roles/server/sql.sls b/pillars/servers/roles/server/sql.sls index 3d1df9a..044c620 100644 --- a/pillars/servers/roles/server/sql.sls +++ b/pillars/servers/roles/server/sql.sls @@ -2,5 +2,6 @@ grains: roles: - server - ssh + - nrpe - saltminion - mysql diff --git a/pillars/servers/roles/server/ssh.sls b/pillars/servers/roles/server/ssh.sls index 0c8bb7c..bbee929 100644 --- a/pillars/servers/roles/server/ssh.sls +++ b/pillars/servers/roles/server/ssh.sls @@ -2,5 +2,6 @@ grains: roles: - server - ssh + - nrpe - saltminion - sshserver diff --git a/pillars/servers/roles/server/sync.sls b/pillars/servers/roles/server/sync.sls index bd5750a..10bf144 100644 --- a/pillars/servers/roles/server/sync.sls +++ b/pillars/servers/roles/server/sync.sls @@ -2,6 +2,7 @@ grains: roles: - server - ssh + - nrpe - saltminion - sync - nginx-proxy diff --git a/pillars/servers/roles/server/tt.sls b/pillars/servers/roles/server/tt.sls index 4a6a411..8989c9a 100644 --- a/pillars/servers/roles/server/tt.sls +++ b/pillars/servers/roles/server/tt.sls @@ -2,6 +2,7 @@ grains: roles: - server - ssh + - nrpe - saltminion - ttrss - nginx-proxy diff --git a/pillars/servers/roles/server/vpn.sls b/pillars/servers/roles/server/vpn.sls index fad59ee..940c86f 100644 --- a/pillars/servers/roles/server/vpn.sls +++ b/pillars/servers/roles/server/vpn.sls @@ -2,6 +2,7 @@ grains: roles: - server - ssh + - nrpe - saltminion - vpnserver - ca-cert diff --git a/states/roles/build/gitlab/init.sls b/states/roles/build/gitlab/init.sls deleted file mode 100644 index f9a1db7..0000000 --- a/states/roles/build/gitlab/init.sls +++ /dev/null @@ -1,29 +0,0 @@ -#Note: This *only* initializes the database - only use build script in a fresh environment, it'll nuke existing mysql database -#initialize redis database as gitlab user -redis-running: - service.running: - - name: redis - - enable: true -gitlab_init_db: - cmd.run: - - name: 
"bundle-2.3 exec rake gitlab:setup RAILS_ENV=production force=yes" - - cwd: "/usr/share/webapps/gitlab" - - runas: gitlab - -#start services -gitlab.target: - service.running: - - enable: true - - reload: true -gitlab-workhorse: - service.running: - - enable: true - - reload: true -gitlab-unicorn: - service.running: - - enable: true - - reload: true -gitlab-sidekiq: - service.running: - - enable: true - - reload: true diff --git a/states/roles/build/pepper/build_pepper.sh b/states/roles/build/pepper/build_pepper.sh deleted file mode 100644 index 17be9c0..0000000 --- a/states/roles/build/pepper/build_pepper.sh +++ /dev/null @@ -1,10 +0,0 @@ -cd /root/ -curl -sS https://getcomposer.org/installer | php -mv composer.phar /usr/local/bin/composer -composer global require "laravel/installer" -ln -s /root/.config/composer/vendor/bin/laravel /usr/local/bin/laravel -cd /opt/ -laravel new pepper -cd /opt/pepper -#require packages we need -composer require symfony/yaml diff --git a/states/roles/build/pepper/init.sls b/states/roles/build/pepper/init.sls deleted file mode 100644 index b460120..0000000 --- a/states/roles/build/pepper/init.sls +++ /dev/null @@ -1,48 +0,0 @@ -include: -{%- set os=grains['os'] -%} -{%- if os=="CentOS" or os=="RedHat" %} - - repos.nginx - - repos.webtatic -{% endif %} - -php.packages: - pkg.installed: - - pkgs: - - php56w - - php56w-mbstring - - php56w-mysql - - php56w-mcrypt - - php56w-fpm - - php56w-xml - -install_mariadb: - pkg.installed: - - pkgs: - - mariadb-server - -selinux-policy-targeted: - pkg.installed - -policycoreutils-python: - pkg.installed - -httpd_can_network_connect: - selinux.boolean: - - value: True - - persist: True - -/root/salt/scripts/build_pepper.sh: - file.managed: - - makedirs: true - - source: salt://roles/build/pepper/build_pepper.sh - - user: root - - group: root - - mode: 744 - -build_pepper: - cmd.run: - - name: "sh /root/salt/scripts/build_pepper.sh" - -install_nginx: - pkg.installed: - - name: nginx diff --git a/states/roles/build/saltpad/build_saltpad.sh b/states/roles/build/saltpad/build_saltpad.sh deleted file mode 100644 index 8d662f1..0000000 --- a/states/roles/build/saltpad/build_saltpad.sh +++ /dev/null @@ -1,9 +0,0 @@ -cd /opt/ -git clone https://github.com/tinyclues/saltpad.git -b saltpad_v1 -#git clone https://github.com/Lothiraldan/saltpad.git -cd saltpad -virtualenv venv -source venv/bin/activate -pip install -r requirements.txt -pip install chaussette -pip install pyyaml diff --git a/states/roles/build/saltpad/init.sls b/states/roles/build/saltpad/init.sls deleted file mode 100644 index 5358724..0000000 --- a/states/roles/build/saltpad/init.sls +++ /dev/null @@ -1,49 +0,0 @@ -include: -{%- set os=grains['os'] -%} -{%- if os=="CentOS" or os=="RedHat" %} - - repos.nginx -{% endif %} - -selinux-policy-targeted: - pkg.installed - -policycoreutils-python: - pkg.installed - -httpd_can_network_connect: - selinux.boolean: - - value: True - - persist: True - -python-virtualenv: - pkg.installed - -/root/salt/scripts/build_saltpad.sh: - file.managed: - - makedirs: true - - source: salt://roles/build/saltpad/build_saltpad.sh - - user: root - - group: root - - mode: 744 - -build_saltpad: - cmd.run: - - name: "sh /root/salt/scripts/build_saltpad.sh" - -/root/salt/scripts/start_saltpad.sh: - file.managed: - - source: salt://roles/build/saltpad/start_saltpad.sh - - user: root - - group: root - - mode: 744 - -/usr/lib/systemd/system/saltpad.service: - file.managed: - - source: salt://roles/build/saltpad/saltpad.service - - user: root - - group: 
root - - mode: 644 - -install_nginx: - pkg.installed: - - name: nginx diff --git a/states/roles/build/saltpad/saltpad.service b/states/roles/build/saltpad/saltpad.service deleted file mode 100644 index 876955c..0000000 --- a/states/roles/build/saltpad/saltpad.service +++ /dev/null @@ -1,11 +0,0 @@ -[Unit] -Description=The Saltpad -After=syslog.target network.target - -[Service] -Type=forking -LimitNOFILE=8192 -ExecStart=/bin/bash /root/salt/scripts/start_saltpad.sh start - -[Install] -WantedBy=multi-user.target diff --git a/states/roles/build/saltpad/start_saltpad.sh b/states/roles/build/saltpad/start_saltpad.sh deleted file mode 100644 index a6dcd32..0000000 --- a/states/roles/build/saltpad/start_saltpad.sh +++ /dev/null @@ -1,67 +0,0 @@ -#/bin/bash - -c=`ps aux | grep chaussette | wc -l` - -function stop { - if [ c -gt 1 ]; - then - echo "Stopping server.." - pkill chaussette - echo ".. Done." - else - echo "Server not running" - fi -} - -function start { - if [c -gt 1 ] - then - echo "Server is already running" - else - echo "Starting Server.." - cd /opt/saltpad - source venv/bin/activate - chaussette saltpad.merged:app & - echo ".. Done." - fi -} - -function restart { - echo "Restarting server.." - if [ c -gt 1 ] - then - stop - sleep 5 - start - else - start - fi - echo ".. Done." -} - -function status { - if [ c -gt 1 ] - then - echo "Server is not running" - exit 1 - else - echo "Server is running" - fi -} - -case "$1" in - start) - start - ;; - stop) - stop - ;; - restart) - restart - ;; - status) - status - ;; - *) - echo "Usage: $0 {start|stop|restart|status}" -esac diff --git a/states/roles/maintain/gitlab/conf_files/config.yml b/states/roles/maintain/gitlab/conf_files/config.yml deleted file mode 100644 index 0c802a8..0000000 --- a/states/roles/maintain/gitlab/conf_files/config.yml +++ /dev/null @@ -1,73 +0,0 @@ -# -# If you change this file in a Merge Request, please also create -# a Merge Request on https://gitlab.com/gitlab-org/omnibus-gitlab/merge_requests -# - -# GitLab user. git by default -user: gitlab - -# URL to GitLab instance, used for API calls. Default: http://localhost:8080. -# For relative URL support read http://doc.gitlab.com/ce/install/relative_url.html -# You only have to change the default if you have configured Unicorn -# to listen on a custom port, or if you have configured Unicorn to -# only listen on a Unix domain socket. For Unix domain sockets use -# "http+unix://", e.g. -# "http+unix://%2Fpath%2Fto%2Fsocket" -gitlab_url: "http://localhost:8080" - -# See installation.md#using-https for additional HTTPS configuration details. -http_settings: -# read_timeout: 300 -# user: someone -# password: somepass -# ca_file: /etc/ssl/cert.pem -# ca_path: /etc/pki/tls/certs - self_signed_cert: false - -# File used as authorized_keys for gitlab user -auth_file: "/var/lib/gitlab/.ssh/authorized_keys" - -# File that contains the secret key for verifying access to GitLab. -# Default is .gitlab_shell_secret in the gitlab-shell directory. -# secret_file: "/var/lib/gitlab/gitlab-shell/.gitlab_shell_secret" - -# Parent directory for global custom hook directories (pre-receive.d, update.d, post-receive.d) -# Default is hooks in the gitlab-shell directory. 
-# custom_hooks_dir: "/var/lib/gitlab/gitlab-shell/hooks" - -# Redis settings used for pushing commit notices to gitlab -redis: - bin: /usr/bin/redis-cli - host: 127.0.0.1 - port: 6379 - # pass: redispass # Allows you to specify the password for Redis - database: 5 - socket: /run/redis/redis.sock # Comment out this line if you want to use TCP or Sentinel - namespace: resque:gitlab - # sentinels: - # - - # host: 127.0.0.1 - # port: 26380 - # - - # host: 127.0.0.1 - # port: 26381 - - -# Log file. -# Default is gitlab-shell.log in the root directory. -log_file: "/var/log/gitlab/gitlab-shell.log" - -# Log level. INFO by default -log_level: INFO - -# Audit usernames. -# Set to true to see real usernames in the logs instead of key ids, which is easier to follow, but -# incurs an extra API call on every gitlab-shell command. -audit_usernames: false - -# Git trace log file. -# If set, git commands receive GIT_TRACE* environment variables -# See https://git-scm.com/book/es/v2/Git-Internals-Environment-Variables#Debugging for documentation -# An absolute path starting with / – the trace output will be appended to that file. -# It needs to exist so we can check permissions and avoid to throwing warnings to the users. -git_trace_log_file: diff --git a/states/roles/maintain/gitlab/conf_files/database.yml b/states/roles/maintain/gitlab/conf_files/database.yml deleted file mode 100644 index 6633c0d..0000000 --- a/states/roles/maintain/gitlab/conf_files/database.yml +++ /dev/null @@ -1,44 +0,0 @@ -# -# PRODUCTION -# -production: - adapter: mysql2 - encoding: utf8 - collation: utf8_general_ci - reconnect: false - database: gitlab - pool: 10 - username: gitlab - password: "{%- include 'secure/passwords/gitlab_db_password.txt' -%}" - host: sql.actcur.com - # socket: /tmp/mysql.sock - -# -# Development specific -# -development: - adapter: mysql2 - encoding: utf8 - collation: utf8_general_ci - reconnect: false - database: gitlabhq_development - pool: 5 - username: root - password: "secure password" - # host: localhost - # socket: /tmp/mysql.sock - -# Warning: The database defined as "test" will be erased and -# re-generated from your development database when you run "rake". -# Do not set this db to the same as development or production. -test: &test - adapter: mysql2 - encoding: utf8mb4 - collation: utf8mb4_general_ci - reconnect: false - database: gitlabhq_test - pool: 5 - username: root - password: - # host: localhost - # socket: /tmp/mysql.sock diff --git a/states/roles/maintain/gitlab/conf_files/gitlab.conf b/states/roles/maintain/gitlab/conf_files/gitlab.conf deleted file mode 100644 index cda4f4e..0000000 --- a/states/roles/maintain/gitlab/conf_files/gitlab.conf +++ /dev/null @@ -1,69 +0,0 @@ -## GitLab -## -## Lines starting with two hashes (##) are comments with information. -## Lines starting with one hash (#) are configuration parameters that can be uncommented. -## -################################## -## CONTRIBUTING ## -################################## -## -## If you change this file in a Merge Request, please also create -## a Merge Request on https://gitlab.com/gitlab-org/omnibus-gitlab/merge_requests -## -################################### -## configuration ## -################################### -## -## See installation.md#using-https for additional HTTPS configuration details. 
- -upstream gitlab-workhorse { - server unix:/run/gitlab/gitlab-workhorse.socket fail_timeout=0; -} - -## Normal HTTP host -server { - ## Either remove "default_server" from the listen line below, - ## or delete the /etc/nginx/sites-enabled/default file. This will cause gitlab - ## to be served if you visit any address that your server responds to, eg. - ## the ip address of the server (http://x.x.x.x/) - #listen 0.0.0.0:80 default_server; - listen 0.0.0.0:8000; - listen [::]:8000; - server_name git2.actcur.com; ## Replace this with something like gitlab.example.com - server_tokens off; ## Don't show the nginx version number, a security best practice - - ## See app/controllers/application_controller.rb for headers set - - ## Individual nginx logs for this GitLab vhost - access_log /var/log/nginx/gitlab_access.log; - error_log /var/log/nginx/gitlab_error.log; - - location / { - client_max_body_size 0; - gzip off; - - ## https://github.com/gitlabhq/gitlabhq/issues/694 - ## Some requests take more than 30 seconds. - proxy_read_timeout 300; - proxy_connect_timeout 300; - proxy_redirect off; - - proxy_http_version 1.1; - - proxy_set_header Host $http_host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto $scheme; - - proxy_pass http://gitlab-workhorse; - } - - error_page 404 /404.html; - error_page 422 /422.html; - error_page 500 /500.html; - error_page 502 /502.html; - location ~ ^/(404|422|500|502)\.html$ { - root /usr/share/webapps/gitlab/public; - internal; - } - -} diff --git a/states/roles/maintain/gitlab/conf_files/gitlab.yml b/states/roles/maintain/gitlab/conf_files/gitlab.yml deleted file mode 100644 index 233d4e8..0000000 --- a/states/roles/maintain/gitlab/conf_files/gitlab.yml +++ /dev/null @@ -1,627 +0,0 @@ -# # # # # # # # # # # # # # # # # # -# GitLab application config file # -# # # # # # # # # # # # # # # # # # -# -########################### NOTE ##################################### -# This file should not receive new settings. All configuration options # -# * are being moved to ApplicationSetting model! # -# If a setting requires an application restart say so in that screen. # -# If you change this file in a Merge Request, please also create # -# a MR on https://gitlab.com/gitlab-org/omnibus-gitlab/merge_requests # -######################################################################## -# -# -# How to use: -# 1. Copy file as gitlab.yml -# 2. Update gitlab -> host with your fully qualified domain name -# 3. Update gitlab -> email_from -# 4. If you installed Git from source, change git -> bin_path to /usr/local/bin/git -# IMPORTANT: If Git was installed in a different location use that instead. -# You can check with `which git`. If a wrong path of Git is specified, it will -# result in various issues such as failures of GitLab CI builds. -# 5. Review this configuration file for other settings you may want to adjust -production: &base - # - # 1. 
GitLab app settings - # ========================== - - ## GitLab settings - gitlab: - ## Web server settings (note: host is the FQDN, do not include http://) - host: git.actcur.com - port: 443 # Set to 443 if using HTTPS, see installation.md#using-https for additional HTTPS configuration details - https: true # Set to true if using HTTPS, see installation.md#using-https for additional HTTPS configuration details - - # Uncommment this line below if your ssh host is different from HTTP/HTTPS one - # (you'd obviously need to replace ssh.host_example.com with your own host). - # Otherwise, ssh host will be set to the `host:` value above - # ssh_host: ssh.host_example.com - - # Relative URL support - # WARNING: We recommend using an FQDN to host GitLab in a root path instead - # of using a relative URL. - # Documentation: http://doc.gitlab.com/ce/install/relative_url.html - # Uncomment and customize the following line to run in a non-root path - # - # relative_url_root: /gitlab - - # Trusted Proxies - # Customize if you have GitLab behind a reverse proxy which is running on a different machine. - # Add the IP address for your reverse proxy to the list, otherwise users will appear signed in from that address. - trusted_proxies: - # Examples: - #- 192.168.1.0/24 - #- 192.168.2.1 - #- 2001:0db8::/32 - - # Uncomment and customize if you can't use the default user to run GitLab (default: 'git') - user: gitlab - - ## Date & Time settings - # Uncomment and customize if you want to change the default time zone of GitLab application. - # To see all available zones, run `bundle exec rake time:zones:all RAILS_ENV=production` - # time_zone: 'UTC' - - ## Email settings - # Uncomment and set to false if you need to disable email sending from GitLab (default: true) - # email_enabled: true - # Email address used in the "From" field in mails sent by GitLab - email_from: notifications@actcur.com - email_display_name: Actcur Git - email_reply_to: noreply@actcur.com - email_subject_suffix: '' - - # Email server smtp settings are in config/initializers/smtp_settings.rb.sample - - # default_can_create_group: false # default: true - # username_changing_enabled: false # default: true - User can change her username/namespace - - ## Automatic issue closing - # If a commit message matches this regular expression, all issues referenced from the matched text will be closed. - # This happens when the commit is pushed or merged into the default branch of a project. - # When not specified the default issue_closing_pattern as specified below will be used. - # Tip: you can test your closing pattern at http://rubular.com. - # issue_closing_pattern: '((?:[Cc]los(?:e[sd]?|ing)|[Ff]ix(?:e[sd]|ing)?|[Rr]esolv(?:e[sd]?|ing))(:?) +(?:(?:issues? +)?%{issue_ref}(?:(?:, *| +and +)?)|([A-Z][A-Z0-9_]+-\d+))+)' - - ## Default project features settings - default_projects_features: - issues: true - merge_requests: true - wiki: true - snippets: true - builds: true - container_registry: true - - ## Webhook settings - # Number of seconds to wait for HTTP response after sending webhook HTTP POST request (default: 10) - # webhook_timeout: 10 - - ## Repository downloads directory - # When a user clicks e.g. 'Download zip' on a project, a temporary zip file is created in the following directory. - # The default is 'shared/cache/archive/' relative to the root of the Rails app. - # repository_downloads_path: shared/cache/archive/ - - ## Reply by email - # Allow users to comment on issues and merge requests by replying to notification emails. 
- # For documentation on how to set this up, see http://doc.gitlab.com/ce/administration/reply_by_email.html - incoming_email: - enabled: false - - # The email address including the `%{key}` placeholder that will be replaced to reference the item being replied to. - # The placeholder can be omitted but if present, it must appear in the "user" part of the address (before the `@`). - address: "gitlab-incoming+%{key}@gmail.com" - - # Email account username - # With third party providers, this is usually the full email address. - # With self-hosted email servers, this is usually the user part of the email address. - user: "gitlab-incoming@gmail.com" - # Email account password - password: "[REDACTED]" - - # IMAP server host - host: "imap.gmail.com" - # IMAP server port - port: 993 - # Whether the IMAP server uses SSL - ssl: true - # Whether the IMAP server uses StartTLS - start_tls: false - - # The mailbox where incoming mail will end up. Usually "inbox". - mailbox: "inbox" - # The IDLE command timeout. - idle_timeout: 60 - - ## Build Artifacts - artifacts: - enabled: true - # The location where build artifacts are stored (default: shared/artifacts). - # path: shared/artifacts - - ## Git LFS - lfs: - enabled: true - # The location where LFS objects are stored (default: shared/lfs-objects). - # storage_path: shared/lfs-objects - - ## GitLab Pages - pages: - enabled: false - # The location where pages are stored (default: shared/pages). - # path: shared/pages - - # The domain under which the pages are served: - # http://group.example.com/project - # or project path can be a group page: group.example.com - host: example.com - port: 80 # Set to 443 if you serve the pages with HTTPS - https: false # Set to true if you serve the pages with HTTPS - # external_http: ["1.1.1.1:80", "[2001::1]:80"] # If defined, enables custom domain support in GitLab Pages - # external_https: ["1.1.1.1:443", "[2001::1]:443"] # If defined, enables custom domain and certificate support in GitLab Pages - - ## Mattermost - ## For enabling Add to Mattermost button - mattermost: - enabled: false - host: 'https://mattermost.example.com' - - ## Gravatar - ## For Libravatar see: http://doc.gitlab.com/ce/customization/libravatar.html - gravatar: - # gravatar urls: possible placeholders: %{hash} %{size} %{email} %{username} - # plain_url: "http://..." # default: http://www.gravatar.com/avatar/%{hash}?s=%{size}&d=identicon - # ssl_url: "https://..." # default: https://secure.gravatar.com/avatar/%{hash}?s=%{size}&d=identicon - - ## Auxiliary jobs - # Periodically executed jobs, to self-heal Gitlab, do external synchronizations, etc. - # Please read here for more information: https://github.com/ondrejbartas/sidekiq-cron#adding-cron-job - cron_jobs: - # Flag stuck CI jobs as failed - stuck_ci_jobs_worker: - cron: "0 * * * *" - # Execute scheduled triggers - pipeline_schedule_worker: - cron: "19 * * * *" - # Remove expired build artifacts - expire_build_artifacts_worker: - cron: "50 * * * *" - # Periodically run 'git fsck' on all repositories. If started more than - # once per hour you will have concurrent 'git fsck' jobs. 
- repository_check_worker: - cron: "20 * * * *" - # Send admin emails once a week - admin_email_worker: - cron: "0 0 * * 0" - - # Remove outdated repository archives - repository_archive_cache_worker: - cron: "0 * * * *" - - registry: - # enabled: true - # host: registry.example.com - # port: 5005 - # api_url: http://localhost:5000/ # internal address to the registry, will be used by GitLab to directly communicate with API - # key: config/registry.key - # path: shared/registry - # issuer: gitlab-issuer - - # - # 2. GitLab CI settings - # ========================== - - gitlab_ci: - # Default project notifications settings: - # - # Send emails only on broken builds (default: true) - # all_broken_builds: true - # - # Add pusher to recipients list (default: false) - # add_pusher: true - - # The location where build traces are stored (default: builds/). Relative paths are relative to Rails.root - # builds_path: builds/ - - # - # 3. Auth settings - # ========================== - - ## LDAP settings - # You can inspect a sample of the LDAP users with login access by running: - # bundle exec rake gitlab:ldap:check RAILS_ENV=production - ldap: - enabled: false - servers: - ########################################################################## - # - # Since GitLab 7.4, LDAP servers get ID's (below the ID is 'main'). GitLab - # Enterprise Edition now supports connecting to multiple LDAP servers. - # - # If you are updating from the old (pre-7.4) syntax, you MUST give your - # old server the ID 'main'. - # - ########################################################################## - main: # 'main' is the GitLab 'provider ID' of this LDAP server - ## label - # - # A human-friendly name for your LDAP server. It is OK to change the label later, - # for instance if you find out it is too large to fit on the web page. - # - # Example: 'Paris' or 'Acme, Ltd.' - label: 'LDAP' - - host: '_your_ldap_server' - port: 389 - uid: 'sAMAccountName' - method: 'plain' # "tls" or "ssl" or "plain" - bind_dn: '_the_full_dn_of_the_user_you_will_bind_with' - password: '_the_password_of_the_bind_user' - - # Set a timeout, in seconds, for LDAP queries. This helps avoid blocking - # a request if the LDAP server becomes unresponsive. - # A value of 0 means there is no timeout. - timeout: 10 - - # This setting specifies if LDAP server is Active Directory LDAP server. - # For non AD servers it skips the AD specific queries. - # If your LDAP server is not AD, set this to false. - active_directory: true - - # If allow_username_or_email_login is enabled, GitLab will ignore everything - # after the first '@' in the LDAP username submitted by the user on login. - # - # Example: - # - the user enters 'jane.doe@example.com' and 'p@ssw0rd' as LDAP credentials; - # - GitLab queries the LDAP server with 'jane.doe' and 'p@ssw0rd'. - # - # If you are using "uid: 'userPrincipalName'" on ActiveDirectory you need to - # disable this setting, because the userPrincipalName contains an '@'. - allow_username_or_email_login: false - - # To maintain tight control over the number of active users on your GitLab installation, - # enable this setting to keep new users blocked until they have been cleared by the admin - # (default: false). - block_auto_created_users: false - - # Base where we can search for users - # - # Ex. ou=People,dc=gitlab,dc=example - # - base: '' - - # Filter LDAP users - # - # Format: RFC 4515 http://tools.ietf.org/search/rfc4515 - # Ex. 
(employeeType=developer) - # - # Note: GitLab does not support omniauth-ldap's custom filter syntax. - # - user_filter: '' - - # LDAP attributes that GitLab will use to create an account for the LDAP user. - # The specified attribute can either be the attribute name as a string (e.g. 'mail'), - # or an array of attribute names to try in order (e.g. ['mail', 'email']). - # Note that the user's LDAP login will always be the attribute specified as `uid` above. - attributes: - # The username will be used in paths for the user's own projects - # (like `gitlab.example.com/username/project`) and when mentioning - # them in issues, merge request and comments (like `@username`). - # If the attribute specified for `username` contains an email address, - # the GitLab username will be the part of the email address before the '@'. - username: ['uid', 'userid', 'sAMAccountName'] - email: ['mail', 'email', 'userPrincipalName'] - - # If no full name could be found at the attribute specified for `name`, - # the full name is determined using the attributes specified for - # `first_name` and `last_name`. - name: 'cn' - first_name: 'givenName' - last_name: 'sn' - - # GitLab EE only: add more LDAP servers - # Choose an ID made of a-z and 0-9 . This ID will be stored in the database - # so that GitLab can remember which LDAP server a user belongs to. - # uswest2: - # label: - # host: - # .... - - - ## OmniAuth settings - omniauth: - # Allow login via Twitter, Google, etc. using OmniAuth providers - enabled: false - - # Uncomment this to automatically sign in with a specific omniauth provider's without - # showing GitLab's sign-in page (default: show the GitLab sign-in page) - # auto_sign_in_with_provider: saml - - # Sync user's email address from the specified Omniauth provider every time the user logs - # in (default: nil). And consequently make this field read-only. - # sync_email_from_provider: cas3 - - # CAUTION! - # This allows users to login without having a user account first. Define the allowed providers - # using an array, e.g. ["saml", "twitter"], or as true/false to allow all providers or none. - # User accounts will be created automatically when authentication was successful. - allow_single_sign_on: ["saml"] - - # Locks down those users until they have been cleared by the admin (default: true). - block_auto_created_users: true - # Look up new users in LDAP servers. If a match is found (same uid), automatically - # link the omniauth identity with the LDAP account. (default: false) - auto_link_ldap_user: false - - # Allow users with existing accounts to login and auto link their account via SAML - # login, without having to do a manual login first and manually add SAML - # (default: false) - auto_link_saml_user: false - - # Set different Omniauth providers as external so that all users creating accounts - # via these providers will not be able to have access to internal projects. You - # will need to use the full name of the provider, like `google_oauth2` for Google. - # Refer to the examples below for the full names of the supported providers. 
- # (default: []) - external_providers: [] - - ## Auth providers - # Uncomment the following lines and fill in the data of the auth provider you want to use - # If your favorite auth provider is not listed you can use others: - # see https://github.com/gitlabhq/gitlab-public-wiki/wiki/Custom-omniauth-provider-configurations - # The 'app_id' and 'app_secret' parameters are always passed as the first two - # arguments, followed by optional 'args' which can be either a hash or an array. - # Documentation for this is available at http://doc.gitlab.com/ce/integration/omniauth.html - providers: - # See omniauth-cas3 for more configuration details - # - { name: 'cas3', - # label: 'cas3', - # args: { - # url: 'https://sso.example.com', - # disable_ssl_verification: false, - # login_url: '/cas/login', - # service_validate_url: '/cas/p3/serviceValidate', - # logout_url: '/cas/logout'} } - # - { name: 'authentiq', - # # for client credentials (client ID and secret), go to https://www.authentiq.com/ - # app_id: 'YOUR_CLIENT_ID', - # app_secret: 'YOUR_CLIENT_SECRET', - # args: { - # scope: 'aq:name email~rs address aq:push' - # # redirect_uri parameter is optional except when 'gitlab.host' in this file is set to 'localhost' - # # redirect_uri: 'YOUR_REDIRECT_URI' - # } - # } - # - { name: 'github', - # app_id: 'YOUR_APP_ID', - # app_secret: 'YOUR_APP_SECRET', - # url: "https://github.com/", - # verify_ssl: true, - # args: { scope: 'user:email' } } - # - { name: 'bitbucket', - # app_id: 'YOUR_APP_ID', - # app_secret: 'YOUR_APP_SECRET' } - # - { name: 'gitlab', - # app_id: 'YOUR_APP_ID', - # app_secret: 'YOUR_APP_SECRET', - # args: { scope: 'api' } } - # - { name: 'google_oauth2', - # app_id: 'YOUR_APP_ID', - # app_secret: 'YOUR_APP_SECRET', - # args: { access_type: 'offline', approval_prompt: '' } } - # - { name: 'facebook', - # app_id: 'YOUR_APP_ID', - # app_secret: 'YOUR_APP_SECRET' } - # - { name: 'twitter', - # app_id: 'YOUR_APP_ID', - # app_secret: 'YOUR_APP_SECRET' } - # - # - { name: 'saml', - # label: 'Our SAML Provider', - # groups_attribute: 'Groups', - # external_groups: ['Contractors', 'Freelancers'], - # args: { - # assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback', - # idp_cert_fingerprint: '43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8', - # idp_sso_target_url: 'https://login.example.com/idp', - # issuer: 'https://gitlab.example.com', - # name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:transient' - # } } - # - # - { name: 'crowd', - # args: { - # crowd_server_url: 'CROWD SERVER URL', - # application_name: 'YOUR_APP_NAME', - # application_password: 'YOUR_APP_PASSWORD' } } - # - # - { name: 'auth0', - # args: { - # client_id: 'YOUR_AUTH0_CLIENT_ID', - # client_secret: 'YOUR_AUTH0_CLIENT_SECRET', - # namespace: 'YOUR_AUTH0_DOMAIN' } } - - # SSO maximum session duration in seconds. Defaults to CAS default of 8 hours. - # cas3: - # session_duration: 28800 - - # Shared file storage settings - shared: - path: /var/lib/gitlab/shared # Default: shared - - # Gitaly settings - gitaly: - # This setting controls whether GitLab uses Gitaly (new component - # introduced in 9.0). Eventually Gitaly use will become mandatory and - # this option will disappear. - enabled: true - - # - # 4. Advanced settings - # ========================== - - ## Repositories settings - repositories: - # Paths where repositories can be stored. Give the canonicalized absolute pathname. 
- # IMPORTANT: None of the path components may be symlink, because - # gitlab-shell invokes Dir.pwd inside the repository path and that results - # real path not the symlink. - storages: # You must have at least a `default` storage path. - default: - path: /var/lib/gitlab/repositories/ - gitaly_address: unix:/var/lib/gitlab/sockets/gitlab-gitaly.socket # TCP connections are supported too (e.g. tcp://host:port) - - ## Backup settings - backup: - path: "/var/lib/gitlab/backups" # Relative paths are relative to Rails.root (default: tmp/backups/) - # archive_permissions: 0640 # Permissions for the resulting backup.tar file (default: 0600) - # keep_time: 604800 # default: 0 (forever) (in seconds) - # pg_schema: public # default: nil, it means that all schemas will be backed up - # upload: - # # Fog storage connection settings, see http://fog.io/storage/ . - # connection: - # provider: AWS - # region: eu-west-1 - # aws_access_key_id: AKIAKIAKI - # aws_secret_access_key: 'secret123' - # # The remote 'directory' to store your backups. For S3, this would be the bucket name. - # remote_directory: 'my.s3.bucket' - # # Use multipart uploads when file size reaches 100MB, see - # # http://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html - # multipart_chunk_size: 104857600 - # # Turns on AWS Server-Side Encryption with Amazon S3-Managed Keys for backups, this is optional - # # encryption: 'AES256' - # # Specifies Amazon S3 storage class to use for backups, this is optional - # # storage_class: 'STANDARD' - - ## GitLab Shell settings - gitlab_shell: - path: /usr/share/webapps/gitlab-shell/ - hooks_path: /usr/share/webapps/gitlab-shell/hooks/ - - # File that contains the secret key for verifying access for gitlab-shell. - # Default is '.gitlab_shell_secret' relative to Rails.root (i.e. root of the GitLab app). - # secret_file: /home/git/gitlab/.gitlab_shell_secret - - # Git over HTTP - upload_pack: true - receive_pack: true - - # Git import/fetch timeout - # git_timeout: 800 - - # If you use non-standard ssh port you need to specify it - # ssh_port: 22 - - workhorse: - # File that contains the secret key for verifying access for gitlab-workhorse. - # Default is '.gitlab_workhorse_secret' relative to Rails.root (i.e. root of the GitLab app). - # secret_file: /home/git/gitlab/.gitlab_workhorse_secret - - ## Git settings - # CAUTION! - # Use the default values unless you really know what you are doing - git: - bin_path: /usr/bin/git - # The next value is the maximum memory size grit can use - # Given in number of bytes per git object (e.g. a commit) - # This value can be increased if you have very large commits - max_size: 20971520 # 20.megabytes - # Git timeout to read a commit, in seconds - timeout: 10 - - ## Webpack settings - # If enabled, this will tell rails to serve frontend assets from the webpack-dev-server running - # on a given port instead of serving directly from /assets/webpack. This is only indended for use - # in development. - webpack: - # dev_server: - # enabled: true - # host: localhost - # port: 3808 - - # - # 5. Extra customization - # ========================== - - extra: - ## Google analytics. Uncomment if you want it - # google_analytics_id: '_your_tracking_id' - - ## Piwik analytics. 
- # piwik_url: '_your_piwik_url' - # piwik_site_id: '_your_piwik_site_id' - - rack_attack: - git_basic_auth: - # Rack Attack IP banning enabled - # enabled: true - # - # Whitelist requests from 127.0.0.1 for web proxies (NGINX/Apache) with incorrect headers - # ip_whitelist: ["127.0.0.1"] - # - # Limit the number of Git HTTP authentication attempts per IP - # maxretry: 10 - # - # Reset the auth attempt counter per IP after 60 seconds - # findtime: 60 - # - # Ban an IP for one hour (3600s) after too many auth attempts - # bantime: 3600 - -development: - <<: *base - -test: - <<: *base - gravatar: - enabled: true - lfs: - enabled: false - gitlab: - host: localhost - port: 80 - - # When you run tests we clone and setup gitlab-shell - # In order to setup it correctly you need to specify - # your system username you use to run GitLab - # user: YOUR_USERNAME - pages: - path: tmp/tests/pages - repositories: - storages: - default: - path: tmp/tests/repositories/ - gitaly_address: unix:tmp/tests/gitaly/gitaly.socket - gitaly: - enabled: true - backup: - path: tmp/tests/backups - gitlab_shell: - path: tmp/tests/gitlab-shell/ - hooks_path: tmp/tests/gitlab-shell/hooks/ - issues_tracker: - redmine: - title: "Redmine" - project_url: "http://redmine/projects/:issues_tracker_id" - issues_url: "http://redmine/:project_id/:issues_tracker_id/:id" - new_issue_url: "http://redmine/projects/:issues_tracker_id/issues/new" - jira: - title: "JIRA" - url: https://sample_company.atlassian.net - project_key: PROJECT - ldap: - enabled: false - servers: - main: - label: ldap - host: 127.0.0.1 - port: 3890 - uid: 'uid' - method: 'plain' # "tls" or "ssl" or "plain" - base: 'dc=example,dc=com' - user_filter: '' - group_base: 'ou=groups,dc=example,dc=com' - admin_group: '' - -staging: - <<: *base diff --git a/states/roles/maintain/gitlab/conf_files/production.rb b/states/roles/maintain/gitlab/conf_files/production.rb deleted file mode 100644 index 0b88842..0000000 --- a/states/roles/maintain/gitlab/conf_files/production.rb +++ /dev/null @@ -1,83 +0,0 @@ -Rails.application.configure do - # Settings specified here will take precedence over those in config/application.rb - - # Code is not reloaded between requests - config.cache_classes = true - - # Full error reports are disabled and caching is turned on - config.consider_all_requests_local = false - config.action_controller.perform_caching = true - - # Disable Rails's static asset server (Apache or nginx will already do this) - config.serve_static_files = false - - # Compress JavaScripts and CSS. - config.assets.js_compressor = :uglifier - # config.assets.css_compressor = :sass - - # Don't fallback to assets pipeline if a precompiled asset is missed - config.assets.compile = false - - # Generate digests for assets URLs - config.assets.digest = true - - # Enable compression of compiled assets using gzip. - config.assets.compress = true - - # Defaults to nil and saved in location specified by config.assets.prefix - # config.assets.manifest = YOUR_PATH - - # Specifies the header that your server uses for sending files - # config.action_dispatch.x_sendfile_header = "X-Sendfile" # for apache - # config.action_dispatch.x_sendfile_header = 'X-Accel-Redirect' # for nginx - - # Force all access to the app over SSL, use Strict-Transport-Security, and use secure cookies. - # config.force_ssl = true - - # See everything in the log (default is :info) - config.log_level = :info - - # Suppress 'Rendered template ...' 
messages in the log - # source: http://stackoverflow.com/a/16369363 - %w{render_template render_partial render_collection}.each do |event| - ActiveSupport::Notifications.unsubscribe "#{event}.action_view" - end - - # Prepend all log lines with the following tags - # config.log_tags = [ :subdomain, :uuid ] - - # Use a different logger for distributed setups - # config.logger = ActiveSupport::TaggedLogging.new(SyslogLogger.new) - - # Enable serving of images, stylesheets, and JavaScripts from an asset server - config.action_controller.asset_host = ENV['GITLAB_CDN_HOST'] if ENV['GITLAB_CDN_HOST'].present? - - # Precompile additional assets (application.js, application.css, and all non-JS/CSS are already added) - # config.assets.precompile += %w( search.js ) - - # Disable delivery errors, bad email addresses will be ignored - # config.action_mailer.raise_delivery_errors = false - - # Enable threaded mode - # config.threadsafe! unless $rails_rake_task - - # Enable locale fallbacks for I18n (makes lookups for any locale fall back to - # the I18n.default_locale when a translation can not be found) - config.i18n.fallbacks = true - - # Send deprecation notices to registered listeners - config.active_support.deprecation = :notify - - config.action_mailer.delivery_method = :smtp - # Defaults to: - # # config.action_mailer.sendmail_settings = { - # # location: '/usr/sbin/sendmail', - # # arguments: '-i -t' - # # } - config.action_mailer.perform_deliveries = true - config.action_mailer.raise_delivery_errors = true - - config.eager_load = true - - config.allow_concurrency = false -end diff --git a/states/roles/maintain/gitlab/conf_files/redis.conf b/states/roles/maintain/gitlab/conf_files/redis.conf deleted file mode 100644 index e79c9b5..0000000 --- a/states/roles/maintain/gitlab/conf_files/redis.conf +++ /dev/null @@ -1,1293 +0,0 @@ -# Redis configuration file example. -# -# Note that in order to read the configuration file, Redis must be -# started with the file path as first argument: -# -# ./redis-server /path/to/redis.conf - -# Note on units: when memory size is needed, it is possible to specify -# it in the usual form of 1k 5GB 4M and so forth: -# -# 1k => 1000 bytes -# 1kb => 1024 bytes -# 1m => 1000000 bytes -# 1mb => 1024*1024 bytes -# 1g => 1000000000 bytes -# 1gb => 1024*1024*1024 bytes -# -# units are case insensitive so 1GB 1Gb 1gB are all the same. - -################################## INCLUDES ################################### - -# Include one or more other config files here. This is useful if you -# have a standard template that goes to all Redis servers but also need -# to customize a few per-server settings. Include files can include -# other files, so use this wisely. -# -# Notice option "include" won't be rewritten by command "CONFIG REWRITE" -# from admin or Redis Sentinel. Since Redis always uses the last processed -# line as value of a configuration directive, you'd better put includes -# at the beginning of this file to avoid overwriting config change at runtime. -# -# If instead you are interested in using includes to override configuration -# options, it is better to use include as the last line. -# -# include /path/to/local.conf -# include /path/to/other.conf - -################################## MODULES ##################################### - -# Load modules at startup. If the server is not able to load modules -# it will abort. It is possible to use multiple loadmodule directives. 
-# -# loadmodule /path/to/my_module.so -# loadmodule /path/to/other_module.so - -################################## NETWORK ##################################### - -# By default, if no "bind" configuration directive is specified, Redis listens -# for connections from all the network interfaces available on the server. -# It is possible to listen to just one or multiple selected interfaces using -# the "bind" configuration directive, followed by one or more IP addresses. -# -# Examples: -# -# bind 192.168.1.100 10.0.0.1 -# bind 127.0.0.1 ::1 -# -# ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the -# internet, binding to all the interfaces is dangerous and will expose the -# instance to everybody on the internet. So by default we uncomment the -# following bind directive, that will force Redis to listen only on -# the IPv4 loopback interface address (this means Redis will be able to -# accept connections only from clients running on the same computer it -# is running on). -# -# IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES -# JUST COMMENT THE FOLLOWING LINE. -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -bind 127.0.0.1 - -# Protected mode is a layer of security protection, in order to avoid that -# Redis instances left open on the internet are accessed and exploited. -# -# When protected mode is on and if: -# -# 1) The server is not binding explicitly to a set of addresses using the -# "bind" directive. -# 2) No password is configured. -# -# The server only accepts connections from clients connecting from the -# IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain -# sockets. -# -# By default protected mode is enabled. You should disable it only if -# you are sure you want clients from other hosts to connect to Redis -# even if no authentication is configured, nor a specific set of interfaces -# are explicitly listed using the "bind" directive. -protected-mode yes - -# Accept connections on the specified port, default is 6379 (IANA #815344). -# If port 0 is specified Redis will not listen on a TCP socket. -port 6379 - -# TCP listen() backlog. -# -# In high requests-per-second environments you need a high backlog in order -# to avoid slow client connection issues. Note that the Linux kernel -# will silently truncate it to the value of /proc/sys/net/core/somaxconn so -# make sure to raise both the value of somaxconn and tcp_max_syn_backlog -# in order to get the desired effect. -tcp-backlog 511 - -# Unix socket. -# -# Specify the path for the Unix socket that will be used to listen for -# incoming connections. There is no default, so Redis will not listen -# on a unix socket when not specified. -# -unixsocket /run/redis/redis.sock -unixsocketperm 770 - -# Close the connection after a client is idle for N seconds (0 to disable) -timeout 0 - -# TCP keepalive. -# -# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence -# of communication. This is useful for two reasons: -# -# 1) Detect dead peers. -# 2) Keep the connection alive from the point of view of network -# equipment in the middle. -# -# On Linux, the specified value (in seconds) is the period used to send ACKs. -# Note that to close the connection the double of the time is needed. -# On other kernels the period depends on the kernel configuration. -# -# A reasonable value for this option is 300 seconds, which is the new -# Redis default starting with Redis 3.2.1.
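As a quick aside, the bind/unixsocket directives above mean local clients can reach this instance without TCP at all. A minimal connectivity check, assuming the redis-py package is available (the socket path comes from the unixsocket directive above):

    import redis

    # Talk to the server over the unix socket instead of TCP port 6379.
    r = redis.Redis(unix_socket_path="/run/redis/redis.sock")
    print(r.ping())  # True when the server is reachable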
-tcp-keepalive 300 - -################################# GENERAL ##################################### - -# By default Redis does not run as a daemon. Use 'yes' if you need it. -# Note that Redis will write a pid file in /var/run/redis.pid when daemonized. -daemonize no - -# If you run Redis from upstart or systemd, Redis can interact with your -# supervision tree. Options: -# supervised no - no supervision interaction -# supervised upstart - signal upstart by putting Redis into SIGSTOP mode -# supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET -# supervised auto - detect upstart or systemd method based on -# UPSTART_JOB or NOTIFY_SOCKET environment variables -# Note: these supervision methods only signal "process is ready." -# They do not enable continuous liveness pings back to your supervisor. -supervised no - -# If a pid file is specified, Redis writes it where specified at startup -# and removes it at exit. -# -# When the server runs non daemonized, no pid file is created if none is -# specified in the configuration. When the server is daemonized, the pid file -# is used even if not specified, defaulting to "/var/run/redis.pid". -# -# Creating a pid file is best effort: if Redis is not able to create it -# nothing bad happens, the server will start and run normally. -pidfile /var/run/redis_6379.pid - -# Specify the server verbosity level. -# This can be one of: -# debug (a lot of information, useful for development/testing) -# verbose (many rarely useful info, but not a mess like the debug level) -# notice (moderately verbose, what you want in production probably) -# warning (only very important / critical messages are logged) -loglevel notice - -# Specify the log file name. Also the empty string can be used to force -# Redis to log on the standard output. Note that if you use standard -# output for logging but daemonize, logs will be sent to /dev/null -logfile "" - -# To enable logging to the system logger, just set 'syslog-enabled' to yes, -# and optionally update the other syslog parameters to suit your needs. -# syslog-enabled no - -# Specify the syslog identity. -# syslog-ident redis - -# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. -# syslog-facility local0 - -# Set the number of databases. The default database is DB 0, you can select -# a different one on a per-connection basis using SELECT <dbid> where -# dbid is a number between 0 and 'databases'-1 -databases 16 - -# By default Redis shows an ASCII art logo only when started to log to the -# standard output and if the standard output is a TTY. Basically this means -# that normally a logo is displayed only in interactive sessions. -# -# However it is possible to force the pre-4.0 behavior and always show an -# ASCII art logo in startup logs by setting the following option to yes. -always-show-logo yes - -################################ SNAPSHOTTING ################################ -# -# Save the DB on disk: -# -# save <seconds> <changes> -# -# Will save the DB if both the given number of seconds and the given -# number of write operations against the DB occurred. -# -# In the example below the behaviour will be to save: -# after 900 sec (15 min) if at least 1 key changed -# after 300 sec (5 min) if at least 10 keys changed -# after 60 sec if at least 10000 keys changed -# -# Note: you can disable saving completely by commenting out all "save" lines.
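A toy model of the save-point rule just described, assuming each seconds/changes pair is evaluated independently (which matches the comment, not necessarily the implementation):

    def snapshot_due(elapsed_seconds, changed_keys, save_points):
        # A snapshot is due when any (seconds, changes) pair is satisfied.
        return any(elapsed_seconds >= s and changed_keys >= c
                   for s, c in save_points)

    points = [(900, 1), (300, 10), (60, 10000)]  # the save lines below
    print(snapshot_due(400, 15, points))  # True: the (300, 10) rule fires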
-# -# It is also possible to remove all the previously configured save -# points by adding a save directive with a single empty string argument -# like in the following example: -# -# save "" - -save 900 1 -save 300 10 -save 60 10000 - -# By default Redis will stop accepting writes if RDB snapshots are enabled -# (at least one save point) and the latest background save failed. -# This will make the user aware (in a hard way) that data is not persisting -# on disk properly, otherwise chances are that no one will notice and some -# disaster will happen. -# -# If the background saving process starts working again, Redis will -# automatically allow writes again. -# -# However if you have set up proper monitoring of the Redis server -# and persistence, you may want to disable this feature so that Redis will -# continue to work as usual even if there are problems with disk, -# permissions, and so forth. -stop-writes-on-bgsave-error yes - -# Compress string objects using LZF when dumping .rdb databases? -# By default that's set to 'yes' as it's almost always a win. -# If you want to save some CPU in the saving child set it to 'no' but -# the dataset will likely be bigger if you have compressible values or keys. -rdbcompression yes - -# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. -# This makes the format more resistant to corruption but there is a performance -# hit to pay (around 10%) when saving and loading RDB files, so you can disable it -# for maximum performances. -# -# RDB files created with checksum disabled have a checksum of zero that will -# tell the loading code to skip the check. -rdbchecksum yes - -# The filename where to dump the DB -dbfilename dump.rdb - -# The working directory. -# -# The DB will be written inside this directory, with the filename specified -# above using the 'dbfilename' configuration directive. -# -# The Append Only File will also be created inside this directory. -# -# Note that you must specify a directory here, not a file name. -dir /var/lib/redis/ - -################################# REPLICATION ################################# - -# Master-Slave replication. Use slaveof to make a Redis instance a copy of -# another Redis server. A few things to understand ASAP about Redis replication. -# -# 1) Redis replication is asynchronous, but you can configure a master to -# stop accepting writes if it appears to be not connected with at least -# a given number of slaves. -# 2) Redis slaves are able to perform a partial resynchronization with the -# master if the replication link is lost for a relatively small amount of -# time. You may want to configure the replication backlog size (see the next -# sections of this file) with a sensible value depending on your needs. -# 3) Replication is automatic and does not need user intervention. After a -# network partition slaves automatically try to reconnect to masters -# and resynchronize with them. -# -# slaveof <masterip> <masterport> - -# If the master is password protected (using the "requirepass" configuration -# directive below) it is possible to tell the slave to authenticate before -# starting the replication synchronization process, otherwise the master will -# refuse the slave request.
-# -# masterauth <master-password> - -# When a slave loses its connection with the master, or when the replication -# is still in progress, the slave can act in two different ways: -# -# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will -# still reply to client requests, possibly with out of date data, or the -# data set may just be empty if this is the first synchronization. -# -# 2) if slave-serve-stale-data is set to 'no' the slave will reply with -# an error "SYNC with master in progress" to all kinds of commands -# except INFO and SLAVEOF. -# -slave-serve-stale-data yes - -# You can configure a slave instance to accept writes or not. Writing against -# a slave instance may be useful to store some ephemeral data (because data -# written on a slave will be easily deleted after resync with the master) but -# may also cause problems if clients are writing to it because of a -# misconfiguration. -# -# Since Redis 2.6 by default slaves are read-only. -# -# Note: read only slaves are not designed to be exposed to untrusted clients -# on the internet. It's just a protection layer against misuse of the instance. -# Still a read only slave exports by default all the administrative commands -# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve -# security of read only slaves using 'rename-command' to shadow all the -# administrative / dangerous commands. -slave-read-only yes - -# Replication SYNC strategy: disk or socket. -# -# ------------------------------------------------------- -# WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY -# ------------------------------------------------------- -# -# New slaves and reconnecting slaves that are not able to continue the replication -# process just receiving differences, need to do what is called a "full -# synchronization". An RDB file is transmitted from the master to the slaves. -# The transmission can happen in two different ways: -# -# 1) Disk-backed: The Redis master creates a new process that writes the RDB -# file on disk. Later the file is transferred by the parent -# process to the slaves incrementally. -# 2) Diskless: The Redis master creates a new process that directly writes the -# RDB file to slave sockets, without touching the disk at all. -# -# With disk-backed replication, while the RDB file is generated, more slaves -# can be queued and served with the RDB file as soon as the current child producing -# the RDB file finishes its work. With diskless replication instead once -# the transfer starts, new slaves arriving will be queued and a new transfer -# will start when the current one terminates. -# -# When diskless replication is used, the master waits a configurable amount of -# time (in seconds) before starting the transfer in the hope that multiple slaves -# will arrive and the transfer can be parallelized. -# -# With slow disks and fast (large bandwidth) networks, diskless replication -# works better. -repl-diskless-sync no - -# When diskless replication is enabled, it is possible to configure the delay -# the server waits in order to spawn the child that transfers the RDB via socket -# to the slaves. -# -# This is important since once the transfer starts, it is not possible to serve -# new slaves arriving, that will be queued for the next RDB transfer, so the server -# waits a delay in order to let more slaves arrive. -# -# The delay is specified in seconds, and by default is 5 seconds. To disable -# it entirely just set it to 0 seconds and the transfer will start ASAP.
-repl-diskless-sync-delay 5 - -# Slaves send PINGs to server in a predefined interval. It's possible to change -# this interval with the repl_ping_slave_period option. The default value is 10 -# seconds. -# -# repl-ping-slave-period 10 - -# The following option sets the replication timeout for: -# -# 1) Bulk transfer I/O during SYNC, from the point of view of slave. -# 2) Master timeout from the point of view of slaves (data, pings). -# 3) Slave timeout from the point of view of masters (REPLCONF ACK pings). -# -# It is important to make sure that this value is greater than the value -# specified for repl-ping-slave-period otherwise a timeout will be detected -# every time there is low traffic between the master and the slave. -# -# repl-timeout 60 - -# Disable TCP_NODELAY on the slave socket after SYNC? -# -# If you select "yes" Redis will use a smaller number of TCP packets and -# less bandwidth to send data to slaves. But this can add a delay for -# the data to appear on the slave side, up to 40 milliseconds with -# Linux kernels using a default configuration. -# -# If you select "no" the delay for data to appear on the slave side will -# be reduced but more bandwidth will be used for replication. -# -# By default we optimize for low latency, but in very high traffic conditions -# or when the master and slaves are many hops away, turning this to "yes" may -# be a good idea. -repl-disable-tcp-nodelay no - -# Set the replication backlog size. The backlog is a buffer that accumulates -# slave data when slaves are disconnected for some time, so that when a slave -# wants to reconnect again, often a full resync is not needed, but a partial -# resync is enough, just passing the portion of data the slave missed while -# disconnected. -# -# The bigger the replication backlog, the longer the time the slave can be -# disconnected and later be able to perform a partial resynchronization. -# -# The backlog is only allocated once there is at least a slave connected. -# -# repl-backlog-size 1mb - -# After a master has no longer connected slaves for some time, the backlog -# will be freed. The following option configures the amount of seconds that -# need to elapse, starting from the time the last slave disconnected, for -# the backlog buffer to be freed. -# -# Note that slaves never free the backlog for timeout, since they may be -# promoted to masters later, and should be able to correctly "partially -# resynchronize" with the slaves: hence they should always accumulate backlog. -# -# A value of 0 means to never release the backlog. -# -# repl-backlog-ttl 3600 - -# The slave priority is an integer number published by Redis in the INFO output. -# It is used by Redis Sentinel in order to select a slave to promote into a -# master if the master is no longer working correctly. -# -# A slave with a low priority number is considered better for promotion, so -# for instance if there are three slaves with priority 10, 100, 25 Sentinel will -# pick the one with priority 10, that is the lowest. -# -# However a special priority of 0 marks the slave as not able to perform the -# role of master, so a slave with priority of 0 will never be selected by -# Redis Sentinel for promotion. -# -# By default the priority is 100. -slave-priority 100 - -# It is possible for a master to stop accepting writes if there are less than -# N slaves connected, having a lag less or equal than M seconds. -# -# The N slaves need to be in "online" state. 
-# -# The lag in seconds, that must be <= the specified value, is calculated from -# the last ping received from the slave, that is usually sent every second. -# -# This option does not GUARANTEE that N replicas will accept the write, but -# will limit the window of exposure for lost writes in case not enough slaves -# are available, to the specified number of seconds. -# -# For example to require at least 3 slaves with a lag <= 10 seconds use: -# -# min-slaves-to-write 3 -# min-slaves-max-lag 10 -# -# Setting one or the other to 0 disables the feature. -# -# By default min-slaves-to-write is set to 0 (feature disabled) and -# min-slaves-max-lag is set to 10. - -# A Redis master is able to list the address and port of the attached -# slaves in different ways. For example the "INFO replication" section -# offers this information, which is used, among other tools, by -# Redis Sentinel in order to discover slave instances. -# Another place where this info is available is in the output of the -# "ROLE" command of a master. -# -# The listed IP and address normally reported by a slave is obtained -# in the following way: -# -# IP: The address is auto detected by checking the peer address -# of the socket used by the slave to connect with the master. -# -# Port: The port is communicated by the slave during the replication -# handshake, and is normally the port that the slave is using to -# list for connections. -# -# However when port forwarding or Network Address Translation (NAT) is -# used, the slave may be actually reachable via different IP and port -# pairs. The following two options can be used by a slave in order to -# report to its master a specific set of IP and port, so that both INFO -# and ROLE will report those values. -# -# There is no need to use both the options if you need to override just -# the port or the IP address. -# -# slave-announce-ip 5.5.5.5 -# slave-announce-port 1234 - -################################## SECURITY ################################### - -# Require clients to issue AUTH before processing any other -# commands. This might be useful in environments in which you do not trust -# others with access to the host running redis-server. -# -# This should stay commented out for backward compatibility and because most -# people do not need auth (e.g. they run their own servers). -# -# Warning: since Redis is pretty fast an outside user can try up to -# 150k passwords per second against a good box. This means that you should -# use a very strong password otherwise it will be very easy to break. -# -# requirepass foobared - -# Command renaming. -# -# It is possible to change the name of dangerous commands in a shared -# environment. For instance the CONFIG command may be renamed into something -# hard to guess so that it will still be available for internal-use tools -# but not available for general clients. -# -# Example: -# -# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 -# -# It is also possible to completely kill a command by renaming it into -# an empty string: -# -# rename-command CONFIG "" -# -# Please note that changing the name of commands that are logged into the -# AOF file or transmitted to slaves may cause problems. - -################################### CLIENTS #################################### - -# Set the max number of connected clients at the same time. 
By default -# this limit is set to 10000 clients, however if the Redis server is not -# able to configure the process file limit to allow for the specified limit -# the max number of allowed clients is set to the current file limit -# minus 32 (as Redis reserves a few file descriptors for internal uses). -# -# Once the limit is reached Redis will close all the new connections sending -# an error 'max number of clients reached'. -# -# maxclients 10000 - -############################## MEMORY MANAGEMENT ################################ - -# Set a memory usage limit to the specified amount of bytes. -# When the memory limit is reached Redis will try to remove keys -# according to the eviction policy selected (see maxmemory-policy). -# -# If Redis can't remove keys according to the policy, or if the policy is -# set to 'noeviction', Redis will start to reply with errors to commands -# that would use more memory, like SET, LPUSH, and so on, and will continue -# to reply to read-only commands like GET. -# -# This option is usually useful when using Redis as an LRU or LFU cache, or to -# set a hard memory limit for an instance (using the 'noeviction' policy). -# -# WARNING: If you have slaves attached to an instance with maxmemory on, -# the size of the output buffers needed to feed the slaves are subtracted -# from the used memory count, so that network problems / resyncs will -# not trigger a loop where keys are evicted, and in turn the output -# buffer of slaves is full with DELs of keys evicted triggering the deletion -# of more keys, and so forth until the database is completely emptied. -# -# In short... if you have slaves attached it is suggested that you set a lower -# limit for maxmemory so that there is some free RAM on the system for slave -# output buffers (but this is not needed if the policy is 'noeviction'). -# -# maxmemory <bytes> - -# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory -# is reached. You can select among eight behaviors: -# -# volatile-lru -> Evict using approximated LRU among the keys with an expire set. -# allkeys-lru -> Evict any key using approximated LRU. -# volatile-lfu -> Evict using approximated LFU among the keys with an expire set. -# allkeys-lfu -> Evict any key using approximated LFU. -# volatile-random -> Remove a random key among the ones with an expire set. -# allkeys-random -> Remove a random key, any key. -# volatile-ttl -> Remove the key with the nearest expire time (minor TTL) -# noeviction -> Don't evict anything, just return an error on write operations. -# -# LRU means Least Recently Used -# LFU means Least Frequently Used -# -# Both LRU, LFU and volatile-ttl are implemented using approximated -# randomized algorithms. -# -# Note: with any of the above policies, Redis will return an error on write -# operations, when there are no suitable keys for eviction. -# -# At the date of writing these commands are: set setnx setex append -# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd -# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby -# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby -# getset mset msetnx exec sort -# -# The default is: -# -# maxmemory-policy noeviction - -# LRU, LFU and minimal TTL algorithms are not precise algorithms but approximated -# algorithms (in order to save memory), so you can tune them for speed or -# accuracy.
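A toy illustration of the sampled eviction described above, not Redis's actual code: pick a few random keys and evict the one with the oldest access time.

    import random

    def approx_lru_victim(last_access, samples=5):
        # Sampling sidesteps keeping a global LRU ordering of all keys.
        keys = random.sample(list(last_access), min(samples, len(last_access)))
        return min(keys, key=lambda k: last_access[k])

    last_access = {"a": 100.0, "b": 50.0, "c": 75.0, "d": 99.0}
    print(approx_lru_victim(last_access))  # usually "b", the oldest key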
By default Redis will check five keys and pick the one that was -# used less recently, you can change the sample size using the following -# configuration directive. -# -# The default of 5 produces good enough results. 10 approximates very closely -# true LRU but costs more CPU. 3 is faster but not very accurate. -# -# maxmemory-samples 5 - -############################# LAZY FREEING #################################### - -# Redis has two primitives to delete keys. One is called DEL and is a blocking -# deletion of the object. It means that the server stops processing new commands -# in order to reclaim all the memory associated with an object in a synchronous -# way. If the key deleted is associated with a small object, the time needed -# in order to execute the DEL command is very small and comparable to most other -# O(1) or O(log_N) commands in Redis. However if the key is associated with an -# aggregated value containing millions of elements, the server can block for -# a long time (even seconds) in order to complete the operation. -# -# For the above reasons Redis also offers non blocking deletion primitives -# such as UNLINK (non blocking DEL) and the ASYNC option of FLUSHALL and -# FLUSHDB commands, in order to reclaim memory in background. Those commands -# are executed in constant time. Another thread will incrementally free the -# object in the background as fast as possible. -# -# DEL, UNLINK and ASYNC option of FLUSHALL and FLUSHDB are user-controlled. -# It's up to the design of the application to understand when it is a good -# idea to use one or the other. However the Redis server sometimes has to -# delete keys or flush the whole database as a side effect of other operations. -# Specifically Redis deletes objects independently of a user call in the -# following scenarios: -# -# 1) On eviction, because of the maxmemory and maxmemory policy configurations, -# in order to make room for new data, without going over the specified -# memory limit. -# 2) Because of expire: when a key with an associated time to live (see the -# EXPIRE command) must be deleted from memory. -# 3) Because of a side effect of a command that stores data on a key that may -# already exist. For example the RENAME command may delete the old key -# content when it is replaced with another one. Similarly SUNIONSTORE -# or SORT with STORE option may delete existing keys. The SET command -# itself removes any old content of the specified key in order to replace -# it with the specified string. -# 4) During replication, when a slave performs a full resynchronization with -# its master, the content of the whole database is removed in order to -# load the RDB file just transferred. -# -# In all the above cases the default is to delete objects in a blocking way, -# as if DEL was called. However you can configure each case specifically -# in order to instead release memory in a non-blocking way, as if UNLINK -# was called, using the following configuration directives: - -lazyfree-lazy-eviction no -lazyfree-lazy-expire no -lazyfree-lazy-server-del no -slave-lazy-flush no - -############################## APPEND ONLY MODE ############################### - -# By default Redis asynchronously dumps the dataset on disk. This mode is -# good enough in many applications, but an issue with the Redis process or -# a power outage may result in a few minutes of lost writes (depending on -# the configured save points). -# -# The Append Only File is an alternative persistence mode that provides -# much better durability.
For instance using the default data fsync policy -# (see later in the config file) Redis can lose just one second of writes in a -# dramatic event like a server power outage, or a single write if something -# wrong with the Redis process itself happens, but the operating system is -# still running correctly. -# -# AOF and RDB persistence can be enabled at the same time without problems. -# If the AOF is enabled on startup Redis will load the AOF, that is the file -# with the better durability guarantees. -# -# Please check http://redis.io/topics/persistence for more information. - -appendonly no - -# The name of the append only file (default: "appendonly.aof") - -appendfilename "appendonly.aof" - -# The fsync() call tells the Operating System to actually write data on disk -# instead of waiting for more data in the output buffer. Some OS will really flush -# data on disk, some other OS will just try to do it ASAP. -# -# Redis supports three different modes: -# -# no: don't fsync, just let the OS flush the data when it wants. Faster. -# always: fsync after every write to the append only log. Slow, Safest. -# everysec: fsync only one time every second. Compromise. -# -# The default is "everysec", as that's usually the right compromise between -# speed and data safety. It's up to you to understand if you can relax this to -# "no" that will let the operating system flush the output buffer when -# it wants, for better performances (but if you can live with the idea of -# some data loss consider the default persistence mode that's snapshotting), -# or on the contrary, use "always" that's very slow but a bit safer than -# everysec. -# -# More details please check the following article: -# http://antirez.com/post/redis-persistence-demystified.html -# -# If unsure, use "everysec". - -# appendfsync always -appendfsync everysec -# appendfsync no - -# When the AOF fsync policy is set to always or everysec, and a background -# saving process (a background save or AOF log background rewriting) is -# performing a lot of I/O against the disk, in some Linux configurations -# Redis may block too long on the fsync() call. Note that there is no fix for -# this currently, as even performing fsync in a different thread will block -# our synchronous write(2) call. -# -# In order to mitigate this problem it's possible to use the following option -# that will prevent fsync() from being called in the main process while a -# BGSAVE or BGREWRITEAOF is in progress. -# -# This means that while another child is saving, the durability of Redis is -# the same as "appendfsync none". In practical terms, this means that it is -# possible to lose up to 30 seconds of log in the worst scenario (with the -# default Linux settings). -# -# If you have latency problems turn this to "yes". Otherwise leave it as -# "no" that is the safest pick from the point of view of durability. - -no-appendfsync-on-rewrite no - -# Automatic rewrite of the append only file. -# Redis is able to automatically rewrite the log file implicitly calling -# BGREWRITEAOF when the AOF log size grows by the specified percentage. -# -# This is how it works: Redis remembers the size of the AOF file after the -# latest rewrite (if no rewrite has happened since the restart, the size of -# the AOF at startup is used). -# -# This base size is compared to the current size. If the current size is -# bigger than the specified percentage, the rewrite is triggered. 
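The growth check described above (together with the minimum size introduced just below) amounts to a comparison like this sketch, which mirrors the comment rather than the actual implementation:

    def aof_rewrite_due(current_size, base_size, percentage=100,
                        min_size=64 * 1024 * 1024):
        # Rewrite once the AOF has grown past the configured percentage of
        # the size recorded at the last rewrite, but never below min_size.
        if percentage == 0 or current_size < min_size:
            return False
        return current_size > base_size * (1 + percentage / 100)

    print(aof_rewrite_due(150 * 2**20, 70 * 2**20))  # True: grew by more than 100%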
Also -# you need to specify a minimal size for the AOF file to be rewritten, this -# is useful to avoid rewriting the AOF file even if the percentage increase -# is reached but it is still pretty small. -# -# Specify a percentage of zero in order to disable the automatic AOF -# rewrite feature. - -auto-aof-rewrite-percentage 100 -auto-aof-rewrite-min-size 64mb - -# An AOF file may be found to be truncated at the end during the Redis -# startup process, when the AOF data gets loaded back into memory. -# This may happen when the system where Redis is running -# crashes, especially when an ext4 filesystem is mounted without the -# data=ordered option (however this can't happen when Redis itself -# crashes or aborts but the operating system still works correctly). -# -# Redis can either exit with an error when this happens, or load as much -# data as possible (the default now) and start if the AOF file is found -# to be truncated at the end. The following option controls this behavior. -# -# If aof-load-truncated is set to yes, a truncated AOF file is loaded and -# the Redis server starts emitting a log to inform the user of the event. -# Otherwise if the option is set to no, the server aborts with an error -# and refuses to start. When the option is set to no, the user requires -# to fix the AOF file using the "redis-check-aof" utility before to restart -# the server. -# -# Note that if the AOF file will be found to be corrupted in the middle -# the server will still exit with an error. This option only applies when -# Redis will try to read more data from the AOF file but not enough bytes -# will be found. -aof-load-truncated yes - -# When rewriting the AOF file, Redis is able to use an RDB preamble in the -# AOF file for faster rewrites and recoveries. When this option is turned -# on the rewritten AOF file is composed of two different stanzas: -# -# [RDB file][AOF tail] -# -# When loading Redis recognizes that the AOF file starts with the "REDIS" -# string and loads the prefixed RDB file, and continues loading the AOF -# tail. -# -# This is currently turned off by default in order to avoid the surprise -# of a format change, but will at some point be used as the default. -aof-use-rdb-preamble no - -################################ LUA SCRIPTING ############################### - -# Max execution time of a Lua script in milliseconds. -# -# If the maximum execution time is reached Redis will log that a script is -# still in execution after the maximum allowed time and will start to -# reply to queries with an error. -# -# When a long running script exceeds the maximum execution time only the -# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be -# used to stop a script that did not yet called write commands. The second -# is the only way to shut down the server in the case a write command was -# already issued by the script but the user doesn't want to wait for the natural -# termination of the script. -# -# Set it to 0 or a negative value for unlimited execution without warnings. -lua-time-limit 5000 - -################################ REDIS CLUSTER ############################### -# -# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# WARNING EXPERIMENTAL: Redis Cluster is considered to be stable code, however -# in order to mark it as "mature" we need to wait for a non trivial percentage -# of users to deploy it in production. 
-# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# -# Normal Redis instances can't be part of a Redis Cluster; only nodes that are -# started as cluster nodes can. In order to start a Redis instance as a -# cluster node enable the cluster support uncommenting the following: -# -# cluster-enabled yes - -# Every cluster node has a cluster configuration file. This file is not -# intended to be edited by hand. It is created and updated by Redis nodes. -# Every Redis Cluster node requires a different cluster configuration file. -# Make sure that instances running in the same system do not have -# overlapping cluster configuration file names. -# -# cluster-config-file nodes-6379.conf - -# Cluster node timeout is the amount of milliseconds a node must be unreachable -# for it to be considered in failure state. -# Most other internal time limits are multiple of the node timeout. -# -# cluster-node-timeout 15000 - -# A slave of a failing master will avoid to start a failover if its data -# looks too old. -# -# There is no simple way for a slave to actually have an exact measure of -# its "data age", so the following two checks are performed: -# -# 1) If there are multiple slaves able to failover, they exchange messages -# in order to try to give an advantage to the slave with the best -# replication offset (more data from the master processed). -# Slaves will try to get their rank by offset, and apply to the start -# of the failover a delay proportional to their rank. -# -# 2) Every single slave computes the time of the last interaction with -# its master. This can be the last ping or command received (if the master -# is still in the "connected" state), or the time that elapsed since the -# disconnection with the master (if the replication link is currently down). -# If the last interaction is too old, the slave will not try to failover -# at all. -# -# The point "2" can be tuned by user. Specifically a slave will not perform -# the failover if, since the last interaction with the master, the time -# elapsed is greater than: -# -# (node-timeout * slave-validity-factor) + repl-ping-slave-period -# -# So for example if node-timeout is 30 seconds, and the slave-validity-factor -# is 10, and assuming a default repl-ping-slave-period of 10 seconds, the -# slave will not try to failover if it was not able to talk with the master -# for longer than 310 seconds. -# -# A large slave-validity-factor may allow slaves with too old data to failover -# a master, while a too small value may prevent the cluster from being able to -# elect a slave at all. -# -# For maximum availability, it is possible to set the slave-validity-factor -# to a value of 0, which means, that slaves will always try to failover the -# master regardless of the last time they interacted with the master. -# (However they'll always try to apply a delay proportional to their -# offset rank). -# -# Zero is the only value able to guarantee that when all the partitions heal -# the cluster will always be able to continue. -# -# cluster-slave-validity-factor 10 - -# Cluster slaves are able to migrate to orphaned masters, that are masters -# that are left without working slaves. This improves the cluster ability -# to resist to failures as otherwise an orphaned master can't be failed over -# in case of failure if it has no working slaves. -# -# Slaves migrate to orphaned masters only if there are still at least a -# given number of other working slaves for their old master. This number -# is the "migration barrier". 
A migration barrier of 1 means that a slave -# will migrate only if there is at least 1 other working slave for its master -# and so forth. It usually reflects the number of slaves you want for every -# master in your cluster. -# -# Default is 1 (slaves migrate only if their masters remain with at least -# one slave). To disable migration just set it to a very large value. -# A value of 0 can be set but is useful only for debugging and dangerous -# in production. -# -# cluster-migration-barrier 1 - -# By default Redis Cluster nodes stop accepting queries if they detect there -# is at least a hash slot uncovered (no available node is serving it). -# This way if the cluster is partially down (for example a range of hash slots -# are no longer covered) all the cluster becomes, eventually, unavailable. -# It automatically returns available as soon as all the slots are covered again. -# -# However sometimes you want the subset of the cluster which is working, -# to continue to accept queries for the part of the key space that is still -# covered. In order to do so, just set the cluster-require-full-coverage -# option to no. -# -# cluster-require-full-coverage yes - -# In order to set up your cluster make sure to read the documentation -# available at http://redis.io web site. - -########################## CLUSTER DOCKER/NAT support ######################## - -# In certain deployments, Redis Cluster node address discovery fails, because -# addresses are NAT-ted or because ports are forwarded (the typical case is -# Docker and other containers). -# -# In order to make Redis Cluster work in such environments, a static -# configuration where each node knows its public address is needed. The -# following three options are used for this scope, and are: -# -# * cluster-announce-ip -# * cluster-announce-port -# * cluster-announce-bus-port -# -# Each instructs the node about its address, client port, and cluster message -# bus port. The information is then published in the header of the bus packets -# so that other nodes will be able to correctly map the address of the node -# publishing the information. -# -# If the above options are not used, the normal Redis Cluster auto-detection -# will be used instead. -# -# Note that when remapped, the bus port may not be at the fixed offset of -# clients port + 10000, so you can specify any port and bus-port depending -# on how they get remapped. If the bus-port is not set, a fixed offset of -# 10000 will be used as usual. -# -# Example: -# -# cluster-announce-ip 10.1.1.5 -# cluster-announce-port 6379 -# cluster-announce-bus-port 6380 - -################################## SLOW LOG ################################### - -# The Redis Slow Log is a system to log queries that exceeded a specified -# execution time. The execution time does not include the I/O operations -# like talking with the client, sending the reply and so forth, -# but just the time needed to actually execute the command (this is the only -# stage of command execution where the thread is blocked and can not serve -# other requests in the meantime). -# -# You can configure the slow log with two parameters: one tells Redis -# what is the execution time, in microseconds, to exceed in order for the -# command to get logged, and the other parameter is the length of the -# slow log. When a new command is logged the oldest one is removed from the -# queue of logged commands. - -# The following time is expressed in microseconds, so 1000000 is equivalent -# to one second.
Note that a negative number disables the slow log, while -# a value of zero forces the logging of every command. -slowlog-log-slower-than 10000 - -# There is no limit to this length. Just be aware that it will consume memory. -# You can reclaim memory used by the slow log with SLOWLOG RESET. -slowlog-max-len 128 - -################################ LATENCY MONITOR ############################## - -# The Redis latency monitoring subsystem samples different operations -# at runtime in order to collect data related to possible sources of -# latency of a Redis instance. -# -# Via the LATENCY command this information is available to the user that can -# print graphs and obtain reports. -# -# The system only logs operations that were performed in a time equal or -# greater than the amount of milliseconds specified via the -# latency-monitor-threshold configuration directive. When its value is set -# to zero, the latency monitor is turned off. -# -# By default latency monitoring is disabled since it is mostly not needed -# if you don't have latency issues, and collecting data has a performance -# impact, that while very small, can be measured under big load. Latency -# monitoring can easily be enabled at runtime using the command -# "CONFIG SET latency-monitor-threshold <milliseconds>" if needed. -latency-monitor-threshold 0 - -############################# EVENT NOTIFICATION ############################## - -# Redis can notify Pub/Sub clients about events happening in the key space. -# This feature is documented at http://redis.io/topics/notifications -# -# For instance if keyspace events notification is enabled, and a client -# performs a DEL operation on key "foo" stored in the Database 0, two -# messages will be published via Pub/Sub: -# -# PUBLISH __keyspace@0__:foo del -# PUBLISH __keyevent@0__:del foo -# -# It is possible to select the events that Redis will notify among a set -# of classes. Every class is identified by a single character: -# -# K Keyspace events, published with __keyspace@<db>__ prefix. -# E Keyevent events, published with __keyevent@<db>__ prefix. -# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ... -# $ String commands -# l List commands -# s Set commands -# h Hash commands -# z Sorted set commands -# x Expired events (events generated every time a key expires) -# e Evicted events (events generated when a key is evicted for maxmemory) -# A Alias for g$lshzxe, so that the "AKE" string means all the events. -# -# The "notify-keyspace-events" takes as argument a string that is composed -# of zero or multiple characters. The empty string means that notifications -# are disabled. -# -# Example: to enable list and generic events, from the point of view of the -# event name, use: -# -# notify-keyspace-events Elg -# -# Example 2: to get the stream of the expired keys subscribing to channel -# name __keyevent@0__:expired use: -# -# notify-keyspace-events Ex -# -# By default all notifications are disabled because most users don't need -# this feature and the feature has some overhead. Note that if you don't -# specify at least one of K or E, no events will be delivered. -notify-keyspace-events "" - -############################### ADVANCED CONFIG ############################### - -# Hashes are encoded using a memory efficient data structure when they have a -# small number of entries, and the biggest entry does not exceed a given -# threshold. These thresholds can be configured using the following directives.
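Tying the event-notification section above together, a small redis-py sketch (assuming that package, and that the empty notify-keyspace-events value above is overridden at runtime) that watches for expired keys:

    import redis

    r = redis.Redis()
    r.config_set("notify-keyspace-events", "Ex")  # expired-key events only

    p = r.pubsub()
    p.psubscribe("__keyevent@0__:expired")
    r.set("demo", "value", ex=1)  # expires after one second
    for message in p.listen():
        if message["type"] == "pmessage":
            print(message["channel"], message["data"])  # b'demo'
            break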
-hash-max-ziplist-entries 512 -hash-max-ziplist-value 64 - -# Lists are also encoded in a special way to save a lot of space. -# The number of entries allowed per internal list node can be specified -# as a fixed maximum size or a maximum number of elements. -# For a fixed maximum size, use -5 through -1, meaning: -# -5: max size: 64 Kb <-- not recommended for normal workloads -# -4: max size: 32 Kb <-- not recommended -# -3: max size: 16 Kb <-- probably not recommended -# -2: max size: 8 Kb <-- good -# -1: max size: 4 Kb <-- good -# Positive numbers mean store up to _exactly_ that number of elements -# per list node. -# The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size), -# but if your use case is unique, adjust the settings as necessary. -list-max-ziplist-size -2 - -# Lists may also be compressed. -# Compress depth is the number of quicklist ziplist nodes from *each* side of -# the list to *exclude* from compression. The head and tail of the list -# are always uncompressed for fast push/pop operations. Settings are: -# 0: disable all list compression -# 1: depth 1 means "don't start compressing until after 1 node into the list, -# going from either the head or tail" -# So: [head]->node->node->...->node->[tail] -# [head], [tail] will always be uncompressed; inner nodes will compress. -# 2: [head]->[next]->node->node->...->node->[prev]->[tail] -# 2 here means: don't compress head or head->next or tail->prev or tail, -# but compress all nodes between them. -# 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail] -# etc. -list-compress-depth 0 - -# Sets have a special encoding in just one case: when a set is composed -# of just strings that happen to be integers in radix 10 in the range -# of 64 bit signed integers. -# The following configuration setting sets the limit in the size of the -# set in order to use this special memory saving encoding. -set-max-intset-entries 512 - -# Similarly to hashes and lists, sorted sets are also specially encoded in -# order to save a lot of space. This encoding is only used when the length and -# elements of a sorted set are below the following limits: -zset-max-ziplist-entries 128 -zset-max-ziplist-value 64 - -# HyperLogLog sparse representation bytes limit. The limit includes the -# 16 bytes header. When an HyperLogLog using the sparse representation crosses -# this limit, it is converted into the dense representation. -# -# A value greater than 16000 is totally useless, since at that point the -# dense representation is more memory efficient. -# -# The suggested value is ~ 3000 in order to have the benefits of -# the space efficient encoding without slowing down too much PFADD, -# which is O(N) with the sparse encoding. The value can be raised to -# ~ 10000 when CPU is not a concern, but space is, and the data set is -# composed of many HyperLogLogs with cardinality in the 0 - 15000 range. -hll-sparse-max-bytes 3000 - -# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in -# order to help rehashing the main Redis hash table (the one mapping top-level -# keys to values). The hash table implementation Redis uses (see dict.c) -# performs a lazy rehashing: the more operation you run into a hash table -# that is rehashing, the more rehashing "steps" are performed, so if the -# server is idle the rehashing is never complete and some more memory is used -# by the hash table. 
-# -# The default is to use this millisecond 10 times every second in order to -# actively rehash the main dictionaries, freeing memory when possible. -# -# If unsure: -# use "activerehashing no" if you have hard latency requirements and it is -# not a good thing in your environment that Redis can reply from time to time -# to queries with 2 milliseconds delay. -# -# use "activerehashing yes" if you don't have such hard requirements but -# want to free memory asap when possible. -activerehashing yes - -# The client output buffer limits can be used to force disconnection of clients -# that are not reading data from the server fast enough for some reason (a -# common reason is that a Pub/Sub client can't consume messages as fast as the -# publisher can produce them). -# -# The limit can be set differently for the three different classes of clients: -# -# normal -> normal clients including MONITOR clients -# slave -> slave clients -# pubsub -> clients subscribed to at least one pubsub channel or pattern -# -# The syntax of every client-output-buffer-limit directive is the following: -# -# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds> -# -# A client is immediately disconnected once the hard limit is reached, or if -# the soft limit is reached and remains reached for the specified number of -# seconds (continuously). -# So for instance if the hard limit is 32 megabytes and the soft limit is -# 16 megabytes / 10 seconds, the client will get disconnected immediately -# if the size of the output buffers reach 32 megabytes, but will also get -# disconnected if the client reaches 16 megabytes and continuously overcomes -# the limit for 10 seconds. -# -# By default normal clients are not limited because they don't receive data -# without asking (in a push way), but just after a request, so only -# asynchronous clients may create a scenario where data is requested faster -# than it can read. -# -# Instead there is a default limit for pubsub and slave clients, since -# subscribers and slaves receive data in a push fashion. -# -# Both the hard or the soft limit can be disabled by setting them to zero. -client-output-buffer-limit normal 0 0 0 -client-output-buffer-limit slave 256mb 64mb 60 -client-output-buffer-limit pubsub 32mb 8mb 60 - -# Redis calls an internal function to perform many background tasks, like -# closing connections of clients in timeout, purging expired keys that are -# never requested, and so forth. -# -# Not all tasks are performed with the same frequency, but Redis checks for -# tasks to perform according to the specified "hz" value. -# -# By default "hz" is set to 10. Raising the value will use more CPU when -# Redis is idle, but at the same time will make Redis more responsive when -# there are many keys expiring at the same time, and timeouts may be -# handled with more precision. -# -# The range is between 1 and 500, however a value over 100 is usually not -# a good idea. Most users should use the default of 10 and raise this up to -# 100 only in environments where very low latency is required. -hz 10 - -# When a child rewrites the AOF file, if the following option is enabled -# the file will be fsync-ed every 32 MB of data generated. This is useful -# in order to commit the file to the disk more incrementally and avoid -# big latency spikes. -aof-rewrite-incremental-fsync yes - -# Redis LFU eviction (see maxmemory setting) can be tuned.
However it is a good -# idea to start with the default settings and only change them after investigating -# how to improve the performances and how the keys' LFU counters change over time, which -# is possible to inspect via the OBJECT FREQ command. -# -# There are two tunable parameters in the Redis LFU implementation: the -# counter logarithm factor and the counter decay time. It is important to -# understand what the two parameters mean before changing them. -# -# The LFU counter is just 8 bits per key, its maximum value is 255, so Redis -# uses a probabilistic increment with logarithmic behavior. Given the value -# of the old counter, when a key is accessed, the counter is incremented in -# this way: -# -# 1. A random number R between 0 and 1 is extracted. -# 2. A probability P is calculated as 1/(old_value*lfu_log_factor+1). -# 3. The counter is incremented only if R < P. -# -# The default lfu-log-factor is 10. This is a table of how the frequency -# counter changes with a different number of accesses with different -# logarithmic factors: -# -# +--------+------------+------------+------------+------------+------------+ -# | factor | 100 hits | 1000 hits | 100K hits | 1M hits | 10M hits | -# +--------+------------+------------+------------+------------+------------+ -# | 0 | 104 | 255 | 255 | 255 | 255 | -# +--------+------------+------------+------------+------------+------------+ -# | 1 | 18 | 49 | 255 | 255 | 255 | -# +--------+------------+------------+------------+------------+------------+ -# | 10 | 10 | 18 | 142 | 255 | 255 | -# +--------+------------+------------+------------+------------+------------+ -# | 100 | 8 | 11 | 49 | 143 | 255 | -# +--------+------------+------------+------------+------------+------------+ -# -# NOTE: The above table was obtained by running the following commands: -# -# redis-benchmark -n 1000000 incr foo -# redis-cli object freq foo -# -# NOTE 2: The counter initial value is 5 in order to give new objects a chance -# to accumulate hits. -# -# The counter decay time is the time, in minutes, that must elapse in order -# for the key counter to be divided by two (or decremented if it has a value -# <= 10). -# -# The default value for the lfu-decay-time is 1. A special value of 0 means to -# decay the counter every time it happens to be scanned. -# -# lfu-log-factor 10 -# lfu-decay-time 1 - -########################### ACTIVE DEFRAGMENTATION ####################### -# -# WARNING THIS FEATURE IS EXPERIMENTAL. However it was stress tested -# even in production and manually tested by multiple engineers for some -# time. -# -# What is active defragmentation? -# ------------------------------- -# -# Active (online) defragmentation allows a Redis server to compact the -# spaces left between small allocations and deallocations of data in memory, -# thus allowing to reclaim back memory. -# -# Fragmentation is a natural process that happens with every allocator (but -# less so with Jemalloc, fortunately) and certain workloads. Normally a server -# restart is needed in order to lower the fragmentation, or at least to flush -# away all the data and create it again. However thanks to this feature -# implemented by Oran Agra for Redis 4.0 this process can happen at runtime -# in a "hot" way, while the server is running.
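The probabilistic increment described above is easy to simulate; this sketch uses the same formula, not the server's code:

    import random

    def lfu_touch(counter, lfu_log_factor=10):
        # Increment with probability 1/(old_value*lfu_log_factor+1),
        # saturating at the 8-bit maximum of 255.
        if counter < 255 and random.random() < 1 / (counter * lfu_log_factor + 1):
            counter += 1
        return counter

    c = 5  # new keys start at 5 to let them accumulate some hits
    for _ in range(100):
        c = lfu_touch(c)
    print(c)  # hovers around 10 after 100 hits, matching the table above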
-# -# Basically when the fragmentation is over a certain level (see the -# configuration options below) Redis will start to create new copies of the -# values in contiguous memory regions by exploiting certain specific Jemalloc -# features (in order to understand if an allocation is causing fragmentation -# and to allocate it in a better place), and at the same time, will release the -# old copies of the data. This process, repeated incrementally for all the keys -# will cause the fragmentation to drop back to normal values. -# -# Important things to understand: -# -# 1. This feature is disabled by default, and only works if you compiled Redis -# to use the copy of Jemalloc we ship with the source code of Redis. -# This is the default with Linux builds. -# -# 2. You never need to enable this feature if you don't have fragmentation -# issues. -# -# 3. Once you experience fragmentation, you can enable this feature when -# needed with the command "CONFIG SET activedefrag yes". -# -# The configuration parameters are able to fine tune the behavior of the -# defragmentation process. If you are not sure about what they mean it is -# a good idea to leave the defaults untouched. - -# Enabled active defragmentation -# activedefrag yes - -# Minimum amount of fragmentation waste to start active defrag -# active-defrag-ignore-bytes 100mb - -# Minimum percentage of fragmentation to start active defrag -# active-defrag-threshold-lower 10 - -# Maximum percentage of fragmentation at which we use maximum effort -# active-defrag-threshold-upper 100 - -# Minimal effort for defrag in CPU percentage -# active-defrag-cycle-min 25 - -# Maximal effort for defrag in CPU percentage -# active-defrag-cycle-max 75 - diff --git a/states/roles/maintain/gitlab/conf_files/resque.yml b/states/roles/maintain/gitlab/conf_files/resque.yml deleted file mode 100644 index 6c7944f..0000000 --- a/states/roles/maintain/gitlab/conf_files/resque.yml +++ /dev/null @@ -1,34 +0,0 @@ -# If you change this file in a Merge Request, please also create -# a Merge Request on https://gitlab.com/gitlab-org/omnibus-gitlab/merge_requests -# -development: - url: unix:/run/redis/redis.sock - # sentinels: - # - - # host: localhost - # port: 26380 # point to sentinel, not to redis port - # - - # host: slave2 - # port: 26381 # point to sentinel, not to redis port -test: - url: unix:/run/redis/redis.sock -production: - # Redis (single instance) - url: unix:/run/redis/redis.sock - ## - # Redis + Sentinel (for HA) - # - # Please read instructions carefully before using it as you may lose data: - # http://redis.io/topics/sentinel - # - # You must specify a list of a few sentinels that will handle client connection - # please read here for more information: https://docs.gitlab.com/ce/administration/high_availability/redis.html - ## - # url: redis://master:6379 - # sentinels: - # - - # host: slave1 - # port: 26379 # point to sentinel, not to redis port - # - - # host: slave2 - # port: 26379 # point to sentinel, not to redis port diff --git a/states/roles/maintain/gitlab/conf_files/smtp_settings.rb b/states/roles/maintain/gitlab/conf_files/smtp_settings.rb deleted file mode 100644 index ebc93e9..0000000 --- a/states/roles/maintain/gitlab/conf_files/smtp_settings.rb +++ /dev/null @@ -1,23 +0,0 @@ -# To enable smtp email delivery for your GitLab instance do the following: -# 1. Rename this file to smtp_settings.rb -# 2. Edit settings inside this file -# 3. 
Restart GitLab instance -# -# For full list of options and their values see http://api.rubyonrails.org/classes/ActionMailer/Base.html -# -# If you change this file in a Merge Request, please also create a Merge Request on https://gitlab.com/gitlab-org/omnibus-gitlab/merge_requests - -if Rails.env.production? - Rails.application.config.action_mailer.delivery_method = :smtp - - ActionMailer::Base.delivery_method = :smtp - ActionMailer::Base.smtp_settings = { - authentication: :plain, - address: "smtp.zoho.com", - port: 587, - user_name: "notifications@actcur.com", - password: "{%- include 'secure/passwords/gitlab_smtp_password.txt' -%}", - domain: "smtp.zoho.com", - enable_starttls_auto: true, - } -end diff --git a/states/roles/maintain/gitlab/conf_files/tmp_redis.conf b/states/roles/maintain/gitlab/conf_files/tmp_redis.conf deleted file mode 100644 index 773b8ea..0000000 --- a/states/roles/maintain/gitlab/conf_files/tmp_redis.conf +++ /dev/null @@ -1 +0,0 @@ -d /run/redis 0755 redis redis - diff --git a/states/roles/maintain/gitlab/init.sls b/states/roles/maintain/gitlab/init.sls deleted file mode 100644 index 2351299..0000000 --- a/states/roles/maintain/gitlab/init.sls +++ /dev/null @@ -1,175 +0,0 @@ -gitlab: - pkg.installed -mariadb: - pkg.installed -gitlab_nginx: - pkg.installed: - - name: nginx - -#managed files -/etc/webapps/gitlab/gitlab.yml: - file.managed: - - source: salt://roles/maintain/gitlab/conf_files/gitlab.yml - - user: root - - group: root - - mode: 644 -/etc/webapps/gitlab/database.yml: - file.managed: - - source: salt://roles/maintain/gitlab/conf_files/database.yml - - user: gitlab - - group: gitlab - - mode: 600 - - template: jinja -/etc/webapps/gitlab/resque.yml: - file.managed: - - source: salt://roles/maintain/gitlab/conf_files/resque.yml - - user: root - - group: root - - mode: 644 -/etc/webapps/gitlab-shell/config.yml: - file.managed: - - source: salt://roles/maintain/gitlab/conf_files/config.yml - - user: gitlab - - group: gitlab - - mode: 600 -/usr/share/webapps/gitlab/config/initializers/smtp_settings.rb: - file.managed: - - source: salt://roles/maintain/gitlab/conf_files/smtp_settings.rb - - user: root - - group: root - - mode: 644 - - template: jinja -/usr/share/webapps/gitlab/config/environments/production.rb: - file.managed: - - source: salt://roles/maintain/gitlab/conf_files/production.rb - - user: root - - group: root - - mode: 644 -/etc/redis.conf: - file.managed: - - source: salt://roles/maintain/gitlab/conf_files/redis.conf - - user: root - - group: root - - mode: 644 -/etc/tempfiles.d/redis.conf: - file.managed: - - source: salt://roles/maintain/gitlab/conf_files/tmp_redis.conf - - user: root - - group: root - - mode: 644 - - makedirs: true -/etc/nginx/conf.d/gitlab.conf: - file.managed: - - source: salt://roles/maintain/gitlab/conf_files/gitlab.conf - - user: root - - group: root - - makedirs: true - - dir_mode: 755 - - mode: 644 - -#add users git and gitlab to redis group -git_user: - user.present: - - name: git - - groups: - - redis -gitlab_user: - user.present: - - name: gitlab - - groups: - - redis - -#migrate redis database as gitlab user if necessary -redis-running: - service.running: - - name: redis - - enable: true - - watch: - - file: /etc/redis.conf - - file: /etc/tempfiles.d/redis.conf -gitlab_rake_db: - cmd.run: - - name: "bundle-2.3 exec rake db:migrate RAILS_ENV=production" - - cwd: "/usr/share/webapps/gitlab" - - runas: gitlab - - watch: - - pkg: gitlab - -#global git configuration -gitlab_git_name: - git.config_set: - - name: user.name 
- - value: "Actaeus Curabitur" - - user: gitlab - - global: true -gitlab_git_email: - git.config_set: - - name: user.email - - value: "actcur@actcur.com" - - user: gitlab - - global: true -gitlab_git_crlf: - git.config_set: - - name: core.autocrlf - - value: "input" - - user: gitlab - - global: true - -#create symlink -symlink_repos: - file.symlink: - - name: /var/lib/gitlab/repositories - - target: /mnt/repos - - force: true -#verify perms for repos are right -/var/lib/gitlab/repositories/: - file.directory: - - user: gitlab - - group: gitlab - - dir_mode: 4770 - -#start services -gitlab.target: - service.running: - - enable: true - - watch: - - file: /etc/webapps/gitlab/gitlab.yml - - file: /etc/webapps/gitlab/database.yml - - file: /etc/webapps/gitlab/resque.yml - - file: /etc/webapps/gitlab-shell/config.yml - - file: /etc/nginx/conf.d/gitlab.conf - - file: /usr/share/webapps/gitlab/config/initializers/smtp_settings.rb - - file: /usr/share/webapps/gitlab/config/environments/production.rb -gitlab-workhorse: - service.running: - - enable: true - - watch: - - file: /etc/webapps/gitlab/gitlab.yml - - file: /etc/webapps/gitlab/database.yml - - file: /etc/webapps/gitlab/resque.yml - - file: /etc/webapps/gitlab-shell/config.yml - - file: /etc/nginx/conf.d/gitlab.conf - - file: /usr/share/webapps/gitlab/config/initializers/smtp_settings.rb - - file: /usr/share/webapps/gitlab/config/environments/production.rb -gitlab-unicorn: - service.running: - - enable: true - - watch: - - file: /etc/webapps/gitlab/gitlab.yml - - file: /etc/webapps/gitlab/database.yml - - file: /etc/webapps/gitlab/resque.yml - - file: /etc/webapps/gitlab-shell/config.yml - - file: /etc/nginx/conf.d/gitlab.conf - - file: /usr/share/webapps/gitlab/config/initializers/smtp_settings.rb - - file: /usr/share/webapps/gitlab/config/environments/production.rb -gitlab-sidekiq: - service.running: - - enable: true - - watch: - - file: /etc/webapps/gitlab/gitlab.yml - - file: /etc/webapps/gitlab/database.yml - - file: /etc/webapps/gitlab/resque.yml - - file: /etc/webapps/gitlab-shell/config.yml - - file: /etc/nginx/conf.d/gitlab.conf - - file: /usr/share/webapps/gitlab/config/initializers/smtp_settings.rb - - file: /usr/share/webapps/gitlab/config/environments/production.rb diff --git a/states/roles/maintain/gitlabarch/conf_files/config.yml b/states/roles/maintain/gitlabarch/conf_files/config.yml deleted file mode 100644 index 0c802a8..0000000 --- a/states/roles/maintain/gitlabarch/conf_files/config.yml +++ /dev/null @@ -1,73 +0,0 @@ -# -# If you change this file in a Merge Request, please also create -# a Merge Request on https://gitlab.com/gitlab-org/omnibus-gitlab/merge_requests -# - -# GitLab user. git by default -user: gitlab - -# URL to GitLab instance, used for API calls. Default: http://localhost:8080. -# For relative URL support read http://doc.gitlab.com/ce/install/relative_url.html -# You only have to change the default if you have configured Unicorn -# to listen on a custom port, or if you have configured Unicorn to -# only listen on a Unix domain socket. For Unix domain sockets use -# "http+unix://", e.g. -# "http+unix://%2Fpath%2Fto%2Fsocket" -gitlab_url: "http://localhost:8080" - -# See installation.md#using-https for additional HTTPS configuration details. 
-http_settings: -# read_timeout: 300 -# user: someone -# password: somepass -# ca_file: /etc/ssl/cert.pem -# ca_path: /etc/pki/tls/certs - self_signed_cert: false - -# File used as authorized_keys for gitlab user -auth_file: "/var/lib/gitlab/.ssh/authorized_keys" - -# File that contains the secret key for verifying access to GitLab. -# Default is .gitlab_shell_secret in the gitlab-shell directory. -# secret_file: "/var/lib/gitlab/gitlab-shell/.gitlab_shell_secret" - -# Parent directory for global custom hook directories (pre-receive.d, update.d, post-receive.d) -# Default is hooks in the gitlab-shell directory. -# custom_hooks_dir: "/var/lib/gitlab/gitlab-shell/hooks" - -# Redis settings used for pushing commit notices to gitlab -redis: - bin: /usr/bin/redis-cli - host: 127.0.0.1 - port: 6379 - # pass: redispass # Allows you to specify the password for Redis - database: 5 - socket: /run/redis/redis.sock # Comment out this line if you want to use TCP or Sentinel - namespace: resque:gitlab - # sentinels: - # - - # host: 127.0.0.1 - # port: 26380 - # - - # host: 127.0.0.1 - # port: 26381 - - -# Log file. -# Default is gitlab-shell.log in the root directory. -log_file: "/var/log/gitlab/gitlab-shell.log" - -# Log level. INFO by default -log_level: INFO - -# Audit usernames. -# Set to true to see real usernames in the logs instead of key ids, which is easier to follow, but -# incurs an extra API call on every gitlab-shell command. -audit_usernames: false - -# Git trace log file. -# If set, git commands receive GIT_TRACE* environment variables -# See https://git-scm.com/book/es/v2/Git-Internals-Environment-Variables#Debugging for documentation -# An absolute path starting with / – the trace output will be appended to that file. -# It needs to exist so we can check permissions and avoid to throwing warnings to the users. -git_trace_log_file: diff --git a/states/roles/maintain/gitlabarch/conf_files/database.yml b/states/roles/maintain/gitlabarch/conf_files/database.yml deleted file mode 100644 index 6633c0d..0000000 --- a/states/roles/maintain/gitlabarch/conf_files/database.yml +++ /dev/null @@ -1,44 +0,0 @@ -# -# PRODUCTION -# -production: - adapter: mysql2 - encoding: utf8 - collation: utf8_general_ci - reconnect: false - database: gitlab - pool: 10 - username: gitlab - password: "{%- include 'secure/passwords/gitlab_db_password.txt' -%}" - host: sql.actcur.com - # socket: /tmp/mysql.sock - -# -# Development specific -# -development: - adapter: mysql2 - encoding: utf8 - collation: utf8_general_ci - reconnect: false - database: gitlabhq_development - pool: 5 - username: root - password: "secure password" - # host: localhost - # socket: /tmp/mysql.sock - -# Warning: The database defined as "test" will be erased and -# re-generated from your development database when you run "rake". -# Do not set this db to the same as development or production. -test: &test - adapter: mysql2 - encoding: utf8mb4 - collation: utf8mb4_general_ci - reconnect: false - database: gitlabhq_test - pool: 5 - username: root - password: - # host: localhost - # socket: /tmp/mysql.sock diff --git a/states/roles/maintain/gitlabarch/conf_files/gitlab.conf b/states/roles/maintain/gitlabarch/conf_files/gitlab.conf deleted file mode 100644 index cda4f4e..0000000 --- a/states/roles/maintain/gitlabarch/conf_files/gitlab.conf +++ /dev/null @@ -1,69 +0,0 @@ -## GitLab -## -## Lines starting with two hashes (##) are comments with information. -## Lines starting with one hash (#) are configuration parameters that can be uncommented. 
-## -################################## -## CONTRIBUTING ## -################################## -## -## If you change this file in a Merge Request, please also create -## a Merge Request on https://gitlab.com/gitlab-org/omnibus-gitlab/merge_requests -## -################################### -## configuration ## -################################### -## -## See installation.md#using-https for additional HTTPS configuration details. - -upstream gitlab-workhorse { - server unix:/run/gitlab/gitlab-workhorse.socket fail_timeout=0; -} - -## Normal HTTP host -server { - ## Either remove "default_server" from the listen line below, - ## or delete the /etc/nginx/sites-enabled/default file. This will cause gitlab - ## to be served if you visit any address that your server responds to, eg. - ## the ip address of the server (http://x.x.x.x/). - ## listen 0.0.0.0:80 default_server; - listen 0.0.0.0:8000; - listen [::]:8000; - server_name git2.actcur.com; ## Replace this with something like gitlab.example.com - server_tokens off; ## Don't show the nginx version number, a security best practice - - ## See app/controllers/application_controller.rb for headers set - - ## Individual nginx logs for this GitLab vhost - access_log /var/log/nginx/gitlab_access.log; - error_log /var/log/nginx/gitlab_error.log; - - location / { - client_max_body_size 0; - gzip off; - - ## https://github.com/gitlabhq/gitlabhq/issues/694 - ## Some requests take more than 30 seconds. - proxy_read_timeout 300; - proxy_connect_timeout 300; - proxy_redirect off; - - proxy_http_version 1.1; - - proxy_set_header Host $http_host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto $scheme; - - proxy_pass http://gitlab-workhorse; - } - - error_page 404 /404.html; - error_page 422 /422.html; - error_page 500 /500.html; - error_page 502 /502.html; - location ~ ^/(404|422|500|502)\.html$ { - root /usr/share/webapps/gitlab/public; - internal; - } - -} diff --git a/states/roles/maintain/gitlabarch/conf_files/gitlab.yml b/states/roles/maintain/gitlabarch/conf_files/gitlab.yml deleted file mode 100644 index 233d4e8..0000000 --- a/states/roles/maintain/gitlabarch/conf_files/gitlab.yml +++ /dev/null @@ -1,627 +0,0 @@ -# # # # # # # # # # # # # # # # # # -# GitLab application config file # -# # # # # # # # # # # # # # # # # # -# -########################### NOTE ##################################### -# This file should not receive new settings. All configuration options # -# * are being moved to ApplicationSetting model! # -# If a setting requires an application restart say so in that screen. # -# If you change this file in a Merge Request, please also create # -# a MR on https://gitlab.com/gitlab-org/omnibus-gitlab/merge_requests # -######################################################################## -# -# -# How to use: -# 1. Copy file as gitlab.yml -# 2. Update gitlab -> host with your fully qualified domain name -# 3. Update gitlab -> email_from -# 4. If you installed Git from source, change git -> bin_path to /usr/local/bin/git -# IMPORTANT: If Git was installed in a different location use that instead. -# You can check with `which git`. If a wrong path of Git is specified, it will -# result in various issues such as failures of GitLab CI builds. -# 5. Review this configuration file for other settings you may want to adjust -production: &base - # - # 1.
GitLab app settings - # ========================== - - ## GitLab settings - gitlab: - ## Web server settings (note: host is the FQDN, do not include http://) - host: git.actcur.com - port: 443 # Set to 443 if using HTTPS, see installation.md#using-https for additional HTTPS configuration details - https: true # Set to true if using HTTPS, see installation.md#using-https for additional HTTPS configuration details - - # Uncommment this line below if your ssh host is different from HTTP/HTTPS one - # (you'd obviously need to replace ssh.host_example.com with your own host). - # Otherwise, ssh host will be set to the `host:` value above - # ssh_host: ssh.host_example.com - - # Relative URL support - # WARNING: We recommend using an FQDN to host GitLab in a root path instead - # of using a relative URL. - # Documentation: http://doc.gitlab.com/ce/install/relative_url.html - # Uncomment and customize the following line to run in a non-root path - # - # relative_url_root: /gitlab - - # Trusted Proxies - # Customize if you have GitLab behind a reverse proxy which is running on a different machine. - # Add the IP address for your reverse proxy to the list, otherwise users will appear signed in from that address. - trusted_proxies: - # Examples: - #- 192.168.1.0/24 - #- 192.168.2.1 - #- 2001:0db8::/32 - - # Uncomment and customize if you can't use the default user to run GitLab (default: 'git') - user: gitlab - - ## Date & Time settings - # Uncomment and customize if you want to change the default time zone of GitLab application. - # To see all available zones, run `bundle exec rake time:zones:all RAILS_ENV=production` - # time_zone: 'UTC' - - ## Email settings - # Uncomment and set to false if you need to disable email sending from GitLab (default: true) - # email_enabled: true - # Email address used in the "From" field in mails sent by GitLab - email_from: notifications@actcur.com - email_display_name: Actcur Git - email_reply_to: noreply@actcur.com - email_subject_suffix: '' - - # Email server smtp settings are in config/initializers/smtp_settings.rb.sample - - # default_can_create_group: false # default: true - # username_changing_enabled: false # default: true - User can change her username/namespace - - ## Automatic issue closing - # If a commit message matches this regular expression, all issues referenced from the matched text will be closed. - # This happens when the commit is pushed or merged into the default branch of a project. - # When not specified the default issue_closing_pattern as specified below will be used. - # Tip: you can test your closing pattern at http://rubular.com. - # issue_closing_pattern: '((?:[Cc]los(?:e[sd]?|ing)|[Ff]ix(?:e[sd]|ing)?|[Rr]esolv(?:e[sd]?|ing))(:?) +(?:(?:issues? +)?%{issue_ref}(?:(?:, *| +and +)?)|([A-Z][A-Z0-9_]+-\d+))+)' - - ## Default project features settings - default_projects_features: - issues: true - merge_requests: true - wiki: true - snippets: true - builds: true - container_registry: true - - ## Webhook settings - # Number of seconds to wait for HTTP response after sending webhook HTTP POST request (default: 10) - # webhook_timeout: 10 - - ## Repository downloads directory - # When a user clicks e.g. 'Download zip' on a project, a temporary zip file is created in the following directory. - # The default is 'shared/cache/archive/' relative to the root of the Rails app. - # repository_downloads_path: shared/cache/archive/ - - ## Reply by email - # Allow users to comment on issues and merge requests by replying to notification emails. 
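(Aside: a quick illustration of how an issue-closing pattern like the one above is applied to a commit message. The pattern here is a simplified stand-in — GitLab expands %{issue_ref} itself — and the message is made up.)

import re

# Simplified stand-in for the default issue_closing_pattern above; GitLab
# expands %{issue_ref}, which we reduce here to a bare "#123" reference.
pattern = re.compile(r'\b(?:[Cc]los(?:e[sd]?|ing)|[Ff]ix(?:e[sd]|ing)?)\s+#(\d+)')

print(pattern.findall("Fixes #42 and closes #7 for good"))  # ['42', '7']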
- # For documentation on how to set this up, see http://doc.gitlab.com/ce/administration/reply_by_email.html - incoming_email: - enabled: false - - # The email address including the `%{key}` placeholder that will be replaced to reference the item being replied to. - # The placeholder can be omitted but if present, it must appear in the "user" part of the address (before the `@`). - address: "gitlab-incoming+%{key}@gmail.com" - - # Email account username - # With third party providers, this is usually the full email address. - # With self-hosted email servers, this is usually the user part of the email address. - user: "gitlab-incoming@gmail.com" - # Email account password - password: "[REDACTED]" - - # IMAP server host - host: "imap.gmail.com" - # IMAP server port - port: 993 - # Whether the IMAP server uses SSL - ssl: true - # Whether the IMAP server uses StartTLS - start_tls: false - - # The mailbox where incoming mail will end up. Usually "inbox". - mailbox: "inbox" - # The IDLE command timeout. - idle_timeout: 60 - - ## Build Artifacts - artifacts: - enabled: true - # The location where build artifacts are stored (default: shared/artifacts). - # path: shared/artifacts - - ## Git LFS - lfs: - enabled: true - # The location where LFS objects are stored (default: shared/lfs-objects). - # storage_path: shared/lfs-objects - - ## GitLab Pages - pages: - enabled: false - # The location where pages are stored (default: shared/pages). - # path: shared/pages - - # The domain under which the pages are served: - # http://group.example.com/project - # or project path can be a group page: group.example.com - host: example.com - port: 80 # Set to 443 if you serve the pages with HTTPS - https: false # Set to true if you serve the pages with HTTPS - # external_http: ["1.1.1.1:80", "[2001::1]:80"] # If defined, enables custom domain support in GitLab Pages - # external_https: ["1.1.1.1:443", "[2001::1]:443"] # If defined, enables custom domain and certificate support in GitLab Pages - - ## Mattermost - ## For enabling Add to Mattermost button - mattermost: - enabled: false - host: 'https://mattermost.example.com' - - ## Gravatar - ## For Libravatar see: http://doc.gitlab.com/ce/customization/libravatar.html - gravatar: - # gravatar urls: possible placeholders: %{hash} %{size} %{email} %{username} - # plain_url: "http://..." # default: http://www.gravatar.com/avatar/%{hash}?s=%{size}&d=identicon - # ssl_url: "https://..." # default: https://secure.gravatar.com/avatar/%{hash}?s=%{size}&d=identicon - - ## Auxiliary jobs - # Periodically executed jobs, to self-heal Gitlab, do external synchronizations, etc. - # Please read here for more information: https://github.com/ondrejbartas/sidekiq-cron#adding-cron-job - cron_jobs: - # Flag stuck CI jobs as failed - stuck_ci_jobs_worker: - cron: "0 * * * *" - # Execute scheduled triggers - pipeline_schedule_worker: - cron: "19 * * * *" - # Remove expired build artifacts - expire_build_artifacts_worker: - cron: "50 * * * *" - # Periodically run 'git fsck' on all repositories. If started more than - # once per hour you will have concurrent 'git fsck' jobs. 
- repository_check_worker: - cron: "20 * * * *" - # Send admin emails once a week - admin_email_worker: - cron: "0 0 * * 0" - - # Remove outdated repository archives - repository_archive_cache_worker: - cron: "0 * * * *" - - registry: - # enabled: true - # host: registry.example.com - # port: 5005 - # api_url: http://localhost:5000/ # internal address to the registry, will be used by GitLab to directly communicate with API - # key: config/registry.key - # path: shared/registry - # issuer: gitlab-issuer - - # - # 2. GitLab CI settings - # ========================== - - gitlab_ci: - # Default project notifications settings: - # - # Send emails only on broken builds (default: true) - # all_broken_builds: true - # - # Add pusher to recipients list (default: false) - # add_pusher: true - - # The location where build traces are stored (default: builds/). Relative paths are relative to Rails.root - # builds_path: builds/ - - # - # 3. Auth settings - # ========================== - - ## LDAP settings - # You can inspect a sample of the LDAP users with login access by running: - # bundle exec rake gitlab:ldap:check RAILS_ENV=production - ldap: - enabled: false - servers: - ########################################################################## - # - # Since GitLab 7.4, LDAP servers get ID's (below the ID is 'main'). GitLab - # Enterprise Edition now supports connecting to multiple LDAP servers. - # - # If you are updating from the old (pre-7.4) syntax, you MUST give your - # old server the ID 'main'. - # - ########################################################################## - main: # 'main' is the GitLab 'provider ID' of this LDAP server - ## label - # - # A human-friendly name for your LDAP server. It is OK to change the label later, - # for instance if you find out it is too large to fit on the web page. - # - # Example: 'Paris' or 'Acme, Ltd.' - label: 'LDAP' - - host: '_your_ldap_server' - port: 389 - uid: 'sAMAccountName' - method: 'plain' # "tls" or "ssl" or "plain" - bind_dn: '_the_full_dn_of_the_user_you_will_bind_with' - password: '_the_password_of_the_bind_user' - - # Set a timeout, in seconds, for LDAP queries. This helps avoid blocking - # a request if the LDAP server becomes unresponsive. - # A value of 0 means there is no timeout. - timeout: 10 - - # This setting specifies if LDAP server is Active Directory LDAP server. - # For non AD servers it skips the AD specific queries. - # If your LDAP server is not AD, set this to false. - active_directory: true - - # If allow_username_or_email_login is enabled, GitLab will ignore everything - # after the first '@' in the LDAP username submitted by the user on login. - # - # Example: - # - the user enters 'jane.doe@example.com' and 'p@ssw0rd' as LDAP credentials; - # - GitLab queries the LDAP server with 'jane.doe' and 'p@ssw0rd'. - # - # If you are using "uid: 'userPrincipalName'" on ActiveDirectory you need to - # disable this setting, because the userPrincipalName contains an '@'. - allow_username_or_email_login: false - - # To maintain tight control over the number of active users on your GitLab installation, - # enable this setting to keep new users blocked until they have been cleared by the admin - # (default: false). - block_auto_created_users: false - - # Base where we can search for users - # - # Ex. ou=People,dc=gitlab,dc=example - # - base: '' - - # Filter LDAP users - # - # Format: RFC 4515 http://tools.ietf.org/search/rfc4515 - # Ex. 
(employeeType=developer) - # - # Note: GitLab does not support omniauth-ldap's custom filter syntax. - # - user_filter: '' - - # LDAP attributes that GitLab will use to create an account for the LDAP user. - # The specified attribute can either be the attribute name as a string (e.g. 'mail'), - # or an array of attribute names to try in order (e.g. ['mail', 'email']). - # Note that the user's LDAP login will always be the attribute specified as `uid` above. - attributes: - # The username will be used in paths for the user's own projects - # (like `gitlab.example.com/username/project`) and when mentioning - # them in issues, merge request and comments (like `@username`). - # If the attribute specified for `username` contains an email address, - # the GitLab username will be the part of the email address before the '@'. - username: ['uid', 'userid', 'sAMAccountName'] - email: ['mail', 'email', 'userPrincipalName'] - - # If no full name could be found at the attribute specified for `name`, - # the full name is determined using the attributes specified for - # `first_name` and `last_name`. - name: 'cn' - first_name: 'givenName' - last_name: 'sn' - - # GitLab EE only: add more LDAP servers - # Choose an ID made of a-z and 0-9 . This ID will be stored in the database - # so that GitLab can remember which LDAP server a user belongs to. - # uswest2: - # label: - # host: - # .... - - - ## OmniAuth settings - omniauth: - # Allow login via Twitter, Google, etc. using OmniAuth providers - enabled: false - - # Uncomment this to automatically sign in with a specific omniauth provider's without - # showing GitLab's sign-in page (default: show the GitLab sign-in page) - # auto_sign_in_with_provider: saml - - # Sync user's email address from the specified Omniauth provider every time the user logs - # in (default: nil). And consequently make this field read-only. - # sync_email_from_provider: cas3 - - # CAUTION! - # This allows users to login without having a user account first. Define the allowed providers - # using an array, e.g. ["saml", "twitter"], or as true/false to allow all providers or none. - # User accounts will be created automatically when authentication was successful. - allow_single_sign_on: ["saml"] - - # Locks down those users until they have been cleared by the admin (default: true). - block_auto_created_users: true - # Look up new users in LDAP servers. If a match is found (same uid), automatically - # link the omniauth identity with the LDAP account. (default: false) - auto_link_ldap_user: false - - # Allow users with existing accounts to login and auto link their account via SAML - # login, without having to do a manual login first and manually add SAML - # (default: false) - auto_link_saml_user: false - - # Set different Omniauth providers as external so that all users creating accounts - # via these providers will not be able to have access to internal projects. You - # will need to use the full name of the provider, like `google_oauth2` for Google. - # Refer to the examples below for the full names of the supported providers. 
- # (default: []) - external_providers: [] - - ## Auth providers - # Uncomment the following lines and fill in the data of the auth provider you want to use - # If your favorite auth provider is not listed you can use others: - # see https://github.com/gitlabhq/gitlab-public-wiki/wiki/Custom-omniauth-provider-configurations - # The 'app_id' and 'app_secret' parameters are always passed as the first two - # arguments, followed by optional 'args' which can be either a hash or an array. - # Documentation for this is available at http://doc.gitlab.com/ce/integration/omniauth.html - providers: - # See omniauth-cas3 for more configuration details - # - { name: 'cas3', - # label: 'cas3', - # args: { - # url: 'https://sso.example.com', - # disable_ssl_verification: false, - # login_url: '/cas/login', - # service_validate_url: '/cas/p3/serviceValidate', - # logout_url: '/cas/logout'} } - # - { name: 'authentiq', - # # for client credentials (client ID and secret), go to https://www.authentiq.com/ - # app_id: 'YOUR_CLIENT_ID', - # app_secret: 'YOUR_CLIENT_SECRET', - # args: { - # scope: 'aq:name email~rs address aq:push' - # # redirect_uri parameter is optional except when 'gitlab.host' in this file is set to 'localhost' - # # redirect_uri: 'YOUR_REDIRECT_URI' - # } - # } - # - { name: 'github', - # app_id: 'YOUR_APP_ID', - # app_secret: 'YOUR_APP_SECRET', - # url: "https://github.com/", - # verify_ssl: true, - # args: { scope: 'user:email' } } - # - { name: 'bitbucket', - # app_id: 'YOUR_APP_ID', - # app_secret: 'YOUR_APP_SECRET' } - # - { name: 'gitlab', - # app_id: 'YOUR_APP_ID', - # app_secret: 'YOUR_APP_SECRET', - # args: { scope: 'api' } } - # - { name: 'google_oauth2', - # app_id: 'YOUR_APP_ID', - # app_secret: 'YOUR_APP_SECRET', - # args: { access_type: 'offline', approval_prompt: '' } } - # - { name: 'facebook', - # app_id: 'YOUR_APP_ID', - # app_secret: 'YOUR_APP_SECRET' } - # - { name: 'twitter', - # app_id: 'YOUR_APP_ID', - # app_secret: 'YOUR_APP_SECRET' } - # - # - { name: 'saml', - # label: 'Our SAML Provider', - # groups_attribute: 'Groups', - # external_groups: ['Contractors', 'Freelancers'], - # args: { - # assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback', - # idp_cert_fingerprint: '43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8', - # idp_sso_target_url: 'https://login.example.com/idp', - # issuer: 'https://gitlab.example.com', - # name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:transient' - # } } - # - # - { name: 'crowd', - # args: { - # crowd_server_url: 'CROWD SERVER URL', - # application_name: 'YOUR_APP_NAME', - # application_password: 'YOUR_APP_PASSWORD' } } - # - # - { name: 'auth0', - # args: { - # client_id: 'YOUR_AUTH0_CLIENT_ID', - # client_secret: 'YOUR_AUTH0_CLIENT_SECRET', - # namespace: 'YOUR_AUTH0_DOMAIN' } } - - # SSO maximum session duration in seconds. Defaults to CAS default of 8 hours. - # cas3: - # session_duration: 28800 - - # Shared file storage settings - shared: - path: /var/lib/gitlab/shared # Default: shared - - # Gitaly settings - gitaly: - # This setting controls whether GitLab uses Gitaly (new component - # introduced in 9.0). Eventually Gitaly use will become mandatory and - # this option will disappear. - enabled: true - - # - # 4. Advanced settings - # ========================== - - ## Repositories settings - repositories: - # Paths where repositories can be stored. Give the canonicalized absolute pathname. 
- # IMPORTANT: None of the path components may be symlink, because - # gitlab-shell invokes Dir.pwd inside the repository path and that results - # real path not the symlink. - storages: # You must have at least a `default` storage path. - default: - path: /var/lib/gitlab/repositories/ - gitaly_address: unix:/var/lib/gitlab/sockets/gitlab-gitaly.socket # TCP connections are supported too (e.g. tcp://host:port) - - ## Backup settings - backup: - path: "/var/lib/gitlab/backups" # Relative paths are relative to Rails.root (default: tmp/backups/) - # archive_permissions: 0640 # Permissions for the resulting backup.tar file (default: 0600) - # keep_time: 604800 # default: 0 (forever) (in seconds) - # pg_schema: public # default: nil, it means that all schemas will be backed up - # upload: - # # Fog storage connection settings, see http://fog.io/storage/ . - # connection: - # provider: AWS - # region: eu-west-1 - # aws_access_key_id: AKIAKIAKI - # aws_secret_access_key: 'secret123' - # # The remote 'directory' to store your backups. For S3, this would be the bucket name. - # remote_directory: 'my.s3.bucket' - # # Use multipart uploads when file size reaches 100MB, see - # # http://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html - # multipart_chunk_size: 104857600 - # # Turns on AWS Server-Side Encryption with Amazon S3-Managed Keys for backups, this is optional - # # encryption: 'AES256' - # # Specifies Amazon S3 storage class to use for backups, this is optional - # # storage_class: 'STANDARD' - - ## GitLab Shell settings - gitlab_shell: - path: /usr/share/webapps/gitlab-shell/ - hooks_path: /usr/share/webapps/gitlab-shell/hooks/ - - # File that contains the secret key for verifying access for gitlab-shell. - # Default is '.gitlab_shell_secret' relative to Rails.root (i.e. root of the GitLab app). - # secret_file: /home/git/gitlab/.gitlab_shell_secret - - # Git over HTTP - upload_pack: true - receive_pack: true - - # Git import/fetch timeout - # git_timeout: 800 - - # If you use non-standard ssh port you need to specify it - # ssh_port: 22 - - workhorse: - # File that contains the secret key for verifying access for gitlab-workhorse. - # Default is '.gitlab_workhorse_secret' relative to Rails.root (i.e. root of the GitLab app). - # secret_file: /home/git/gitlab/.gitlab_workhorse_secret - - ## Git settings - # CAUTION! - # Use the default values unless you really know what you are doing - git: - bin_path: /usr/bin/git - # The next value is the maximum memory size grit can use - # Given in number of bytes per git object (e.g. a commit) - # This value can be increased if you have very large commits - max_size: 20971520 # 20.megabytes - # Git timeout to read a commit, in seconds - timeout: 10 - - ## Webpack settings - # If enabled, this will tell rails to serve frontend assets from the webpack-dev-server running - # on a given port instead of serving directly from /assets/webpack. This is only indended for use - # in development. - webpack: - # dev_server: - # enabled: true - # host: localhost - # port: 3808 - - # - # 5. Extra customization - # ========================== - - extra: - ## Google analytics. Uncomment if you want it - # google_analytics_id: '_your_tracking_id' - - ## Piwik analytics. 
- # piwik_url: '_your_piwik_url' - # piwik_site_id: '_your_piwik_site_id' - - rack_attack: - git_basic_auth: - # Rack Attack IP banning enabled - # enabled: true - # - # Whitelist requests from 127.0.0.1 for web proxies (NGINX/Apache) with incorrect headers - # ip_whitelist: ["127.0.0.1"] - # - # Limit the number of Git HTTP authentication attempts per IP - # maxretry: 10 - # - # Reset the auth attempt counter per IP after 60 seconds - # findtime: 60 - # - # Ban an IP for one hour (3600s) after too many auth attempts - # bantime: 3600 - -development: - <<: *base - -test: - <<: *base - gravatar: - enabled: true - lfs: - enabled: false - gitlab: - host: localhost - port: 80 - - # When you run tests we clone and setup gitlab-shell - # In order to setup it correctly you need to specify - # your system username you use to run GitLab - # user: YOUR_USERNAME - pages: - path: tmp/tests/pages - repositories: - storages: - default: - path: tmp/tests/repositories/ - gitaly_address: unix:tmp/tests/gitaly/gitaly.socket - gitaly: - enabled: true - backup: - path: tmp/tests/backups - gitlab_shell: - path: tmp/tests/gitlab-shell/ - hooks_path: tmp/tests/gitlab-shell/hooks/ - issues_tracker: - redmine: - title: "Redmine" - project_url: "http://redmine/projects/:issues_tracker_id" - issues_url: "http://redmine/:project_id/:issues_tracker_id/:id" - new_issue_url: "http://redmine/projects/:issues_tracker_id/issues/new" - jira: - title: "JIRA" - url: https://sample_company.atlassian.net - project_key: PROJECT - ldap: - enabled: false - servers: - main: - label: ldap - host: 127.0.0.1 - port: 3890 - uid: 'uid' - method: 'plain' # "tls" or "ssl" or "plain" - base: 'dc=example,dc=com' - user_filter: '' - group_base: 'ou=groups,dc=example,dc=com' - admin_group: '' - -staging: - <<: *base diff --git a/states/roles/maintain/gitlabarch/conf_files/production.rb b/states/roles/maintain/gitlabarch/conf_files/production.rb deleted file mode 100644 index 0b88842..0000000 --- a/states/roles/maintain/gitlabarch/conf_files/production.rb +++ /dev/null @@ -1,83 +0,0 @@ -Rails.application.configure do - # Settings specified here will take precedence over those in config/application.rb - - # Code is not reloaded between requests - config.cache_classes = true - - # Full error reports are disabled and caching is turned on - config.consider_all_requests_local = false - config.action_controller.perform_caching = true - - # Disable Rails's static asset server (Apache or nginx will already do this) - config.serve_static_files = false - - # Compress JavaScripts and CSS. - config.assets.js_compressor = :uglifier - # config.assets.css_compressor = :sass - - # Don't fallback to assets pipeline if a precompiled asset is missed - config.assets.compile = false - - # Generate digests for assets URLs - config.assets.digest = true - - # Enable compression of compiled assets using gzip. - config.assets.compress = true - - # Defaults to nil and saved in location specified by config.assets.prefix - # config.assets.manifest = YOUR_PATH - - # Specifies the header that your server uses for sending files - # config.action_dispatch.x_sendfile_header = "X-Sendfile" # for apache - # config.action_dispatch.x_sendfile_header = 'X-Accel-Redirect' # for nginx - - # Force all access to the app over SSL, use Strict-Transport-Security, and use secure cookies. - # config.force_ssl = true - - # See everything in the log (default is :info) - config.log_level = :info - - # Suppress 'Rendered template ...' 
messages in the log - # source: http://stackoverflow.com/a/16369363 - %w{render_template render_partial render_collection}.each do |event| - ActiveSupport::Notifications.unsubscribe "#{event}.action_view" - end - - # Prepend all log lines with the following tags - # config.log_tags = [ :subdomain, :uuid ] - - # Use a different logger for distributed setups - # config.logger = ActiveSupport::TaggedLogging.new(SyslogLogger.new) - - # Enable serving of images, stylesheets, and JavaScripts from an asset server - config.action_controller.asset_host = ENV['GITLAB_CDN_HOST'] if ENV['GITLAB_CDN_HOST'].present? - - # Precompile additional assets (application.js, application.css, and all non-JS/CSS are already added) - # config.assets.precompile += %w( search.js ) - - # Disable delivery errors, bad email addresses will be ignored - # config.action_mailer.raise_delivery_errors = false - - # Enable threaded mode - # config.threadsafe! unless $rails_rake_task - - # Enable locale fallbacks for I18n (makes lookups for any locale fall back to - # the I18n.default_locale when a translation can not be found) - config.i18n.fallbacks = true - - # Send deprecation notices to registered listeners - config.active_support.deprecation = :notify - - config.action_mailer.delivery_method = :smtp - # Defaults to: - # # config.action_mailer.sendmail_settings = { - # # location: '/usr/sbin/sendmail', - # # arguments: '-i -t' - # # } - config.action_mailer.perform_deliveries = true - config.action_mailer.raise_delivery_errors = true - - config.eager_load = true - - config.allow_concurrency = false -end diff --git a/states/roles/maintain/gitlabarch/conf_files/redis.conf b/states/roles/maintain/gitlabarch/conf_files/redis.conf deleted file mode 100644 index e79c9b5..0000000 --- a/states/roles/maintain/gitlabarch/conf_files/redis.conf +++ /dev/null @@ -1,1293 +0,0 @@ -# Redis configuration file example. -# -# Note that in order to read the configuration file, Redis must be -# started with the file path as first argument: -# -# ./redis-server /path/to/redis.conf - -# Note on units: when memory size is needed, it is possible to specify -# it in the usual form of 1k 5GB 4M and so forth: -# -# 1k => 1000 bytes -# 1kb => 1024 bytes -# 1m => 1000000 bytes -# 1mb => 1024*1024 bytes -# 1g => 1000000000 bytes -# 1gb => 1024*1024*1024 bytes -# -# units are case insensitive so 1GB 1Gb 1gB are all the same. - -################################## INCLUDES ################################### - -# Include one or more other config files here. This is useful if you -# have a standard template that goes to all Redis servers but also need -# to customize a few per-server settings. Include files can include -# other files, so use this wisely. -# -# Notice option "include" won't be rewritten by command "CONFIG REWRITE" -# from admin or Redis Sentinel. Since Redis always uses the last processed -# line as value of a configuration directive, you'd better put includes -# at the beginning of this file to avoid overwriting config change at runtime. -# -# If instead you are interested in using includes to override configuration -# options, it is better to use include as the last line. -# -# include /path/to/local.conf -# include /path/to/other.conf - -################################## MODULES ##################################### - -# Load modules at startup. If the server is not able to load modules -# it will abort. It is possible to use multiple loadmodule directives. 
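(Aside: a small Python parser sketch for the size-unit notation described at the top of this file. This is our illustration only; Redis does its own parsing in C.)

# Multipliers from the "Note on units" table above; units are case-insensitive,
# and a bare number is taken as bytes.
UNITS = {"k": 1000, "kb": 1024, "m": 1000**2, "mb": 1024**2,
         "g": 1000**3, "gb": 1024**3, "": 1}

def parse_size(text):
    digits = "".join(ch for ch in text if ch.isdigit())
    return int(digits) * UNITS[text[len(digits):].strip().lower()]

print(parse_size("1gb"))  # 1073741824
print(parse_size("5GB"))  # 5368709120 -- 1GB, 1Gb and 1gB are all the same
print(parse_size("1k"))   # 1000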
-# -# loadmodule /path/to/my_module.so -# loadmodule /path/to/other_module.so - -################################## NETWORK ##################################### - -# By default, if no "bind" configuration directive is specified, Redis listens -# for connections from all the network interfaces available on the server. -# It is possible to listen to just one or multiple selected interfaces using -# the "bind" configuration directive, followed by one or more IP addresses. -# -# Examples: -# -# bind 192.168.1.100 10.0.0.1 -# bind 127.0.0.1 ::1 -# -# ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the -# internet, binding to all the interfaces is dangerous and will expose the -# instance to everybody on the internet. So by default we uncomment the -# following bind directive, that will force Redis to listen only on -# the IPv4 loopback interface address (this means Redis will be able to -# accept connections only from clients running on the same computer it -# is running on). -# -# IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES -# JUST COMMENT THE FOLLOWING LINE. -# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -bind 127.0.0.1 - -# Protected mode is a layer of security protection, in order to avoid that -# Redis instances left open on the internet are accessed and exploited. -# -# When protected mode is on and if: -# -# 1) The server is not binding explicitly to a set of addresses using the -# "bind" directive. -# 2) No password is configured. -# -# The server only accepts connections from clients connecting from the -# IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain -# sockets. -# -# By default protected mode is enabled. You should disable it only if -# you are sure you want clients from other hosts to connect to Redis -# even if no authentication is configured, nor a specific set of interfaces -# are explicitly listed using the "bind" directive. -protected-mode yes - -# Accept connections on the specified port, default is 6379 (IANA #815344). -# If port 0 is specified Redis will not listen on a TCP socket. -port 6379 - -# TCP listen() backlog. -# -# In high requests-per-second environments you need a high backlog in order -# to avoid slow client connection issues. Note that the Linux kernel -# will silently truncate it to the value of /proc/sys/net/core/somaxconn so -# make sure to raise both the value of somaxconn and tcp_max_syn_backlog -# in order to get the desired effect. -tcp-backlog 511 - -# Unix socket. -# -# Specify the path for the Unix socket that will be used to listen for -# incoming connections. There is no default, so Redis will not listen -# on a unix socket when not specified. -# -unixsocket /run/redis/redis.sock -unixsocketperm 770 - -# Close the connection after a client is idle for N seconds (0 to disable) -timeout 0 - -# TCP keepalive. -# -# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence -# of communication. This is useful for two reasons: -# -# 1) Detect dead peers. -# 2) Take the connection alive from the point of view of network -# equipment in the middle. -# -# On Linux, the specified value (in seconds) is the period used to send ACKs. -# Note that to close the connection twice that time is needed. -# On other kernels the period depends on the kernel configuration. -# -# A reasonable value for this option is 300 seconds, which is the new -# Redis default starting with Redis 3.2.1.
-tcp-keepalive 300 - -################################# GENERAL ##################################### - -# By default Redis does not run as a daemon. Use 'yes' if you need it. -# Note that Redis will write a pid file in /var/run/redis.pid when daemonized. -daemonize no - -# If you run Redis from upstart or systemd, Redis can interact with your -# supervision tree. Options: -# supervised no - no supervision interaction -# supervised upstart - signal upstart by putting Redis into SIGSTOP mode -# supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET -# supervised auto - detect upstart or systemd method based on -# UPSTART_JOB or NOTIFY_SOCKET environment variables -# Note: these supervision methods only signal "process is ready." -# They do not enable continuous liveness pings back to your supervisor. -supervised no - -# If a pid file is specified, Redis writes it where specified at startup -# and removes it at exit. -# -# When the server runs non daemonized, no pid file is created if none is -# specified in the configuration. When the server is daemonized, the pid file -# is used even if not specified, defaulting to "/var/run/redis.pid". -# -# Creating a pid file is best effort: if Redis is not able to create it -# nothing bad happens, the server will start and run normally. -pidfile /var/run/redis_6379.pid - -# Specify the server verbosity level. -# This can be one of: -# debug (a lot of information, useful for development/testing) -# verbose (many rarely useful info, but not a mess like the debug level) -# notice (moderately verbose, what you want in production probably) -# warning (only very important / critical messages are logged) -loglevel notice - -# Specify the log file name. Also the empty string can be used to force -# Redis to log on the standard output. Note that if you use standard -# output for logging but daemonize, logs will be sent to /dev/null -logfile "" - -# To enable logging to the system logger, just set 'syslog-enabled' to yes, -# and optionally update the other syslog parameters to suit your needs. -# syslog-enabled no - -# Specify the syslog identity. -# syslog-ident redis - -# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. -# syslog-facility local0 - -# Set the number of databases. The default database is DB 0, you can select -# a different one on a per-connection basis using SELECT <dbid> where -# dbid is a number between 0 and 'databases'-1 -databases 16 - -# By default Redis shows an ASCII art logo only when started to log to the -# standard output and if the standard output is a TTY. Basically this means -# that normally a logo is displayed only in interactive sessions. -# -# However it is possible to force the pre-4.0 behavior and always show an -# ASCII art logo in startup logs by setting the following option to yes. -always-show-logo yes - -################################ SNAPSHOTTING ################################ -# -# Save the DB on disk: -# -# save <seconds> <changes> -# -# Will save the DB if both the given number of seconds and the given -# number of write operations against the DB occurred. -# -# In the example below the behaviour will be to save: -# after 900 sec (15 min) if at least 1 key changed -# after 300 sec (5 min) if at least 10 keys changed -# after 60 sec if at least 10000 keys changed -# -# Note: you can disable saving completely by commenting out all "save" lines.
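(Aside, before the concrete save points below: a minimal Python sketch of how several "save <seconds> <changes>" rules combine under the semantics just described. The function is ours, purely illustrative.)

def should_snapshot(elapsed_seconds, changed_keys,
                    save_points=((900, 1), (300, 10), (60, 10000))):
    # A snapshot triggers when ANY save rule has both thresholds met:
    # at least <seconds> elapsed AND at least <changes> writes occurred.
    return any(elapsed_seconds >= s and changed_keys >= c
               for s, c in save_points)

print(should_snapshot(120, 15))   # False: no rule satisfied yet
print(should_snapshot(300, 10))   # True: matches "save 300 10"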
-# -# It is also possible to remove all the previously configured save -# points by adding a save directive with a single empty string argument -# like in the following example: -# -# save "" - -save 900 1 -save 300 10 -save 60 10000 - -# By default Redis will stop accepting writes if RDB snapshots are enabled -# (at least one save point) and the latest background save failed. -# This will make the user aware (in a hard way) that data is not persisting -# on disk properly, otherwise chances are that no one will notice and some -# disaster will happen. -# -# If the background saving process starts working again Redis will -# automatically allow writes again. -# -# However if you have set up proper monitoring of the Redis server -# and persistence, you may want to disable this feature so that Redis will -# continue to work as usual even if there are problems with disk, -# permissions, and so forth. -stop-writes-on-bgsave-error yes - -# Compress string objects using LZF when dumping .rdb databases? -# By default that's set to 'yes' as it's almost always a win. -# If you want to save some CPU in the saving child set it to 'no' but -# the dataset will likely be bigger if you have compressible values or keys. -rdbcompression yes - -# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. -# This makes the format more resistant to corruption but there is a performance -# hit to pay (around 10%) when saving and loading RDB files, so you can disable it -# for maximum performance. -# -# RDB files created with checksum disabled have a checksum of zero that will -# tell the loading code to skip the check. -rdbchecksum yes - -# The filename where to dump the DB -dbfilename dump.rdb - -# The working directory. -# -# The DB will be written inside this directory, with the filename specified -# above using the 'dbfilename' configuration directive. -# -# The Append Only File will also be created inside this directory. -# -# Note that you must specify a directory here, not a file name. -dir /var/lib/redis/ - -################################# REPLICATION ################################# - -# Master-Slave replication. Use slaveof to make a Redis instance a copy of -# another Redis server. A few things to understand ASAP about Redis replication. -# -# 1) Redis replication is asynchronous, but you can configure a master to -# stop accepting writes if it appears to be not connected with at least -# a given number of slaves. -# 2) Redis slaves are able to perform a partial resynchronization with the -# master if the replication link is lost for a relatively small amount of -# time. You may want to configure the replication backlog size (see the next -# sections of this file) with a sensible value depending on your needs. -# 3) Replication is automatic and does not need user intervention. After a -# network partition slaves automatically try to reconnect to masters -# and resynchronize with them. -# -# slaveof <masterip> <masterport> - -# If the master is password protected (using the "requirepass" configuration -# directive below) it is possible to tell the slave to authenticate before -# starting the replication synchronization process, otherwise the master will -# refuse the slave request.
-# -# masterauth <master-password> - -# When a slave loses its connection with the master, or when the replication -# is still in progress, the slave can act in two different ways: -# -# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will -# still reply to client requests, possibly with out of date data, or the -# data set may just be empty if this is the first synchronization. -# -# 2) if slave-serve-stale-data is set to 'no' the slave will reply with -# an error "SYNC with master in progress" to all kinds of commands -# except INFO and SLAVEOF. -# -slave-serve-stale-data yes - -# You can configure a slave instance to accept writes or not. Writing against -# a slave instance may be useful to store some ephemeral data (because data -# written on a slave will be easily deleted after resync with the master) but -# may also cause problems if clients are writing to it because of a -# misconfiguration. -# -# Since Redis 2.6 by default slaves are read-only. -# -# Note: read only slaves are not designed to be exposed to untrusted clients -# on the internet. It's just a protection layer against misuse of the instance. -# Still a read only slave exports by default all the administrative commands -# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve -# security of read only slaves using 'rename-command' to shadow all the -# administrative / dangerous commands. -slave-read-only yes - -# Replication SYNC strategy: disk or socket. -# -# ------------------------------------------------------- -# WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY -# ------------------------------------------------------- -# -# New slaves and reconnecting slaves that are not able to continue the replication -# process just receiving differences, need to do what is called a "full -# synchronization". An RDB file is transmitted from the master to the slaves. -# The transmission can happen in two different ways: -# -# 1) Disk-backed: The Redis master creates a new process that writes the RDB -# file on disk. Later the file is transferred by the parent -# process to the slaves incrementally. -# 2) Diskless: The Redis master creates a new process that directly writes the -# RDB file to slave sockets, without touching the disk at all. -# -# With disk-backed replication, while the RDB file is generated, more slaves -# can be queued and served with the RDB file as soon as the current child producing -# the RDB file finishes its work. With diskless replication instead once -# the transfer starts, new slaves arriving will be queued and a new transfer -# will start when the current one terminates. -# -# When diskless replication is used, the master waits a configurable amount of -# time (in seconds) before starting the transfer in the hope that multiple slaves -# will arrive and the transfer can be parallelized. -# -# With slow disks and fast (large bandwidth) networks, diskless replication -# works better. -repl-diskless-sync no - -# When diskless replication is enabled, it is possible to configure the delay -# the server waits in order to spawn the child that transfers the RDB via socket -# to the slaves. -# -# This is important since once the transfer starts, it is not possible to serve -# new slaves arriving, that will be queued for the next RDB transfer, so the server -# waits a delay in order to let more slaves arrive. -# -# The delay is specified in seconds, and by default is 5 seconds. To disable -# it entirely just set it to 0 seconds and the transfer will start ASAP.
-repl-diskless-sync-delay 5 - -# Slaves send PINGs to server in a predefined interval. It's possible to change -# this interval with the repl_ping_slave_period option. The default value is 10 -# seconds. -# -# repl-ping-slave-period 10 - -# The following option sets the replication timeout for: -# -# 1) Bulk transfer I/O during SYNC, from the point of view of slave. -# 2) Master timeout from the point of view of slaves (data, pings). -# 3) Slave timeout from the point of view of masters (REPLCONF ACK pings). -# -# It is important to make sure that this value is greater than the value -# specified for repl-ping-slave-period otherwise a timeout will be detected -# every time there is low traffic between the master and the slave. -# -# repl-timeout 60 - -# Disable TCP_NODELAY on the slave socket after SYNC? -# -# If you select "yes" Redis will use a smaller number of TCP packets and -# less bandwidth to send data to slaves. But this can add a delay for -# the data to appear on the slave side, up to 40 milliseconds with -# Linux kernels using a default configuration. -# -# If you select "no" the delay for data to appear on the slave side will -# be reduced but more bandwidth will be used for replication. -# -# By default we optimize for low latency, but in very high traffic conditions -# or when the master and slaves are many hops away, turning this to "yes" may -# be a good idea. -repl-disable-tcp-nodelay no - -# Set the replication backlog size. The backlog is a buffer that accumulates -# slave data when slaves are disconnected for some time, so that when a slave -# wants to reconnect again, often a full resync is not needed, but a partial -# resync is enough, just passing the portion of data the slave missed while -# disconnected. -# -# The bigger the replication backlog, the longer the time the slave can be -# disconnected and later be able to perform a partial resynchronization. -# -# The backlog is only allocated once there is at least a slave connected. -# -# repl-backlog-size 1mb - -# After a master has no longer connected slaves for some time, the backlog -# will be freed. The following option configures the amount of seconds that -# need to elapse, starting from the time the last slave disconnected, for -# the backlog buffer to be freed. -# -# Note that slaves never free the backlog for timeout, since they may be -# promoted to masters later, and should be able to correctly "partially -# resynchronize" with the slaves: hence they should always accumulate backlog. -# -# A value of 0 means to never release the backlog. -# -# repl-backlog-ttl 3600 - -# The slave priority is an integer number published by Redis in the INFO output. -# It is used by Redis Sentinel in order to select a slave to promote into a -# master if the master is no longer working correctly. -# -# A slave with a low priority number is considered better for promotion, so -# for instance if there are three slaves with priority 10, 100, 25 Sentinel will -# pick the one with priority 10, that is the lowest. -# -# However a special priority of 0 marks the slave as not able to perform the -# role of master, so a slave with priority of 0 will never be selected by -# Redis Sentinel for promotion. -# -# By default the priority is 100. -slave-priority 100 - -# It is possible for a master to stop accepting writes if there are less than -# N slaves connected, having a lag less or equal than M seconds. -# -# The N slaves need to be in "online" state. 
-# -# The lag in seconds, which must be <= the specified value, is calculated from -# the last ping received from the slave, which is usually sent every second. -# -# This option does not GUARANTEE that N replicas will accept the write, but -# will limit the window of exposure for lost writes in case not enough slaves -# are available, to the specified number of seconds. -# -# For example to require at least 3 slaves with a lag <= 10 seconds use: -# -# min-slaves-to-write 3 -# min-slaves-max-lag 10 -# -# Setting one or the other to 0 disables the feature. -# -# By default min-slaves-to-write is set to 0 (feature disabled) and -# min-slaves-max-lag is set to 10. - -# A Redis master is able to list the address and port of the attached -# slaves in different ways. For example the "INFO replication" section -# offers this information, which is used, among other tools, by -# Redis Sentinel in order to discover slave instances. -# Another place where this info is available is in the output of the -# "ROLE" command of a master. -# -# The IP address and port normally reported by a slave are obtained -# in the following way: -# -# IP: The address is auto detected by checking the peer address -# of the socket used by the slave to connect with the master. -# -# Port: The port is communicated by the slave during the replication -# handshake, and is normally the port that the slave is using to -# listen for connections. -# -# However when port forwarding or Network Address Translation (NAT) is -# used, the slave may actually be reachable via different IP and port -# pairs. The following two options can be used by a slave in order to -# report to its master a specific IP and port, so that both INFO -# and ROLE will report those values. -# -# There is no need to use both the options if you need to override just -# the port or the IP address. -# -# slave-announce-ip 5.5.5.5 -# slave-announce-port 1234 - -################################## SECURITY ################################### - -# Require clients to issue AUTH before processing any other -# commands. This might be useful in environments in which you do not trust -# others with access to the host running redis-server. -# -# This should stay commented out for backward compatibility and because most -# people do not need auth (e.g. they run their own servers). -# -# Warning: since Redis is pretty fast an outside user can try up to -# 150k passwords per second against a good box. This means that you should -# use a very strong password otherwise it will be very easy to break. -# -# requirepass foobared - -# Command renaming. -# -# It is possible to change the name of dangerous commands in a shared -# environment. For instance the CONFIG command may be renamed into something -# hard to guess so that it will still be available for internal-use tools -# but not available for general clients. -# -# Example: -# -# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 -# -# It is also possible to completely kill a command by renaming it into -# an empty string: -# -# rename-command CONFIG "" -# -# Please note that changing the name of commands that are logged into the -# AOF file or transmitted to slaves may cause problems. - -################################### CLIENTS #################################### - -# Set the max number of connected clients at the same time.
By default -# this limit is set to 10000 clients, however if the Redis server is not -# able to configure the process file limit to allow for the specified limit -# the max number of allowed clients is set to the current file limit -# minus 32 (as Redis reserves a few file descriptors for internal uses). -# -# Once the limit is reached Redis will close all the new connections sending -# an error 'max number of clients reached'. -# -# maxclients 10000 - -############################## MEMORY MANAGEMENT ################################ - -# Set a memory usage limit to the specified amount of bytes. -# When the memory limit is reached Redis will try to remove keys -# according to the eviction policy selected (see maxmemory-policy). -# -# If Redis can't remove keys according to the policy, or if the policy is -# set to 'noeviction', Redis will start to reply with errors to commands -# that would use more memory, like SET, LPUSH, and so on, and will continue -# to reply to read-only commands like GET. -# -# This option is usually useful when using Redis as an LRU or LFU cache, or to -# set a hard memory limit for an instance (using the 'noeviction' policy). -# -# WARNING: If you have slaves attached to an instance with maxmemory on, -# the size of the output buffers needed to feed the slaves are subtracted -# from the used memory count, so that network problems / resyncs will -# not trigger a loop where keys are evicted, and in turn the output -# buffer of slaves is full with DELs of keys evicted triggering the deletion -# of more keys, and so forth until the database is completely emptied. -# -# In short... if you have slaves attached it is suggested that you set a lower -# limit for maxmemory so that there is some free RAM on the system for slave -# output buffers (but this is not needed if the policy is 'noeviction'). -# -# maxmemory - -# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory -# is reached. You can select among five behaviors: -# -# volatile-lru -> Evict using approximated LRU among the keys with an expire set. -# allkeys-lru -> Evict any key using approximated LRU. -# volatile-lfu -> Evict using approximated LFU among the keys with an expire set. -# allkeys-lfu -> Evict any key using approximated LFU. -# volatile-random -> Remove a random key among the ones with an expire set. -# allkeys-random -> Remove a random key, any key. -# volatile-ttl -> Remove the key with the nearest expire time (minor TTL) -# noeviction -> Don't evict anything, just return an error on write operations. -# -# LRU means Least Recently Used -# LFU means Least Frequently Used -# -# Both LRU, LFU and volatile-ttl are implemented using approximated -# randomized algorithms. -# -# Note: with any of the above policies, Redis will return an error on write -# operations, when there are no suitable keys for eviction. -# -# At the date of writing these commands are: set setnx setex append -# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd -# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby -# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby -# getset mset msetnx exec sort -# -# The default is: -# -# maxmemory-policy noeviction - -# LRU, LFU and minimal TTL algorithms are not precise algorithms but approximated -# algorithms (in order to save memory), so you can tune it for speed or -# accuracy. 
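# As an illustrative sketch of a cache-style setup combining the directives
# described above (the 256mb size is invented, not a recommendation):
#
#   maxmemory 256mb
#   maxmemory-policy allkeys-lru
#   maxmemory-samples 5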
By default Redis will check five keys and pick the one that was -# used least recently; you can change the sample size using the following -# configuration directive. -# -# The default of 5 produces good enough results. 10 approximates very closely -# true LRU but costs more CPU. 3 is faster but not very accurate. -# -# maxmemory-samples 5 - -############################# LAZY FREEING #################################### - -# Redis has two primitives to delete keys. One is called DEL and is a blocking -# deletion of the object. It means that the server stops processing new commands -# in order to reclaim all the memory associated with an object in a synchronous -# way. If the key deleted is associated with a small object, the time needed -# in order to execute the DEL command is very small and comparable to most other -# O(1) or O(log_N) commands in Redis. However if the key is associated with an -# aggregated value containing millions of elements, the server can block for -# a long time (even seconds) in order to complete the operation. -# -# For the above reasons Redis also offers non blocking deletion primitives -# such as UNLINK (non blocking DEL) and the ASYNC option of the FLUSHALL and -# FLUSHDB commands, in order to reclaim memory in the background. Those commands -# are executed in constant time. Another thread will incrementally free the -# object in the background as fast as possible. -# -# DEL, UNLINK and the ASYNC option of FLUSHALL and FLUSHDB are user-controlled. -# It's up to the design of the application to understand when it is a good -# idea to use one or the other. However the Redis server sometimes has to -# delete keys or flush the whole database as a side effect of other operations. -# Specifically Redis deletes objects independently of a user call in the -# following scenarios: -# -# 1) On eviction, because of the maxmemory and maxmemory policy configurations, -# in order to make room for new data, without going over the specified -# memory limit. -# 2) Because of expire: when a key with an associated time to live (see the -# EXPIRE command) must be deleted from memory. -# 3) Because of a side effect of a command that stores data on a key that may -# already exist. For example the RENAME command may delete the old key -# content when it is replaced with another one. Similarly SUNIONSTORE -# or SORT with the STORE option may delete existing keys. The SET command -# itself removes any old content of the specified key in order to replace -# it with the specified string. -# 4) During replication, when a slave performs a full resynchronization with -# its master, the content of the whole database is removed in order to -# load the RDB file just transferred. -# -# In all the above cases the default is to delete objects in a blocking way, -# as if DEL was called. However you can configure each case specifically -# in order to instead release memory in a non-blocking way, as if UNLINK -# was called, using the following configuration directives: - -lazyfree-lazy-eviction no -lazyfree-lazy-expire no -lazyfree-lazy-server-del no -slave-lazy-flush no - -############################## APPEND ONLY MODE ############################### - -# By default Redis asynchronously dumps the dataset on disk. This mode is -# good enough in many applications, but an issue with the Redis process or -# a power outage may result in a few minutes of lost writes (depending on -# the configured save points). -# -# The Append Only File is an alternative persistence mode that provides -# much better durability.
For instance using the default data fsync policy -# (see later in the config file) Redis can lose just one second of writes in a -# dramatic event like a server power outage, or a single write if something -# wrong with the Redis process itself happens, but the operating system is -# still running correctly. -# -# AOF and RDB persistence can be enabled at the same time without problems. -# If the AOF is enabled on startup Redis will load the AOF, that is the file -# with the better durability guarantees. -# -# Please check http://redis.io/topics/persistence for more information. - -appendonly no - -# The name of the append only file (default: "appendonly.aof") - -appendfilename "appendonly.aof" - -# The fsync() call tells the Operating System to actually write data on disk -# instead of waiting for more data in the output buffer. Some OS will really flush -# data on disk, some other OS will just try to do it ASAP. -# -# Redis supports three different modes: -# -# no: don't fsync, just let the OS flush the data when it wants. Faster. -# always: fsync after every write to the append only log. Slow, Safest. -# everysec: fsync only one time every second. Compromise. -# -# The default is "everysec", as that's usually the right compromise between -# speed and data safety. It's up to you to understand if you can relax this to -# "no" that will let the operating system flush the output buffer when -# it wants, for better performances (but if you can live with the idea of -# some data loss consider the default persistence mode that's snapshotting), -# or on the contrary, use "always" that's very slow but a bit safer than -# everysec. -# -# More details please check the following article: -# http://antirez.com/post/redis-persistence-demystified.html -# -# If unsure, use "everysec". - -# appendfsync always -appendfsync everysec -# appendfsync no - -# When the AOF fsync policy is set to always or everysec, and a background -# saving process (a background save or AOF log background rewriting) is -# performing a lot of I/O against the disk, in some Linux configurations -# Redis may block too long on the fsync() call. Note that there is no fix for -# this currently, as even performing fsync in a different thread will block -# our synchronous write(2) call. -# -# In order to mitigate this problem it's possible to use the following option -# that will prevent fsync() from being called in the main process while a -# BGSAVE or BGREWRITEAOF is in progress. -# -# This means that while another child is saving, the durability of Redis is -# the same as "appendfsync none". In practical terms, this means that it is -# possible to lose up to 30 seconds of log in the worst scenario (with the -# default Linux settings). -# -# If you have latency problems turn this to "yes". Otherwise leave it as -# "no" that is the safest pick from the point of view of durability. - -no-appendfsync-on-rewrite no - -# Automatic rewrite of the append only file. -# Redis is able to automatically rewrite the log file implicitly calling -# BGREWRITEAOF when the AOF log size grows by the specified percentage. -# -# This is how it works: Redis remembers the size of the AOF file after the -# latest rewrite (if no rewrite has happened since the restart, the size of -# the AOF at startup is used). -# -# This base size is compared to the current size. If the current size is -# bigger than the specified percentage, the rewrite is triggered. 
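# A worked example: with auto-aof-rewrite-percentage 100 (the default below),
# if the last rewrite left a 70mb AOF, the next automatic BGREWRITEAOF triggers
# once the file grows past 140mb, i.e. 100% larger than the 70mb base.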
Also -# you need to specify a minimal size for the AOF file to be rewritten, this -# is useful to avoid rewriting the AOF file even if the percentage increase -# is reached but it is still pretty small. -# -# Specify a percentage of zero in order to disable the automatic AOF -# rewrite feature. - -auto-aof-rewrite-percentage 100 -auto-aof-rewrite-min-size 64mb - -# An AOF file may be found to be truncated at the end during the Redis -# startup process, when the AOF data gets loaded back into memory. -# This may happen when the system where Redis is running -# crashes, especially when an ext4 filesystem is mounted without the -# data=ordered option (however this can't happen when Redis itself -# crashes or aborts but the operating system still works correctly). -# -# Redis can either exit with an error when this happens, or load as much -# data as possible (the default now) and start if the AOF file is found -# to be truncated at the end. The following option controls this behavior. -# -# If aof-load-truncated is set to yes, a truncated AOF file is loaded and -# the Redis server starts emitting a log to inform the user of the event. -# Otherwise if the option is set to no, the server aborts with an error -# and refuses to start. When the option is set to no, the user requires -# to fix the AOF file using the "redis-check-aof" utility before to restart -# the server. -# -# Note that if the AOF file will be found to be corrupted in the middle -# the server will still exit with an error. This option only applies when -# Redis will try to read more data from the AOF file but not enough bytes -# will be found. -aof-load-truncated yes - -# When rewriting the AOF file, Redis is able to use an RDB preamble in the -# AOF file for faster rewrites and recoveries. When this option is turned -# on the rewritten AOF file is composed of two different stanzas: -# -# [RDB file][AOF tail] -# -# When loading Redis recognizes that the AOF file starts with the "REDIS" -# string and loads the prefixed RDB file, and continues loading the AOF -# tail. -# -# This is currently turned off by default in order to avoid the surprise -# of a format change, but will at some point be used as the default. -aof-use-rdb-preamble no - -################################ LUA SCRIPTING ############################### - -# Max execution time of a Lua script in milliseconds. -# -# If the maximum execution time is reached Redis will log that a script is -# still in execution after the maximum allowed time and will start to -# reply to queries with an error. -# -# When a long running script exceeds the maximum execution time only the -# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be -# used to stop a script that did not yet called write commands. The second -# is the only way to shut down the server in the case a write command was -# already issued by the script but the user doesn't want to wait for the natural -# termination of the script. -# -# Set it to 0 or a negative value for unlimited execution without warnings. -lua-time-limit 5000 - -################################ REDIS CLUSTER ############################### -# -# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# WARNING EXPERIMENTAL: Redis Cluster is considered to be stable code, however -# in order to mark it as "mature" we need to wait for a non trivial percentage -# of users to deploy it in production. 
-# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -# -# Normal Redis instances can't be part of a Redis Cluster; only nodes that are -# started as cluster nodes can. In order to start a Redis instance as a -# cluster node enable the cluster support uncommenting the following: -# -# cluster-enabled yes - -# Every cluster node has a cluster configuration file. This file is not -# intended to be edited by hand. It is created and updated by Redis nodes. -# Every Redis Cluster node requires a different cluster configuration file. -# Make sure that instances running in the same system do not have -# overlapping cluster configuration file names. -# -# cluster-config-file nodes-6379.conf - -# Cluster node timeout is the amount of milliseconds a node must be unreachable -# for it to be considered in failure state. -# Most other internal time limits are multiple of the node timeout. -# -# cluster-node-timeout 15000 - -# A slave of a failing master will avoid to start a failover if its data -# looks too old. -# -# There is no simple way for a slave to actually have an exact measure of -# its "data age", so the following two checks are performed: -# -# 1) If there are multiple slaves able to failover, they exchange messages -# in order to try to give an advantage to the slave with the best -# replication offset (more data from the master processed). -# Slaves will try to get their rank by offset, and apply to the start -# of the failover a delay proportional to their rank. -# -# 2) Every single slave computes the time of the last interaction with -# its master. This can be the last ping or command received (if the master -# is still in the "connected" state), or the time that elapsed since the -# disconnection with the master (if the replication link is currently down). -# If the last interaction is too old, the slave will not try to failover -# at all. -# -# The point "2" can be tuned by user. Specifically a slave will not perform -# the failover if, since the last interaction with the master, the time -# elapsed is greater than: -# -# (node-timeout * slave-validity-factor) + repl-ping-slave-period -# -# So for example if node-timeout is 30 seconds, and the slave-validity-factor -# is 10, and assuming a default repl-ping-slave-period of 10 seconds, the -# slave will not try to failover if it was not able to talk with the master -# for longer than 310 seconds. -# -# A large slave-validity-factor may allow slaves with too old data to failover -# a master, while a too small value may prevent the cluster from being able to -# elect a slave at all. -# -# For maximum availability, it is possible to set the slave-validity-factor -# to a value of 0, which means, that slaves will always try to failover the -# master regardless of the last time they interacted with the master. -# (However they'll always try to apply a delay proportional to their -# offset rank). -# -# Zero is the only value able to guarantee that when all the partitions heal -# the cluster will always be able to continue. -# -# cluster-slave-validity-factor 10 - -# Cluster slaves are able to migrate to orphaned masters, that are masters -# that are left without working slaves. This improves the cluster ability -# to resist to failures as otherwise an orphaned master can't be failed over -# in case of failure if it has no working slaves. -# -# Slaves migrate to orphaned masters only if there are still at least a -# given number of other working slaves for their old master. This number -# is the "migration barrier". 
A migration barrier of 1 means that a slave -# will migrate only if there is at least 1 other working slave for its master -# and so forth. It usually reflects the number of slaves you want for every -# master in your cluster. -# -# Default is 1 (slaves migrate only if their masters remain with at least -# one slave). To disable migration just set it to a very large value. -# A value of 0 can be set but is useful only for debugging and dangerous -# in production. -# -# cluster-migration-barrier 1 - -# By default Redis Cluster nodes stop accepting queries if they detect there -# is at least one hash slot uncovered (no available node is serving it). -# This way if the cluster is partially down (for example a range of hash slots -# is no longer covered) the whole cluster eventually becomes unavailable. -# It automatically becomes available again as soon as all the slots are covered. -# -# However sometimes you want the subset of the cluster which is working, -# to continue to accept queries for the part of the key space that is still -# covered. In order to do so, just set the cluster-require-full-coverage -# option to no. -# -# cluster-require-full-coverage yes - -# In order to set up your cluster make sure to read the documentation -# available at the http://redis.io web site. - -########################## CLUSTER DOCKER/NAT support ######################## - -# In certain deployments, Redis Cluster nodes address discovery fails, because -# addresses are NAT-ted or because ports are forwarded (the typical case is -# Docker and other containers). -# -# In order to make Redis Cluster work in such environments, a static -# configuration where each node knows its public address is needed. The -# following options are used for this purpose, and are: -# -# * cluster-announce-ip -# * cluster-announce-port -# * cluster-announce-bus-port -# -# Each instructs the node about its address, client port, and cluster message -# bus port. The information is then published in the header of the bus packets -# so that other nodes will be able to correctly map the address of the node -# publishing the information. -# -# If the above options are not used, the normal Redis Cluster auto-detection -# will be used instead. -# -# Note that when remapped, the bus port may not be at the fixed offset of -# client port + 10000, so you can specify any port and bus-port depending -# on how they get remapped. If the bus-port is not set, a fixed offset of -# 10000 will be used as usual. -# -# Example: -# -# cluster-announce-ip 10.1.1.5 -# cluster-announce-port 6379 -# cluster-announce-bus-port 6380 - -################################## SLOW LOG ################################### - -# The Redis Slow Log is a system to log queries that exceeded a specified -# execution time. The execution time does not include I/O operations -# like talking with the client, sending the reply and so forth, -# but just the time needed to actually execute the command (this is the only -# stage of command execution where the thread is blocked and cannot serve -# other requests in the meantime). -# -# You can configure the slow log with two parameters: one tells Redis -# the execution time, in microseconds, a command must exceed in order -# to get logged, and the other parameter is the length of the -# slow log. When a new command is logged the oldest one is removed from the -# queue of logged commands. - -# The following time is expressed in microseconds, so 1000000 is equivalent -# to one second.
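# For instance, to log every command slower than five milliseconds (an
# illustrative value, not a recommendation):
#
#   slowlog-log-slower-than 5000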
Note that a negative number disables the slow log, while -# a value of zero forces the logging of every command. -slowlog-log-slower-than 10000 - -# There is no limit to this length. Just be aware that it will consume memory. -# You can reclaim memory used by the slow log with SLOWLOG RESET. -slowlog-max-len 128 - -################################ LATENCY MONITOR ############################## - -# The Redis latency monitoring subsystem samples different operations -# at runtime in order to collect data related to possible sources of -# latency of a Redis instance. -# -# Via the LATENCY command this information is available to the user, who can -# print graphs and obtain reports. -# -# The system only logs operations that were performed in a time equal to or -# greater than the amount of milliseconds specified via the -# latency-monitor-threshold configuration directive. When its value is set -# to zero, the latency monitor is turned off. -# -# By default latency monitoring is disabled since it is mostly not needed -# if you don't have latency issues, and collecting data has a performance -# impact that, while very small, can be measured under big load. Latency -# monitoring can easily be enabled at runtime using the command -# "CONFIG SET latency-monitor-threshold <milliseconds>" if needed. -latency-monitor-threshold 0 - -############################# EVENT NOTIFICATION ############################## - -# Redis can notify Pub/Sub clients about events happening in the key space. -# This feature is documented at http://redis.io/topics/notifications -# -# For instance if keyspace events notification is enabled, and a client -# performs a DEL operation on key "foo" stored in database 0, two -# messages will be published via Pub/Sub: -# -# PUBLISH __keyspace@0__:foo del -# PUBLISH __keyevent@0__:del foo -# -# It is possible to select the events that Redis will notify among a set -# of classes. Every class is identified by a single character: -# -# K Keyspace events, published with __keyspace@<db>__ prefix. -# E Keyevent events, published with __keyevent@<db>__ prefix. -# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ... -# $ String commands -# l List commands -# s Set commands -# h Hash commands -# z Sorted set commands -# x Expired events (events generated every time a key expires) -# e Evicted events (events generated when a key is evicted for maxmemory) -# A Alias for g$lshzxe, so that the "AKE" string means all the events. -# -# The "notify-keyspace-events" directive takes as its argument a string that -# is composed of zero or more characters. The empty string means that -# notifications are disabled. -# -# Example: to enable list and generic events, from the point of view of the -# event name, use: -# -# notify-keyspace-events Elg -# -# Example 2: to get the stream of the expired keys by subscribing to the channel -# name __keyevent@0__:expired use: -# -# notify-keyspace-events Ex -# -# By default all notifications are disabled because most users don't need -# this feature and the feature has some overhead. Note that if you don't -# specify at least one of K or E, no events will be delivered. -notify-keyspace-events "" - -############################### ADVANCED CONFIG ############################### - -# Hashes are encoded using a memory efficient data structure when they have a -# small number of entries, and the biggest entry does not exceed a given -# threshold. These thresholds can be configured using the following directives.
-hash-max-ziplist-entries 512 -hash-max-ziplist-value 64 - -# Lists are also encoded in a special way to save a lot of space. -# The number of entries allowed per internal list node can be specified -# as a fixed maximum size or a maximum number of elements. -# For a fixed maximum size, use -5 through -1, meaning: -# -5: max size: 64 Kb <-- not recommended for normal workloads -# -4: max size: 32 Kb <-- not recommended -# -3: max size: 16 Kb <-- probably not recommended -# -2: max size: 8 Kb <-- good -# -1: max size: 4 Kb <-- good -# Positive numbers mean store up to _exactly_ that number of elements -# per list node. -# The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size), -# but if your use case is unique, adjust the settings as necessary. -list-max-ziplist-size -2 - -# Lists may also be compressed. -# Compress depth is the number of quicklist ziplist nodes from *each* side of -# the list to *exclude* from compression. The head and tail of the list -# are always uncompressed for fast push/pop operations. Settings are: -# 0: disable all list compression -# 1: depth 1 means "don't start compressing until after 1 node into the list, -# going from either the head or tail" -# So: [head]->node->node->...->node->[tail] -# [head], [tail] will always be uncompressed; inner nodes will compress. -# 2: [head]->[next]->node->node->...->node->[prev]->[tail] -# 2 here means: don't compress head or head->next or tail->prev or tail, -# but compress all nodes between them. -# 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail] -# etc. -list-compress-depth 0 - -# Sets have a special encoding in just one case: when a set is composed -# of just strings that happen to be integers in radix 10 in the range -# of 64 bit signed integers. -# The following configuration setting sets the limit in the size of the -# set in order to use this special memory saving encoding. -set-max-intset-entries 512 - -# Similarly to hashes and lists, sorted sets are also specially encoded in -# order to save a lot of space. This encoding is only used when the length and -# elements of a sorted set are below the following limits: -zset-max-ziplist-entries 128 -zset-max-ziplist-value 64 - -# HyperLogLog sparse representation bytes limit. The limit includes the -# 16 bytes header. When an HyperLogLog using the sparse representation crosses -# this limit, it is converted into the dense representation. -# -# A value greater than 16000 is totally useless, since at that point the -# dense representation is more memory efficient. -# -# The suggested value is ~ 3000 in order to have the benefits of -# the space efficient encoding without slowing down too much PFADD, -# which is O(N) with the sparse encoding. The value can be raised to -# ~ 10000 when CPU is not a concern, but space is, and the data set is -# composed of many HyperLogLogs with cardinality in the 0 - 15000 range. -hll-sparse-max-bytes 3000 - -# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in -# order to help rehashing the main Redis hash table (the one mapping top-level -# keys to values). The hash table implementation Redis uses (see dict.c) -# performs a lazy rehashing: the more operation you run into a hash table -# that is rehashing, the more rehashing "steps" are performed, so if the -# server is idle the rehashing is never complete and some more memory is used -# by the hash table. 
-# -# The default is to use this millisecond 10 times every second in order to -# actively rehash the main dictionaries, freeing memory when possible. -# -# If unsure: -# use "activerehashing no" if you have hard latency requirements and it is -# not a good thing in your environment that Redis can reply from time to time -# to queries with a 2 millisecond delay. -# -# use "activerehashing yes" if you don't have such hard requirements but -# want to free memory asap when possible. -activerehashing yes - -# The client output buffer limits can be used to force disconnection of clients -# that are not reading data from the server fast enough for some reason (a -# common reason is that a Pub/Sub client can't consume messages as fast as the -# publisher can produce them). -# -# The limit can be set differently for the three different classes of clients: -# -# normal -> normal clients including MONITOR clients -# slave -> slave clients -# pubsub -> clients subscribed to at least one pubsub channel or pattern -# -# The syntax of every client-output-buffer-limit directive is the following: -# -# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds> -# -# A client is immediately disconnected once the hard limit is reached, or if -# the soft limit is reached and remains reached for the specified number of -# seconds (continuously). -# So for instance if the hard limit is 32 megabytes and the soft limit is -# 16 megabytes / 10 seconds, the client will get disconnected immediately -# if the size of the output buffers reaches 32 megabytes, but will also get -# disconnected if the client reaches 16 megabytes and continuously overcomes -# the limit for 10 seconds. -# -# By default normal clients are not limited because they don't receive data -# without asking (in a push way), but just after a request, so only -# asynchronous clients may create a scenario where data is requested faster -# than it can be read. -# -# Instead there is a default limit for pubsub and slave clients, since -# subscribers and slaves receive data in a push fashion. -# -# Both the hard and the soft limit can be disabled by setting them to zero. -client-output-buffer-limit normal 0 0 0 -client-output-buffer-limit slave 256mb 64mb 60 -client-output-buffer-limit pubsub 32mb 8mb 60 - -# Redis calls an internal function to perform many background tasks, like -# closing connections of clients in timeout, purging expired keys that are -# never requested, and so forth. -# -# Not all tasks are performed with the same frequency, but Redis checks for -# tasks to perform according to the specified "hz" value. -# -# By default "hz" is set to 10. Raising the value will use more CPU when -# Redis is idle, but at the same time will make Redis more responsive when -# there are many keys expiring at the same time, and timeouts may be -# handled with more precision. -# -# The range is between 1 and 500, however a value over 100 is usually not -# a good idea. Most users should use the default of 10 and raise this up to -# 100 only in environments where very low latency is required. -hz 10 - -# When a child rewrites the AOF file, if the following option is enabled -# the file will be fsync-ed every 32 MB of data generated. This is useful -# in order to commit the file to the disk more incrementally and avoid -# big latency spikes. -aof-rewrite-incremental-fsync yes - -# Redis LFU eviction (see the maxmemory setting) can be tuned.
However it is a good -# idea to start with the default settings and only change them after investigating -# how to improve the performance and how the keys' LFU changes over time, which -# is possible to inspect via the OBJECT FREQ command. -# -# There are two tunable parameters in the Redis LFU implementation: the -# counter logarithm factor and the counter decay time. It is important to -# understand what the two parameters mean before changing them. -# -# The LFU counter is just 8 bits per key, its maximum value is 255, so Redis -# uses a probabilistic increment with logarithmic behavior. Given the value -# of the old counter, when a key is accessed, the counter is incremented in -# this way: -# -# 1. A random number R between 0 and 1 is extracted. -# 2. A probability P is calculated as 1/(old_value*lfu_log_factor+1). -# 3. The counter is incremented only if R < P. -# -# The default lfu-log-factor is 10. This is a table of how the frequency -# counter changes with a different number of accesses and different -# logarithmic factors: -# -# +--------+------------+------------+------------+------------+------------+ -# | factor | 100 hits | 1000 hits | 100K hits | 1M hits | 10M hits | -# +--------+------------+------------+------------+------------+------------+ -# | 0 | 104 | 255 | 255 | 255 | 255 | -# +--------+------------+------------+------------+------------+------------+ -# | 1 | 18 | 49 | 255 | 255 | 255 | -# +--------+------------+------------+------------+------------+------------+ -# | 10 | 10 | 18 | 142 | 255 | 255 | -# +--------+------------+------------+------------+------------+------------+ -# | 100 | 8 | 11 | 49 | 143 | 255 | -# +--------+------------+------------+------------+------------+------------+ -# -# NOTE: The above table was obtained by running the following commands: -# -# redis-benchmark -n 1000000 incr foo -# redis-cli object freq foo -# -# NOTE 2: The counter initial value is 5 in order to give new objects a chance -# to accumulate hits. -# -# The counter decay time is the time, in minutes, that must elapse in order -# for the key counter to be divided by two (or decremented if its value is -# <= 10). -# -# The default value for the lfu-decay-time is 1. A special value of 0 means to -# decay the counter every time it happens to be scanned. -# -# lfu-log-factor 10 -# lfu-decay-time 1 - -########################### ACTIVE DEFRAGMENTATION ####################### -# -# WARNING THIS FEATURE IS EXPERIMENTAL. However it was stress tested -# even in production and manually tested by multiple engineers for some -# time. -# -# What is active defragmentation? -# ------------------------------- -# -# Active (online) defragmentation allows a Redis server to compact the -# spaces left between small allocations and deallocations of data in memory, -# thus making it possible to reclaim memory. -# -# Fragmentation is a natural process that happens with every allocator (but -# less so with Jemalloc, fortunately) and certain workloads. Normally a server -# restart is needed in order to lower the fragmentation, or at least to flush -# away all the data and create it again. However thanks to this feature -# implemented by Oran Agra for Redis 4.0 this process can happen at runtime -# in a "hot" way, while the server is running.
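# As the notes below mention, the feature is off by default; an illustrative
# way to turn it on for a running instance is:
#
#   redis-cli CONFIG SET activedefrag yes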
-# -# Basically when the fragmentation is over a certain level (see the -# configuration options below) Redis will start to create new copies of the -# values in contiguous memory regions by exploiting certain specific Jemalloc -# features (in order to understand if an allocation is causing fragmentation -# and to allocate it in a better place), and at the same time, will release the -# old copies of the data. This process, repeated incrementally for all the keys, -# will cause the fragmentation to drop back to normal values. -# -# Important things to understand: -# -# 1. This feature is disabled by default, and only works if you compiled Redis -# to use the copy of Jemalloc we ship with the source code of Redis. -# This is the default with Linux builds. -# -# 2. You never need to enable this feature if you don't have fragmentation -# issues. -# -# 3. Once you experience fragmentation, you can enable this feature when -# needed with the command "CONFIG SET activedefrag yes". -# -# The configuration parameters are able to fine-tune the behavior of the -# defragmentation process. If you are not sure about what they mean it is -# a good idea to leave the defaults untouched. - -# Enable active defragmentation -# activedefrag yes - -# Minimum amount of fragmentation waste to start active defrag -# active-defrag-ignore-bytes 100mb - -# Minimum percentage of fragmentation to start active defrag -# active-defrag-threshold-lower 10 - -# Maximum percentage of fragmentation at which we use maximum effort -# active-defrag-threshold-upper 100 - -# Minimal effort for defrag in CPU percentage -# active-defrag-cycle-min 25 - -# Maximal effort for defrag in CPU percentage -# active-defrag-cycle-max 75 - diff --git a/states/roles/maintain/gitlabarch/conf_files/resque.yml b/states/roles/maintain/gitlabarch/conf_files/resque.yml deleted file mode 100644 index 6c7944f..0000000 --- a/states/roles/maintain/gitlabarch/conf_files/resque.yml +++ /dev/null @@ -1,34 +0,0 @@ -# If you change this file in a Merge Request, please also create -# a Merge Request on https://gitlab.com/gitlab-org/omnibus-gitlab/merge_requests -# -development: - url: unix:/run/redis/redis.sock - # sentinels: - # - - # host: localhost - # port: 26380 # point to sentinel, not to redis port - # - - # host: slave2 - # port: 26381 # point to sentinel, not to redis port -test: - url: unix:/run/redis/redis.sock -production: - # Redis (single instance) - url: unix:/run/redis/redis.sock - ## - # Redis + Sentinel (for HA) - # - # Please read instructions carefully before using it as you may lose data: - # http://redis.io/topics/sentinel - # - # You must specify a list of a few sentinels that will handle client connection - # please read here for more information: https://docs.gitlab.com/ce/administration/high_availability/redis.html - ## - # url: redis://master:6379 - # sentinels: - # - - # host: slave1 - # port: 26379 # point to sentinel, not to redis port - # - - # host: slave2 - # port: 26379 # point to sentinel, not to redis port diff --git a/states/roles/maintain/gitlabarch/conf_files/smtp_settings.rb b/states/roles/maintain/gitlabarch/conf_files/smtp_settings.rb deleted file mode 100644 index ebc93e9..0000000 --- a/states/roles/maintain/gitlabarch/conf_files/smtp_settings.rb +++ /dev/null @@ -1,23 +0,0 @@ -# To enable smtp email delivery for your GitLab instance do the following: -# 1. Rename this file to smtp_settings.rb -# 2. Edit settings inside this file -# 3.
Restart GitLab instance -# -# For full list of options and their values see http://api.rubyonrails.org/classes/ActionMailer/Base.html -# -# If you change this file in a Merge Request, please also create a Merge Request on https://gitlab.com/gitlab-org/omnibus-gitlab/merge_requests - -if Rails.env.production? - Rails.application.config.action_mailer.delivery_method = :smtp - - ActionMailer::Base.delivery_method = :smtp - ActionMailer::Base.smtp_settings = { - authentication: :plain, - address: "smtp.zoho.com", - port: 587, - user_name: "notifications@actcur.com", - password: "{%- include 'secure/passwords/gitlab_smtp_password.txt' -%}", - domain: "smtp.zoho.com", - enable_starttls_auto: true, - } -end diff --git a/states/roles/maintain/gitlabarch/conf_files/tmp_redis.conf b/states/roles/maintain/gitlabarch/conf_files/tmp_redis.conf deleted file mode 100644 index 773b8ea..0000000 --- a/states/roles/maintain/gitlabarch/conf_files/tmp_redis.conf +++ /dev/null @@ -1 +0,0 @@ -d /run/redis 0755 redis redis - diff --git a/states/roles/maintain/gitlabarch/init.sls b/states/roles/maintain/gitlabarch/init.sls deleted file mode 100644 index 2351299..0000000 --- a/states/roles/maintain/gitlabarch/init.sls +++ /dev/null @@ -1,175 +0,0 @@ -gitlab: - pkg.installed -mariadb: - pkg.installed -gitlab_nginx: - pkg.installed: - - name: nginx - -#managed files -/etc/webapps/gitlab/gitlab.yml: - file.managed: - - source: salt://roles/maintain/gitlab/conf_files/gitlab.yml - - user: root - - group: root - - mode: 644 -/etc/webapps/gitlab/database.yml: - file.managed: - - source: salt://roles/maintain/gitlab/conf_files/database.yml - - user: gitlab - - group: gitlab - - mode: 600 - - template: jinja -/etc/webapps/gitlab/resque.yml: - file.managed: - - source: salt://roles/maintain/gitlab/conf_files/resque.yml - - user: root - - group: root - - mode: 644 -/etc/webapps/gitlab-shell/config.yml: - file.managed: - - source: salt://roles/maintain/gitlab/conf_files/config.yml - - user: gitlab - - group: gitlab - - mode: 600 -/usr/share/webapps/gitlab/config/initializers/smtp_settings.rb: - file.managed: - - source: salt://roles/maintain/gitlab/conf_files/smtp_settings.rb - - user: root - - group: root - - mode: 644 - - template: jinja -/usr/share/webapps/gitlab/config/environments/production.rb: - file.managed: - - source: salt://roles/maintain/gitlab/conf_files/production.rb - - user: root - - group: root - - mode: 644 -/etc/redis.conf: - file.managed: - - source: salt://roles/maintain/gitlab/conf_files/redis.conf - - user: root - - group: root - - mode: 644 -/etc/tempfiles.d/redis.conf: - file.managed: - - source: salt://roles/maintain/gitlab/conf_files/tmp_redis.conf - - user: root - - group: root - - mode: 644 - - makedirs: true -/etc/nginx/conf.d/gitlab.conf: - file.managed: - - source: salt://roles/maintain/gitlab/conf_files/gitlab.conf - - user: root - - group: root - - makedirs: true - - dir_mode: 755 - - mode: 644 - -#add users git and gitlab to redis group -git_user: - user.present: - - name: git - - groups: - - redis -gitlab_user: - user.present: - - name: gitlab - - groups: - - redis - -#migrate redis database as gitlab user if necessary -redis-running: - service.running: - - name: redis - - enable: true - - watch: - - file: /etc/redis.conf - - file: /etc/tempfiles.d/redis.conf -gitlab_rake_db: - cmd.run: - - name: "bundle-2.3 exec rake db:migrate RAILS_ENV=production" - - cwd: "/usr/share/webapps/gitlab" - - runas: gitlab - - watch: - - pkg: gitlab - -#global git configuration -gitlab_git_name: - 
git.config_set: - - name: user.name - - value: "Actaeus Curabitur" - - user: gitlab - - global: true -gitlab_git_email: - git.config_set: - - name: user.email - - value: "actcur@actcur.com" - - user: gitlab - - global: true -gitlab_git_crlf: - git.config_set: - - name: core.autocrlf - - value: "input" - - user: gitlab - - global: true - -#create symlink -symlink_repos: - file.symlink: - - name: /var/lib/gitlab/repositories - - target: /mnt/repos - - force: true -#verify perms for repos are right -/var/lib/gitlab/repositories/: - file.directory: - - user: gitlab - - group: gitlab - - dir_mode: 4770 - -#start services -gitlab.target: - service.running: - - enable: true - - watch: - - file: /etc/webapps/gitlab/gitlab.yml - - file: /etc/webapps/gitlab/database.yml - - file: /etc/webapps/gitlab/resque.yml - - file: /etc/webapps/gitlab-shell/config.yml - - file: /etc/nginx/conf.d/gitlab.conf - - file: /usr/share/webapps/gitlab/config/initializers/smtp_settings.rb - - file: /usr/share/webapps/gitlab/config/environments/production.rb -gitlab-workhorse: - service.running: - - enable: true - - watch: - - file: /etc/webapps/gitlab/gitlab.yml - - file: /etc/webapps/gitlab/database.yml - - file: /etc/webapps/gitlab/resque.yml - - file: /etc/webapps/gitlab-shell/config.yml - - file: /etc/nginx/conf.d/gitlab.conf - - file: /usr/share/webapps/gitlab/config/initializers/smtp_settings.rb - - file: /usr/share/webapps/gitlab/config/environments/production.rb -gitlab-unicorn: - service.running: - - enable: true - - watch: - - file: /etc/webapps/gitlab/gitlab.yml - - file: /etc/webapps/gitlab/database.yml - - file: /etc/webapps/gitlab/resque.yml - - file: /etc/webapps/gitlab-shell/config.yml - - file: /etc/nginx/conf.d/gitlab.conf - - file: /usr/share/webapps/gitlab/config/initializers/smtp_settings.rb - - file: /usr/share/webapps/gitlab/config/environments/production.rb -gitlab-sidekiq: - service.running: - - enable: true - - watch: - - file: /etc/webapps/gitlab/gitlab.yml - - file: /etc/webapps/gitlab/database.yml - - file: /etc/webapps/gitlab/resque.yml - - file: /etc/webapps/gitlab-shell/config.yml - - file: /etc/nginx/conf.d/gitlab.conf - - file: /usr/share/webapps/gitlab/config/initializers/smtp_settings.rb - - file: /usr/share/webapps/gitlab/config/environments/production.rb diff --git a/states/roles/maintain/icinga/conf.d/hosts.conf b/states/roles/maintain/icinga/conf.d/hosts.conf new file mode 100644 index 0000000..aea039d --- /dev/null +++ b/states/roles/maintain/icinga/conf.d/hosts.conf @@ -0,0 +1,18 @@ +{% set states = salt['cp.list_states'](saltenv) %} +{%- for state in states %} + {%- if state.startswith("pillars.servers.roles.server.") -%} + {%- set server = state.split('.')[4] %} + {% set role_data = salt['file.read']('/etc/icinga2/server_roles/'+server+'.sls')|load_yaml %} +object Host "{{server}}.actcur.com" { + import "generic-host" + address = "{{server}}.actcur.com" + + {%- if role_data['grains'] is defined %} + {%- if role_data['grains']['roles'] is defined %} + vars.roles=[{%- for role in role_data['grains']['roles'] %}"{{role}}",{%- endfor -%}""]; + {%- endif -%} + {%- endif %} + +} + {%- endif -%} +{%- endfor %} diff --git a/states/roles/maintain/icinga/conf.d/services/core.conf b/states/roles/maintain/icinga/conf.d/services/core.conf new file mode 100644 index 0000000..58bb91f --- /dev/null +++ b/states/roles/maintain/icinga/conf.d/services/core.conf @@ -0,0 +1,49 @@ +apply Service "nrpe_disk-root" { + import "generic-service" + + check_command = "nrpe" + vars.nrpe_command =
"check_disk" + vars.nrpe_arguments = [ "-w 20% -c 10% -p /" ] + + assign where host.address && host.vars.os == "Arch Linux" +} + +apply Service "npre_load"{ + import "generic-service" + + check_command = "nrpe" + vars.nrpe_command = "check_load" + vars.nrpe_arguments = [ "-w 15,10,5 -c 30,20,10" ] + + assign where host.address && host.vars.os == "Arch Linux" +} + +apply Service "npre_swap"{ + import "generic-service" + + check_command = "nrpe" + vars.nrpe_command = "check_swap" + vars.nrpe_arguments = [ "-w 20% -c 10%" ] + + assign where host.address && host.vars.os == "Arch Linux" +} + +apply Service "npre_cpu"{ + import "generic-service" + + check_command = "nrpe" + vars.nrpe_command = "check_cpu" + vars.nrpe_arguments = [ "" ] + + assign where host.address && host.vars.os == "Arch Linux" +} + +apply Service "npre_mem"{ + import "generic-service" + + check_command = "nrpe" + vars.nrpe_command = "check_mem" + vars.nrpe_arguments = [ "-w 80 -c 90" ] + + assign where host.address && host.vars.os == "Arch Linux" +} diff --git a/states/roles/maintain/icinga/conf.d/services/service.conf b/states/roles/maintain/icinga/conf.d/services/service.conf new file mode 100644 index 0000000..53a4b28 --- /dev/null +++ b/states/roles/maintain/icinga/conf.d/services/service.conf @@ -0,0 +1,22 @@ +{%- if services is defined %} + {%- for role in services %} + {%- if services[role] is defined %} + {%- for service in services[role] %} + {%- if role == "core" -%} + {% set role_restriction = '' %} + {%- else -%} + {% set role_restriction = '&& "'+role+'" in host.vars.roles' %} + {%- endif %} +apply Service "nrpe_service_{{role}}_{{ service }}"{ + import "generic-service" + + check_command = "nrpe" + vars.nrpe_command = "check_service" + vars.nrpe_arguments = [ "{{ service }}" ] + + assign where host.address {{role_restriction}} +} + {%- endfor -%} + {%- endif -%} + {%- endfor -%} +{%- endif -%} diff --git a/states/roles/maintain/icinga/init.sls b/states/roles/maintain/icinga/init.sls index cade930..cb12057 100644 --- a/states/roles/maintain/icinga/init.sls +++ b/states/roles/maintain/icinga/init.sls @@ -1,10 +1,8 @@ - +{% set states = salt['cp.list_states'](saltenv) %} icinga2: pkg.installed icingaweb2: pkg.installed -icingaweb2-module-director: - pkg.installed icinga-php: pkg.installed: - name: php @@ -56,14 +54,54 @@ icinga-mysql-client: - target: /etc/icinga2/features-available/ido-mysql.conf +/etc/icinga2/server_roles/: + file.recurse: + - source: salt://pillars/servers/roles/server/ + - user: root + - group: root + - clean: true + - makedirs: true + - file_mode: 600 + - dir_mode: 755 + +/etc/icinga2/conf.d/hosts.conf: + file.managed: + - source: salt://roles/maintain/icinga/conf.d/hosts.conf + - user: icinga + - group: icinga + - mode: 640 + - makedirs: true + - template: jinja + +/etc/icinga2/conf.d/services/core.conf: + file.managed: + - source: salt://roles/maintain/icinga/conf.d/services/core.conf + - user: icinga + - group: icinga + - makedirs: true + - mode: 640 + +/etc/icinga2/conf.d/services/service.conf: + file.managed: + - source: salt://roles/maintain/icinga/conf.d/services/service.conf + - user: icinga + - group: icinga + - makedirs: true + - mode: 640 + - template: jinja + - context: + services: {{ pillar['services'] }} + icinga2-service: service.running: - name: icinga2 - enable: true + - watch: + - file: /etc/icinga2/conf.d/*/* + #databases: #icinga2_ido - #icinga2_director #icinga2_web #need to create icinga ido db and generate schema from /usr/share/icinga2-ido-mysql/schema/mysql.sql diff 
--git a/states/roles/maintain/lam/config.cfg b/states/roles/maintain/lam/config.cfg deleted file mode 100644 index 53f5736..0000000 --- a/states/roles/maintain/lam/config.cfg +++ /dev/null @@ -1,58 +0,0 @@ - -# password to add/delete/rename configuration profiles (default: lam) -password: {SSHA}P9Ne8ZSKD4QHuHkFe8ayVIKSnvE= XjHBmA== - -# default profile, without ".conf" -default: ldap-root - -# log level -logLevel: 4 - -# log destination -logDestination: SYSLOG - - -# session timeout in minutes -sessionTimeout: 30 - -# list of hosts which may access LAM -allowedHosts: - -# list of hosts which may access LAM Pro self service -allowedHostsSelfService: - -# encrypt session data -encryptSession: true - -# Password: minimum password length -passwordMinLength: 0 - -# Password: minimum uppercase characters -passwordMinUpper: 0 - -# Password: minimum lowercase characters -passwordMinLower: 0 - -# Password: minimum numeric characters -passwordMinNumeric: 0 - -# Password: minimum symbolic characters -passwordMinSymbol: 0 - -# Password: minimum character classes (0-4) -passwordMinClasses: 0 - -# Password: checked rules -checkedRulesCount: -1 - -# Password: must not contain part of user name -passwordMustNotContain3Chars: false - -# Password: must not contain user name -passwordMustNotContainUser: false - -# Email format (default/unix) -mailEOL: default - -# PHP error reporting (default/system) -errorReporting: default diff --git a/states/roles/maintain/lam/init.sls b/states/roles/maintain/lam/init.sls deleted file mode 100644 index 99b0de2..0000000 --- a/states/roles/maintain/lam/init.sls +++ /dev/null @@ -1,59 +0,0 @@ -php: - pkg.installed: - - pkgs: - - php56 - - php56-fpm - - php56-ldap - service.running: - - name: php56-fpm - - enable: true - - watch: - - file: /etc/php56/php.ini - -ldap-account-manager: - pkg.installed - -/etc/php56/php.ini: - file.managed: - - source: salt://roles/maintain/lam/php.ini - - user: root - - group: root - - mode: 644 - -/etc/nginx/conf.d/lam-server.conf: - file.managed: - - source: salt://roles/maintain/lam/lam-server.conf - - user: root - - group: root - - mode: 644 - -/usr/share/webapps: - file.directory: - - user: http - - group: http - - recurse: - - user - - group - -/var/lib/ldap-account-manager: - file.directory: - - user: http - - group: http -# - mode: 777 - - recurse: - - user - - group -# - mode - -/etc/webapps/ldap-account-manager/config.cfg: - file.managed: - - source: salt://roles/maintain/lam/config.cfg - - user: http - - group: http - - mode: 644 - -/etc/webapps/ldap-account-manager: - file.directory: - - user: http - - group: http - - mode: 775 diff --git a/states/roles/maintain/lam/lam-server.conf b/states/roles/maintain/lam/lam-server.conf deleted file mode 100644 index 08c157f..0000000 --- a/states/roles/maintain/lam/lam-server.conf +++ /dev/null @@ -1,21 +0,0 @@ -server { - listen 8000; - - index index.html; - root /usr/share/webapps/ldap-account-manager; - autoindex off; - - location ~ \.php$ { - fastcgi_split_path_info ^(.+\.php)(/.+)$; - fastcgi_pass unix:/run/php56-fpm/php-fpm.sock; - fastcgi_index index.php; - include fastcgi.conf; - } - - location ~ /lam/(tmp/internal|sess|config|lib|help|locale) { - deny all; - return 403; - } - -} - diff --git a/states/roles/maintain/lamp/httpd.conf b/states/roles/maintain/lamp/httpd.conf deleted file mode 100644 index e45dc15..0000000 --- a/states/roles/maintain/lamp/httpd.conf +++ /dev/null @@ -1,541 +0,0 @@ -# -# This is the main Apache HTTP server configuration file. 
It contains the -# configuration directives that give the server its instructions. -# See for detailed information. -# In particular, see -# -# for a discussion of each configuration directive. -# -# Do NOT simply read the instructions in here without understanding -# what they do. They're here only as hints or reminders. If you are unsure -# consult the online docs. You have been warned. -# -# Configuration and logfile names: If the filenames you specify for many -# of the server's control files begin with "/" (or "drive:/" for Win32), the -# server will use that explicit path. If the filenames do *not* begin -# with "/", the value of ServerRoot is prepended -- so "logs/access_log" -# with ServerRoot set to "/usr/local/apache2" will be interpreted by the -# server as "/usr/local/apache2/logs/access_log", whereas "/logs/access_log" -# will be interpreted as '/logs/access_log'. - -# -# ServerRoot: The top of the directory tree under which the server's -# configuration, error, and log files are kept. -# -# Do not add a slash at the end of the directory path. If you point -# ServerRoot at a non-local disk, be sure to specify a local disk on the -# Mutex directive, if file-based mutexes are used. If you wish to share the -# same ServerRoot for multiple httpd daemons, you will need to change at -# least PidFile. -# -ServerRoot "/etc/httpd" - -# -# Mutex: Allows you to set the mutex mechanism and mutex file directory -# for individual mutexes, or change the global defaults -# -# Uncomment and change the directory if mutexes are file-based and the default -# mutex file directory is not on a local disk or is not appropriate for some -# other reason. -# -# Mutex default:/run/httpd - -# -# Listen: Allows you to bind Apache to specific IP addresses and/or -# ports, instead of the default. See also the -# directive. -# -# Change this to Listen on specific IP addresses as shown below to -# prevent Apache from glomming onto all bound IP addresses. -# -#Listen 12.34.56.78:80 -Listen 8000 - -# -# Dynamic Shared Object (DSO) Support -# -# To be able to use the functionality of a module which was built as a DSO you -# have to place corresponding `LoadModule' lines at this location so the -# directives contained in it are actually available _before_ they are used. -# Statically compiled modules (those listed by `httpd -l') do not need -# to be loaded here. 
-# -# Example: -# LoadModule foo_module modules/mod_foo.so -# -LoadModule authn_file_module modules/mod_authn_file.so -#LoadModule authn_dbm_module modules/mod_authn_dbm.so -#LoadModule authn_anon_module modules/mod_authn_anon.so -#LoadModule authn_dbd_module modules/mod_authn_dbd.so -#LoadModule authn_socache_module modules/mod_authn_socache.so -LoadModule authn_core_module modules/mod_authn_core.so -LoadModule authz_host_module modules/mod_authz_host.so -LoadModule authz_groupfile_module modules/mod_authz_groupfile.so -LoadModule authz_user_module modules/mod_authz_user.so -#LoadModule authz_dbm_module modules/mod_authz_dbm.so -#LoadModule authz_owner_module modules/mod_authz_owner.so -#LoadModule authz_dbd_module modules/mod_authz_dbd.so -LoadModule authz_core_module modules/mod_authz_core.so -#LoadModule authnz_ldap_module modules/mod_authnz_ldap.so -#LoadModule authnz_fcgi_module modules/mod_authnz_fcgi.so -LoadModule access_compat_module modules/mod_access_compat.so -LoadModule auth_basic_module modules/mod_auth_basic.so -#LoadModule auth_form_module modules/mod_auth_form.so -#LoadModule auth_digest_module modules/mod_auth_digest.so -#LoadModule allowmethods_module modules/mod_allowmethods.so -#LoadModule file_cache_module modules/mod_file_cache.so -#LoadModule cache_module modules/mod_cache.so -#LoadModule cache_disk_module modules/mod_cache_disk.so -#LoadModule cache_socache_module modules/mod_cache_socache.so -#LoadModule socache_shmcb_module modules/mod_socache_shmcb.so -#LoadModule socache_dbm_module modules/mod_socache_dbm.so -#LoadModule socache_memcache_module modules/mod_socache_memcache.so -#LoadModule watchdog_module modules/mod_watchdog.so -#LoadModule macro_module modules/mod_macro.so -#LoadModule dbd_module modules/mod_dbd.so -#LoadModule dumpio_module modules/mod_dumpio.so -#LoadModule echo_module modules/mod_echo.so -#LoadModule buffer_module modules/mod_buffer.so -#LoadModule data_module modules/mod_data.so -#LoadModule ratelimit_module modules/mod_ratelimit.so -LoadModule reqtimeout_module modules/mod_reqtimeout.so -#LoadModule ext_filter_module modules/mod_ext_filter.so -#LoadModule request_module modules/mod_request.so -LoadModule include_module modules/mod_include.so -LoadModule filter_module modules/mod_filter.so -#LoadModule reflector_module modules/mod_reflector.so -#LoadModule substitute_module modules/mod_substitute.so -#LoadModule sed_module modules/mod_sed.so -#LoadModule charset_lite_module modules/mod_charset_lite.so -#LoadModule deflate_module modules/mod_deflate.so -#LoadModule xml2enc_module modules/mod_xml2enc.so -#LoadModule proxy_html_module modules/mod_proxy_html.so -LoadModule mime_module modules/mod_mime.so -#LoadModule ldap_module modules/mod_ldap.so -LoadModule log_config_module modules/mod_log_config.so -#LoadModule log_debug_module modules/mod_log_debug.so -#LoadModule log_forensic_module modules/mod_log_forensic.so -#LoadModule logio_module modules/mod_logio.so -#LoadModule lua_module modules/mod_lua.so -LoadModule env_module modules/mod_env.so -#LoadModule mime_magic_module modules/mod_mime_magic.so -#LoadModule cern_meta_module modules/mod_cern_meta.so -#LoadModule expires_module modules/mod_expires.so -LoadModule headers_module modules/mod_headers.so -#LoadModule ident_module modules/mod_ident.so -#LoadModule usertrack_module modules/mod_usertrack.so -#LoadModule unique_id_module modules/mod_unique_id.so -LoadModule setenvif_module modules/mod_setenvif.so -LoadModule version_module modules/mod_version.so -#LoadModule remoteip_module 
modules/mod_remoteip.so -LoadModule proxy_module modules/mod_proxy.so -LoadModule proxy_connect_module modules/mod_proxy_connect.so -LoadModule proxy_ftp_module modules/mod_proxy_ftp.so -LoadModule proxy_http_module modules/mod_proxy_http.so -LoadModule proxy_fcgi_module modules/mod_proxy_fcgi.so -LoadModule proxy_scgi_module modules/mod_proxy_scgi.so -#LoadModule proxy_fdpass_module modules/mod_proxy_fdpass.so -LoadModule proxy_wstunnel_module modules/mod_proxy_wstunnel.so -LoadModule proxy_ajp_module modules/mod_proxy_ajp.so -LoadModule proxy_balancer_module modules/mod_proxy_balancer.so -LoadModule proxy_express_module modules/mod_proxy_express.so -#LoadModule session_module modules/mod_session.so -#LoadModule session_cookie_module modules/mod_session_cookie.so -#LoadModule session_crypto_module modules/mod_session_crypto.so -#LoadModule session_dbd_module modules/mod_session_dbd.so -LoadModule slotmem_shm_module modules/mod_slotmem_shm.so -#LoadModule slotmem_plain_module modules/mod_slotmem_plain.so -#LoadModule ssl_module modules/mod_ssl.so -#LoadModule dialup_module modules/mod_dialup.so -LoadModule lbmethod_byrequests_module modules/mod_lbmethod_byrequests.so -LoadModule lbmethod_bytraffic_module modules/mod_lbmethod_bytraffic.so -LoadModule lbmethod_bybusyness_module modules/mod_lbmethod_bybusyness.so -LoadModule lbmethod_heartbeat_module modules/mod_lbmethod_heartbeat.so -LoadModule mpm_prefork_module modules/mod_mpm_prefork.so -LoadModule unixd_module modules/mod_unixd.so -#LoadModule heartbeat_module modules/mod_heartbeat.so -#LoadModule heartmonitor_module modules/mod_heartmonitor.so -#LoadModule dav_module modules/mod_dav.so -LoadModule status_module modules/mod_status.so -LoadModule autoindex_module modules/mod_autoindex.so -#LoadModule asis_module modules/mod_asis.so -#LoadModule info_module modules/mod_info.so -#LoadModule suexec_module modules/mod_suexec.so -#LoadModule cgid_module modules/mod_cgid.so -#LoadModule cgi_module modules/mod_cgi.so -#LoadModule dav_fs_module modules/mod_dav_fs.so -#LoadModule dav_lock_module modules/mod_dav_lock.so -#LoadModule vhost_alias_module modules/mod_vhost_alias.so -LoadModule negotiation_module modules/mod_negotiation.so -LoadModule dir_module modules/mod_dir.so -#LoadModule imagemap_module modules/mod_imagemap.so -#LoadModule actions_module modules/mod_actions.so -#LoadModule speling_module modules/mod_speling.so -LoadModule userdir_module modules/mod_userdir.so -LoadModule alias_module modules/mod_alias.so -LoadModule rewrite_module modules/mod_rewrite.so -LoadModule php7_module modules/libphp7.so - -#RewriteEngine On -#RewriteCond %{HTTPS} off -#RewriteRule (.*) https://%{HTTP_HOST}%{REQUEST_URI} - - -# -# If you wish httpd to run as a different user or group, you must run -# httpd as root initially and it will switch. -# -# User/Group: The name (or #number) of the user/group to run httpd as. -# It is usually good practice to create a dedicated user and group for -# running httpd, as with most system services. -# -User http -Group http - - - -# 'Main' server configuration -# -# The directives in this section set up the values used by the 'main' -# server, which responds to any requests that aren't handled by a -# definition. These values also provide defaults for -# any containers you may define later in the file. -# -# All of these directives may appear inside containers, -# in which case these default settings will be overridden for the -# virtual host being defined. 
-# - -# -# ServerAdmin: Your address, where problems with the server should be -# e-mailed. This address appears on some server-generated pages, such -# as error documents. e.g. admin@your-domain.com -# -ServerAdmin you@example.com - -# -# ServerName gives the name and port that the server uses to identify itself. -# This can often be determined automatically, but we recommend you specify -# it explicitly to prevent problems during startup. -# -# If your host doesn't have a registered DNS name, enter its IP address here. -# -#ServerName www.example.com:80 - -# -# Deny access to the entirety of your server's filesystem. You must -# explicitly permit access to web content directories in other -# blocks below. -# - - AllowOverride none - Require all denied - - -# -# Note that from this point forward you must specifically allow -# particular features to be enabled - so if something's not working as -# you might expect, make sure that you have specifically enabled it -# below. -# - -# -# DocumentRoot: The directory out of which you will serve your -# documents. By default, all requests are taken from this directory, but -# symbolic links and aliases may be used to point to other locations. -# -DocumentRoot "/srv/http" - - # - # Possible values for the Options directive are "None", "All", - # or any combination of: - # Indexes Includes FollowSymLinks SymLinksifOwnerMatch ExecCGI MultiViews - # - # Note that "MultiViews" must be named *explicitly* --- "Options All" - # doesn't give it to you. - # - # The Options directive is both complicated and important. Please see - # http://httpd.apache.org/docs/2.4/mod/core.html#options - # for more information. - # - Options Indexes FollowSymLinks - - # - # AllowOverride controls what directives may be placed in .htaccess files. - # It can be "All", "None", or any combination of the keywords: - # AllowOverride FileInfo AuthConfig Limit - # - AllowOverride None - - # - # Controls who can get stuff from this server. - # - Require all granted - - -# -# DirectoryIndex: sets the file that Apache will serve if a directory -# is requested. -# - - DirectoryIndex index.html - - -# -# The following lines prevent .htaccess and .htpasswd files from being -# viewed by Web clients. -# - - Require all denied - - -# -# ErrorLog: The location of the error log file. -# If you do not specify an ErrorLog directive within a -# container, error messages relating to that virtual host will be -# logged here. If you *do* define an error logfile for a -# container, that host's errors will be logged there and not here. -# -ErrorLog "/var/log/httpd/error_log" - -# -# LogLevel: Control the number of messages logged to the error_log. -# Possible values include: debug, info, notice, warn, error, crit, -# alert, emerg. -# -LogLevel warn - - - # - # The following directives define some format nicknames for use with - # a CustomLog directive (see below). - # - LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" combined - LogFormat "%h %l %u %t \"%r\" %>s %b" common - - - # You need to enable mod_logio.c to use %I and %O - LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\" %I %O" combinedio - - - # - # The location and format of the access logfile (Common Logfile Format). - # If you do not define any access logfiles within a - # container, they will be logged here. Contrariwise, if you *do* - # define per- access logfiles, transactions will be - # logged therein and *not* in this file. 
- # - CustomLog "/var/log/httpd/access_log" common - - # - # If you prefer a logfile with access, agent, and referer information - # (Combined Logfile Format) you can use the following directive. - # - #CustomLog "/var/log/httpd/access_log" combined - - - - # - # Redirect: Allows you to tell clients about documents that used to - # exist in your server's namespace, but do not anymore. The client - # will make a new request for the document at its new location. - # Example: - # Redirect permanent /foo http://www.example.com/bar - - # - # Alias: Maps web paths into filesystem paths and is used to - # access content that does not live under the DocumentRoot. - # Example: - # Alias /webpath /full/filesystem/path - # - # If you include a trailing / on /webpath then the server will - # require it to be present in the URL. You will also likely - # need to provide a section to allow access to - # the filesystem path. - - # - # ScriptAlias: This controls which directories contain server scripts. - # ScriptAliases are essentially the same as Aliases, except that - # documents in the target directory are treated as applications and - # run by the server when requested rather than as documents sent to the - # client. The same rules about trailing "/" apply to ScriptAlias - # directives as to Alias. - # - ScriptAlias /cgi-bin/ "/srv/http/cgi-bin/" - - - - - # - # ScriptSock: On threaded servers, designate the path to the UNIX - # socket used to communicate with the CGI daemon of mod_cgid. - # - #Scriptsock cgisock - - -# -# "/srv/http/cgi-bin" should be changed to whatever your ScriptAliased -# CGI directory exists, if you have that configured. -# - - AllowOverride None - Options None - Require all granted - - - - # - # TypesConfig points to the file containing the list of mappings from - # filename extension to MIME-type. - # - TypesConfig conf/mime.types - - # - # AddType allows you to add to or override the MIME configuration - # file specified in TypesConfig for specific file types. - # - #AddType application/x-gzip .tgz - # - # AddEncoding allows you to have certain browsers uncompress - # information on the fly. Note: Not all browsers support this. - # - #AddEncoding x-compress .Z - #AddEncoding x-gzip .gz .tgz - # - # If the AddEncoding directives above are commented-out, then you - # probably should define those extensions to indicate media types: - # - AddType application/x-compress .Z - AddType application/x-gzip .gz .tgz - - # - # AddHandler allows you to map certain file extensions to "handlers": - # actions unrelated to filetype. These can be either built into the server - # or added with the Action directive (see below) - # - # To use CGI scripts outside of ScriptAliased directories: - # (You will also need to add "ExecCGI" to the "Options" directive.) - # - #AddHandler cgi-script .cgi - - # For type maps (negotiated resources): - #AddHandler type-map var - - # - # Filters allow you to process content before it is sent to the client. - # - # To parse .shtml files for server-side includes (SSI): - # (You will also need to add "Includes" to the "Options" directive.) - # - #AddType text/html .shtml - #AddOutputFilter INCLUDES .shtml - - -# -# The mod_mime_magic module allows the server to use various hints from the -# contents of the file itself to determine its type. The MIMEMagicFile -# directive tells the module where the hint definitions are located. 
-# -#MIMEMagicFile conf/magic - -# -# Customizable error responses come in three flavors: -# 1) plain text 2) local redirects 3) external redirects -# -# Some examples: -#ErrorDocument 500 "The server made a boo boo." -#ErrorDocument 404 /missing.html -#ErrorDocument 404 "/cgi-bin/missing_handler.pl" -#ErrorDocument 402 http://www.example.com/subscription_info.html -# - -# -# MaxRanges: Maximum number of Ranges in a request before -# returning the entire resource, or one of the special -# values 'default', 'none' or 'unlimited'. -# Default setting is to accept 200 Ranges. -#MaxRanges unlimited - -# -# EnableMMAP and EnableSendfile: On systems that support it, -# memory-mapping or the sendfile syscall may be used to deliver -# files. This usually improves server performance, but must -# be turned off when serving from networked-mounted -# filesystems or if support for these functions is otherwise -# broken on your system. -# Defaults: EnableMMAP On, EnableSendfile Off -# -#EnableMMAP off -#EnableSendfile on - -# Supplemental configuration -# -# The configuration files in the conf/extra/ directory can be -# included to add extra features or to modify the default configuration of -# the server, or you may simply copy their contents here and change as -# necessary. - -# Server-pool management (MPM specific) -Include conf/extra/httpd-mpm.conf - -# Multi-language error messages -Include conf/extra/httpd-multilang-errordoc.conf - -# Fancy directory listings -Include conf/extra/httpd-autoindex.conf - -# Language settings -Include conf/extra/httpd-languages.conf - -# User home directories -Include conf/extra/httpd-userdir.conf - -# Real-time info on requests and configuration -#Include conf/extra/httpd-info.conf - -# Virtual hosts -#Include conf/extra/httpd-vhosts.conf - -# Local access to the Apache HTTP Server Manual -#Include conf/extra/httpd-manual.conf - -# Distributed authoring and versioning (WebDAV) -#Include conf/extra/httpd-dav.conf - -# Various default settings -Include conf/extra/httpd-default.conf - -#php -Include conf/extra/php7_module.conf - -# ldap account manager configuration -Include conf/servers/*.conf - -# Configure mod_proxy_html to understand HTML4/XHTML1 - -Include conf/extra/proxy-html.conf - - -# Secure (SSL/TLS) connections -#Include conf/extra/httpd-ssl.conf -# -# Note: The following must must be present to support -# starting without SSL on platforms with no /dev/random equivalent -# but a statically compiled-in mod_ssl. 
-# - -SSLRandomSeed startup builtin -SSLRandomSeed connect builtin - -# -# uncomment out the below to deal with user agents that deliberately -# violate open standards by misusing DNT (DNT *must* be a specific -# end-user choice) -# -# -#BrowserMatch "MSIE 10.0;" bad_DNT -# -# -#RequestHeader unset DNT env=bad_DNT -# - diff --git a/states/roles/maintain/lamp/init.sls b/states/roles/maintain/lamp/init.sls deleted file mode 100644 index 010c3b1..0000000 --- a/states/roles/maintain/lamp/init.sls +++ /dev/null @@ -1,21 +0,0 @@ -apache: - pkg.installed - -/etc/httpd/conf/servers: - file.directory: - - user: root - - group: root - - mode: 755 - -/etc/httpd/conf/httpd.conf: - file.managed: - - source: salt://roles/maintain/lamp/httpd.conf - - user: root - - group: root - - mode: 644 - -php: - pkg.installed: - - pkgs: - - php - - php_apache diff --git a/states/roles/maintain/ldap/DB_CONFIG b/states/roles/maintain/ldap/DB_CONFIG deleted file mode 100644 index d0f2c68..0000000 --- a/states/roles/maintain/ldap/DB_CONFIG +++ /dev/null @@ -1,28 +0,0 @@ -# $OpenLDAP$ -# Example DB_CONFIG file for use with slapd(8) BDB/HDB databases. -# -# See the Oracle Berkeley DB documentation -# -# for detail description of DB_CONFIG syntax and semantics. -# -# Hints can also be found in the OpenLDAP Software FAQ -# -# in particular: -# - -# Note: most DB_CONFIG settings will take effect only upon rebuilding -# the DB environment. - -# one 0.25 GB cache -set_cachesize 0 268435456 1 - -# Data Directory -#set_data_dir db - -# Transaction Log settings -set_lg_regionmax 262144 -set_lg_bsize 2097152 -#set_lg_dir logs - -# Note: special DB_CONFIG flags are no longer needed for "quick" -# slapadd(8) or slapindex(8) access (see their -q option). diff --git a/states/roles/maintain/ldap/init.sls b/states/roles/maintain/ldap/init.sls deleted file mode 100644 index 9ee2ef3..0000000 --- a/states/roles/maintain/ldap/init.sls +++ /dev/null @@ -1,94 +0,0 @@ -openldap: - pkg.installed - -/var/lib/openldap/openldap-data: - file.directory: - - mode: 755 - - user: ldap - - group: ldap - -/etc/openldap/certs/: - file.recurse: - - source: salt://secure/certs/ldap.actcur.com/ - - user: ldap - - group: ldap - - dir_mode: 755 - - file:mode: 400 - - clean: true - -/usr/lib/systemd/system/slapd.service: - file.managed: - - source: salt://roles/maintain/ldap/slapd.service - - user: root - - group: root - - mode: 644 - -slapd: - service.running: - - enable: true - - watch: - - file: own_slapd.d - - file: /usr/lib/systemd/system/slapd.service - -/etc/openldap/slapd.conf: - file.managed: - - source: salt://roles/maintain/ldap/slapd.conf - - user: root - - group: ldap - - mode: 640 - -/root/update_slapd.sh: - file.managed: - - source: salt://roles/maintain/ldap/update_slapd.sh - - user: root - - group: root - - mode: 700 - -update_slapd: - cmd.run: - - name: "/bin/bash /root/update_slapd.sh" - - stateful: true - - require: - - file: /etc/openldap/slapd.conf - -own_slapd.d: - file.directory: - - name: /etc/openldap/slapd.d/ - - user: ldap - - group: ldap - - dir_mode: 755 - - file_mode: 644 - - recurse: - - user - - group - - mode - - require: - - cmd: update_slapd - -own_data: - file.directory: - - name: /var/lib/openldap/openldap-data - - user: ldap - - group: ldap - - dir_mode: 755 - - file_mode: 644 - - recurse: - - user - - group - - mode - - require: - - cmd: update_slapd - -/etc/openldap/rdn.ldiff: - file.managed: - - source: salt://roles/maintain/ldap/rdn.ldiff - - user: root - - group: root - - mode: 750 - 
-/var/lib/openldap/openldap-data/DB_CONFIG: - file.managed: - - source: salt://roles/maintain/ldap/DB_CONFIG - - user: ldap - - group: ldap - - mode: 644 diff --git a/states/roles/maintain/ldap/rdn.ldiff b/states/roles/maintain/ldap/rdn.ldiff deleted file mode 100644 index 79a3703..0000000 --- a/states/roles/maintain/ldap/rdn.ldiff +++ /dev/null @@ -1,11 +0,0 @@ -dn: dc=actcur,dc=com -objectClass: dcObject -objectClass: organization -dc: actcur -o: ActcurOrg -description: ActCur domain - -dn: cn=root,dc=actcur,dc=com -objectClass: organizationalRole -cn: root -description: Directory Root User diff --git a/states/roles/maintain/ldap/slapd.conf b/states/roles/maintain/ldap/slapd.conf deleted file mode 100644 index 2c7a2a5..0000000 --- a/states/roles/maintain/ldap/slapd.conf +++ /dev/null @@ -1,101 +0,0 @@ -# -# See slapd.conf(5) for details on configuration options. -# This file should NOT be world readable. -# -include /etc/openldap/schema/core.schema -include /etc/openldap/schema/cosine.schema -include /etc/openldap/schema/inetorgperson.schema -include /etc/openldap/schema/nis.schema - -# Define global ACLs to disable default read access. - -# Do not enable referrals until AFTER you have a working directory -# service AND an understanding of referrals. -#referral ldap://root.openldap.org - -pidfile /run/openldap/slapd.pid -argsfile /run/openldap/slapd.args - -# Load dynamic backend modules: -# modulepath /usr/lib/openldap -# moduleload back_mdb.la -# moduleload back_ldap.la - -# Sample security restrictions -# Require integrity protection (prevent hijacking) -# Require 112-bit (3DES or better) encryption for updates -# Require 63-bit encryption for simple bind -# security ssf=1 update_ssf=112 simple_bind=64 - -# Sample access control policy: -# Root DSE: allow anyone to read it -# Subschema (sub)entry DSE: allow anyone to read it -# Other DSEs: -# Allow self write access -# Allow authenticated users read access -# Allow anonymous users to authenticate -# Directives needed to implement policy: -# access to dn.base="" by * read -# access to dn.base="cn=Subschema" by * read -# access to * -# by self write -# by users read -# by anonymous auth -# -# if no access controls are present, the default policy -# allows anyone and everyone to read anything but restricts -# updates to rootdn. (e.g., "access to * by * read") -# -# rootdn can always read and write EVERYTHING! - -####################################################################### -# MDB database definitions -####################################################################### - -database mdb -maxsize 1073741824 -suffix "dc=actcur,dc=com" -rootdn "cn=root,dc=actcur,dc=com" -# Cleartext passwords, especially for the rootdn, should -# be avoid. See slappasswd(8) and slapd.conf(5) for details. -# Use of strong authentication encouraged. -rootpw {SSHA}26ofqGZtb6fO+/5D3cUCiZQXBZSUc/CE -#{SSHA}3KgcfNXboKlvnSo+a9SuS1roQOD13IV5 -# The database directory MUST exist prior to running slapd AND -# should only be accessible by the slapd and slap tools. -# Mode 700 recommended. 
-directory /var/lib/openldap/openldap-data -# Indices to maintain -index objectClass eq -index uid pres,eq -index mail pres,sub,eq -index cn pres,sub,eq -index sn pres,sub,eq -index dc eq -#rootpw {SSHA}3KgcfNXboKlvnSo+a9SuS1roQOD13IV5 -rootpw {SSHA}26ofqGZtb6fO+/5D3cUCiZQXBZSUc/CE - -####################################################################### -# Certificate/SSL Definition -####################################################################### - -TLSCipherSuite DEFAULT -TLSCertificateFile /etc/openldap/certs/cert.pem -TLSCertificateKeyFile /etc/openldap/certs/privkey.pem -TLSCACertificateFile /etc/openldap/certs/chain.pem -TLSCACertificatePath /usr/share/ca-certificates/trust-source - -#database config -#rootdn "cn=root,cn=config" -#rootpw {SSHA}3KgcfNXboKlvnSo+a9SuS1roQOD13IV5 - -access to attrs=userPassword - by self write - by anonymous auth - by group.exact="cn=ldapadm,ou=group,dc=actcur,dc=com" write - by * none - -access to * - by self read - by group.exact="cn=ldapadm,ou=group,dc=actcur,dc=com" write - by * read diff --git a/states/roles/maintain/ldap/slapd.service b/states/roles/maintain/ldap/slapd.service deleted file mode 100644 index a664fa3..0000000 --- a/states/roles/maintain/ldap/slapd.service +++ /dev/null @@ -1,9 +0,0 @@ -[Unit] -Description=OpenLDAP server daemon - -[Service] -Type=forking -ExecStart=/usr/bin/slapd -u ldap -g ldap - -[Install] -WantedBy=multi-user.target diff --git a/states/roles/maintain/ldap/update_slapd.sh b/states/roles/maintain/ldap/update_slapd.sh deleted file mode 100644 index 23ecaf0..0000000 --- a/states/roles/maintain/ldap/update_slapd.sh +++ /dev/null @@ -1,10 +0,0 @@ -if [ `find /etc/openldap/slapd.conf -mmin +1 | wc -l` == 0 ] -then - rm -Rf /etc/openldap/slapd.d/* - slaptest -f /etc/openldap/slapd.conf -F /etc/openldap/slapd.d/ - systemctl stop slapd - slapindex - echo "changed=yes comment='cleaned /etc/openldap/slapd.d and ran slaptest'" -else - echo "changed=no" -fi diff --git a/states/roles/maintain/lightbooks/init.sls b/states/roles/maintain/lightbooks/init.sls new file mode 100644 index 0000000..d25c713 --- /dev/null +++ b/states/roles/maintain/lightbooks/init.sls @@ -0,0 +1,36 @@ +lightbooks-php: + pkg.installed: + - name: php +lightbooks-php-fpm: + pkg.installed: + - name: php-fpm + service.running: + - name: php-fpm + - enable: true + - watch: + - file: /etc/php/php.ini + +lightbooks-mysql-client: + pkg.installed: + - name: mariadb-clients + +/etc/nginx/conf.d/lightbooks.conf: + file.managed: + - source: salt://roles/maintain/lightbooks/nginx.conf + - user: root + - group: root + - mode: 644 + - makedirs: true +/etc/nginx/conf.d/lightbooks-dev.conf: + file.managed: + - source: salt://roles/maintain/lightbooks/nginx-dev.conf + - user: root + - group: root + - mode: 644 + - makedirs: true +/etc/php/php.ini: + file.managed: + - source: salt://roles/maintain/lightbooks/php.ini + - user: root + - group: root + - mode: 644 diff --git a/states/roles/maintain/lightbooks/nginx-dev.conf b/states/roles/maintain/lightbooks/nginx-dev.conf new file mode 100644 index 0000000..dc7dde2 --- /dev/null +++ b/states/roles/maintain/lightbooks/nginx-dev.conf @@ -0,0 +1,44 @@ +server { + listen *:8080; + server_name books.dev.actcur.com; + + root /usr/share/webapps/lightbooks-dev/public; #Path of lightbooks-dev web directory + index index.php; + access_log /var/log/nginx/access.log; + error_log /var/log/nginx/error.log; + + location = /favicon.ico { + log_not_found off; + access_log off; + expires max; + } + + location ~ /\. 
{ + deny all; + access_log off; + log_not_found off; + } + + location ~ \..*/.*\.php$ { + return 403; + } + + if (!-d $request_filename) { + rewrite ^/(.+)/$ /$1 permanent; + } + + location / { + try_files $1 $uri $uri/ /index.php$is_args$args; + } + + location ~ ^/index\.php(.*)$ { + fastcgi_index index.php; + include /etc/nginx/fastcgi_params; + try_files $uri =404; + fastcgi_split_path_info ^(.+\.php)(/.+)$; + fastcgi_pass unix:/var/run/php-fpm/php-fpm.sock; #Replace with host:port if php-fpm is configured to listen on a TCP port. + fastcgi_param SCRIPT_FILENAME /usr/share/webapps/lightbooks-dev/public/index.php; #Replace with the lightbooks-dev web index.php file path. + fastcgi_param LIGHTBOOKSWEB_CONFIGDIR /etc/lightbooks-dev; + fastcgi_param REMOTE_USER $remote_user; + } +} diff --git a/states/roles/maintain/lightbooks/nginx.conf b/states/roles/maintain/lightbooks/nginx.conf new file mode 100644 index 0000000..423002d --- /dev/null +++ b/states/roles/maintain/lightbooks/nginx.conf @@ -0,0 +1,44 @@ +server { + listen *:8000; + server_name books.actcur.com; + + root /usr/share/webapps/lightbooks/public; #Path of the lightbooks web directory + index index.php; + access_log /var/log/nginx/access.log; + error_log /var/log/nginx/error.log; + + location = /favicon.ico { + log_not_found off; + access_log off; + expires max; + } + + location ~ /\. { + deny all; + access_log off; + log_not_found off; + } + + location ~ \..*/.*\.php$ { + return 403; + } + + if (!-d $request_filename) { + rewrite ^/(.+)/$ /$1 permanent; + } + + location / { + try_files $1 $uri $uri/ /index.php$is_args$args; + } + + location ~ ^/index\.php(.*)$ { + fastcgi_index index.php; + include /etc/nginx/fastcgi_params; + try_files $uri =404; + fastcgi_split_path_info ^(.+\.php)(/.+)$; + fastcgi_pass unix:/var/run/php-fpm/php-fpm.sock; #Replace with host:port if php-fpm is configured to listen on a TCP port. + fastcgi_param SCRIPT_FILENAME /usr/share/webapps/lightbooks/public/index.php; #Replace with the lightbooks web index.php file path. + fastcgi_param LIGHTBOOKSWEB_CONFIGDIR /etc/lightbooks; + fastcgi_param REMOTE_USER $remote_user; + } +}
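Note: a quick smoke test for the two vhosts above, assuming php-fpm is up on its stock socket (on Arch, /var/run is a symlink to /run, so the unix:/var/run/php-fpm/php-fpm.sock path in these files resolves to php-fpm's default socket):

    nginx -t    # validate the rendered configs
    curl -sI -H "Host: books.actcur.com" http://127.0.0.1:8000/ | head -n 1
    curl -sI -H "Host: books.dev.actcur.com" http://127.0.0.1:8080/ | head -n 1

diff --git a/states/roles/maintain/lam/php.ini b/states/roles/maintain/lightbooks/php.ini similarity index 89% rename from states/roles/maintain/lam/php.ini rename to states/roles/maintain/lightbooks/php.ini index 6d72085..5c857bc 100644 --- a/states/roles/maintain/lam/php.ini +++ b/states/roles/maintain/lightbooks/php.ini @@ -143,7 +143,7 @@ ; Development Value: 1000 ; Production Value: 1000 -; session.hash_bits_per_character +; session.sid_bits_per_character ; Default Value: 4 ; Development Value: 5 ; Production Value: 5 @@ -158,11 +158,6 @@ ; Development Value: On ; Production Value: Off -; url_rewriter.tags -; Default Value: "a=href,area=href,frame=src,form=,fieldset=" -; Development Value: "a=href,area=href,frame=src,input=src,form=fakeentry" -; Production Value: "a=href,area=href,frame=src,input=src,form=fakeentry" - ; variables_order ; Default Value: "EGPCS" ; Development Value: "GPCS" @@ -201,10 +196,6 @@ engine = On ; http://php.net/short-open-tag short_open_tag = Off -; Allow ASP-style <% %> tags. -; http://php.net/asp-tags -asp_tags = Off - ; The number of significant digits displayed in floating point numbers. ; http://php.net/precision precision = 14 @@ -248,6 +239,23 @@ output_buffering = 4096 ; http://php.net/output-handler ;output_handler = +; URL rewriter function rewrites URL on the fly by using +; output buffer. You can set target tags by this configuration. +; "form" tag is special tag.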
It will add hidden input tag to pass values. +; Refer to session.trans_sid_tags for usage. +; Default Value: "form=" +; Development Value: "form=" +; Production Value: "form=" +;url_rewriter.tags + +; URL rewriter will not rewrites absolute URL nor form by default. To enable +; absolute URL rewrite, allowed hosts must be defined at RUNTIME. +; Refer to session.trans_sid_hosts for more details. +; Default Value: "" +; Development Value: "" +; Production Value: "" +;url_rewriter.hosts + ; Transparent output compression using the zlib library ; Valid values for this option are 'off', 'on', or a specific buffer size ; to be used for compression (default is 4KB) @@ -289,13 +297,16 @@ unserialize_callback_func = ; When floats & doubles are serialized store serialize_precision significant ; digits after the floating point. The default value ensures that when floats ; are decoded with unserialize, the data will remain the same. -serialize_precision = 17 +; The value is also used for json_encode when encoding double values. +; If -1 is used, then dtoa mode 0 is used which automatically select the best +; precision. +serialize_precision = -1 ; open_basedir, if set, limits all file operations to the defined directory ; and below. This directive makes most sense if used in a per-directory ; or per-virtualhost web server configuration file. ; http://php.net/open-basedir -open_basedir = /srv/http/:/home/:/tmp/:/usr/share/pear/:/usr/share/webapps/:/etc/webapps:/var/lib/ldap-account-manager/ +;open_basedir = ; This directive allows you to disable certain functions for security reasons. ; It receives a comma-delimited list of function names. @@ -327,7 +338,7 @@ disable_classes = ; be increased on systems where PHP opens many files to reflect the quantity of ; the file operations performed. ; http://php.net/realpath-cache-size -;realpath_cache_size = 16k +;realpath_cache_size = 4096k ; Duration of time, in seconds for which to cache realpath information for a given ; file or directory. For systems with rarely changing files, consider increasing this @@ -667,11 +678,10 @@ auto_prepend_file = ; http://php.net/auto-append-file auto_append_file = -; By default, PHP will output a character encoding using -; the Content-type: header. To disable sending of the charset, simply -; set it to be empty. +; By default, PHP will output a media type using the Content-Type header. To +; disable this, simply set it to be empty. ; -; PHP's built-in default is text/html +; PHP's built-in default media type is set to text/html. ; http://php.net/default-mimetype default_mimetype = "text/html" @@ -691,24 +701,16 @@ default_charset = "UTF-8" ; PHP output character encoding is set to empty. ; If empty, default_charset is used. -; mbstring or iconv output handler is used. ; See also output_buffer. ; http://php.net/output-encoding ;output_encoding = -; Always populate the $HTTP_RAW_POST_DATA variable. PHP's default behavior is -; to disable this feature and it will be removed in a future version. -; If post reading is disabled through enable_post_data_reading, -; $HTTP_RAW_POST_DATA is *NOT* populated. -; http://php.net/always-populate-raw-post-data -;always_populate_raw_post_data = -1 - ;;;;;;;;;;;;;;;;;;;;;;;;; ; Paths and Directories ; ;;;;;;;;;;;;;;;;;;;;;;;;; ; UNIX: "/path1:/path2" -include_path = ".:/usr/share/pear" +;include_path = ".:/php/includes" ; ; Windows: "\path1;\path2" ;include_path = ".;c:\php\includes" @@ -731,7 +733,7 @@ user_dir = ; Directory in which the loadable extensions (modules) reside. 
; http://php.net/extension-dir -extension_dir = "/usr/lib/php56/modules/" +extension_dir = "/usr/lib/php/modules/" ; On windows: ; extension_dir = "ext" @@ -772,6 +774,11 @@ enable_dl = Off ; http://php.net/cgi.fix-pathinfo ;cgi.fix_pathinfo=1 +; if cgi.discard_path is enabled, the PHP CGI binary can safely be placed outside +; of the web tree and people will not be able to circumvent .htaccess security. +; http://php.net/cgi.dicard-path +;cgi.discard_path=1 + ; FastCGI under IIS (on WINNT based OS) supports the ability to impersonate ; security tokens of the calling client. This allows IIS to define the ; security context that the request runs under. mod_fastcgi under Apache @@ -792,6 +799,13 @@ enable_dl = Off ; http://php.net/cgi.rfc2616-headers ;cgi.rfc2616_headers = 0 +; cgi.check_shebang_line controls whether CGI PHP checks for line starting with #! +; (shebang) at the top of the running script. This line might be needed if the +; script support running both as stand-alone script and via PHP CGI<. PHP in CGI +; mode skips this line and ignores its content if this directive is turned on. +; http://php.net/cgi.check-shebang-line +;cgi.check_shebang_line=1 + ;;;;;;;;;;;;;;;; ; File Uploads ; ;;;;;;;;;;;;;;;; @@ -885,31 +899,27 @@ extension=gettext.so ;extension=intl.so extension=ldap.so ;extension=mcrypt.so -;extension=mssql.so -;extension=mysql.so ;extension=mysqli.so ;extension=odbc.so ;zend_extension=opcache.so -extension=openssl.so -;extension=pdo_mysql.so +;extension=pdo_dblib.so +extension=pdo_mysql.so ;extension=pdo_odbc.so ;extension=pdo_pgsql.so -;extension=pdo_sqlite.so +extension=pdo_sqlite.so ;extension=pgsql.so -;extension=phar.so -;extension=posix.so ;extension=pspell.so ;extension=shmop.so ;extension=snmp.so ;extension=soap.so -;extension=sockets.so +extension=sockets.so ;extension=sqlite3.so ;extension=sysvmsg.so ;extension=sysvsem.so ;extension=sysvshm.so ;extension=tidy.so ;extension=xmlrpc.so -;extension=xsl.so +extension=xsl.so extension=zip.so ;;;;;;;;;;;;;;;;;;; @@ -923,7 +933,7 @@ cli_server.color = On [Date] ; Defines the default timezone used by the date functions ; http://php.net/date.timezone -;date.timezone = +date.timezone = America/Chicago ; http://php.net/date.default-latitude ;date.default_latitude = 31.7667 @@ -968,10 +978,7 @@ cli_server.color = On ; happens within intl functions. The value is the level of the error produced. ; Default is 0, which does not produce any errors. ;intl.error_level = E_WARNING - -[sqlite] -; http://php.net/sqlite.assoc-case -;sqlite.assoc_case = 0 +;intl.use_exceptions = 0 [sqlite3] ;sqlite3.extension_dir = @@ -988,6 +995,10 @@ cli_server.color = On ; http://php.net/pcre.recursion-limit ;pcre.recursion_limit=100000 +;Enables or disables JIT compilation of patterns. This requires the PCRE +;library to be compiled with JIT support. +;pcre.jit=1 + [Pdo] ; Whether to pool ODBC connections. Can be one of "strict", "relaxed" or "off" ; http://php.net/pdo-odbc.connection-pooling @@ -1121,64 +1132,6 @@ ibase.dateformat = "%Y-%m-%d" ; Default time format. ibase.timeformat = "%H:%M:%S" -[MySQL] -; Allow accessing, from PHP's perspective, local files with LOAD DATA statements -; http://php.net/mysql.allow_local_infile -mysql.allow_local_infile = On - -; Allow or prevent persistent links. -; http://php.net/mysql.allow-persistent -mysql.allow_persistent = On - -; If mysqlnd is used: Number of cache slots for the internal result set cache -; http://php.net/mysql.cache_size -mysql.cache_size = 2000 - -; Maximum number of persistent links. 
-1 means no limit. -; http://php.net/mysql.max-persistent -mysql.max_persistent = -1 - -; Maximum number of links (persistent + non-persistent). -1 means no limit. -; http://php.net/mysql.max-links -mysql.max_links = -1 - -; Default port number for mysql_connect(). If unset, mysql_connect() will use -; the $MYSQL_TCP_PORT or the mysql-tcp entry in /etc/services or the -; compile-time value defined MYSQL_PORT (in that order). Win32 will only look -; at MYSQL_PORT. -; http://php.net/mysql.default-port -mysql.default_port = - -; Default socket name for local MySQL connects. If empty, uses the built-in -; MySQL defaults. -; http://php.net/mysql.default-socket -mysql.default_socket = - -; Default host for mysql_connect() (doesn't apply in safe mode). -; http://php.net/mysql.default-host -mysql.default_host = - -; Default user for mysql_connect() (doesn't apply in safe mode). -; http://php.net/mysql.default-user -mysql.default_user = - -; Default password for mysql_connect() (doesn't apply in safe mode). -; Note that this is generally a *bad* idea to store passwords in this file. -; *Any* user with PHP access can run 'echo get_cfg_var("mysql.default_password") -; and reveal this password! And of course, any users with read access to this -; file will be able to reveal the password as well. -; http://php.net/mysql.default-password -mysql.default_password = - -; Maximum time (in seconds) for connect timeout. -1 means no limit -; http://php.net/mysql.connect-timeout -mysql.connect_timeout = 60 - -; Trace mode. When trace_mode is active (=On), warnings for table/index scans and -; SQL-Errors will be displayed. -; http://php.net/mysql.trace-mode -mysql.trace_mode = Off - [MySQLi] ; Maximum number of persistent links. -1 means no limit. @@ -1243,6 +1196,19 @@ mysqlnd.collect_statistics = On ; http://php.net/mysqlnd.collect_memory_statistics mysqlnd.collect_memory_statistics = Off +; Records communication from all extensions using mysqlnd to the specified log +; file. +; http://php.net/mysqlnd.debug +;mysqlnd.debug = + +; Defines which queries will be logged. +; http://php.net/mysqlnd.log_mask +;mysqlnd.log_mask = 0 + +; Default size of the mysqlnd memory pool, which is used by result sets. +; http://php.net/mysqlnd.mempool_default_size +;mysqlnd.mempool_default_size = 16000 + ; Size of a pre-allocated buffer used when sending commands to MySQL in bytes. ; http://php.net/mysqlnd.net_cmd_buffer_size ;mysqlnd.net_cmd_buffer_size = 2048 @@ -1252,6 +1218,15 @@ mysqlnd.collect_memory_statistics = Off ; http://php.net/mysqlnd.net_read_buffer_size ;mysqlnd.net_read_buffer_size = 32768 +; Timeout for network requests in seconds. +; http://php.net/mysqlnd.net_read_timeout +;mysqlnd.net_read_timeout = 31536000 + +; SHA-256 Authentication Plugin related. File with the MySQL server public RSA +; key. +; http://php.net/mysqlnd.sha256_server_public_key +;mysqlnd.sha256_server_public_key = + [OCI8] ; Connection: Enables privileged connections using external @@ -1333,45 +1308,6 @@ pgsql.ignore_notice = 0 ; http://php.net/pgsql.log-notice pgsql.log_notice = 0 -[Sybase-CT] -; Allow or prevent persistent links. -; http://php.net/sybct.allow-persistent -sybct.allow_persistent = On - -; Maximum number of persistent links. -1 means no limit. -; http://php.net/sybct.max-persistent -sybct.max_persistent = -1 - -; Maximum number of links (persistent + non-persistent). -1 means no limit. -; http://php.net/sybct.max-links -sybct.max_links = -1 - -; Minimum server message severity to display. 
-; http://php.net/sybct.min-server-severity -sybct.min_server_severity = 10 - -; Minimum client message severity to display. -; http://php.net/sybct.min-client-severity -sybct.min_client_severity = 10 - -; Set per-context timeout -; http://php.net/sybct.timeout -;sybct.timeout= - -;sybct.packet_size - -; The maximum time in seconds to wait for a connection attempt to succeed before returning failure. -; Default: one minute -;sybct.login_timeout= - -; The name of the host you claim to be connecting from, for display by sp_who. -; Default: none -;sybct.hostname= - -; Allows you to define how often deadlocks are to be retried. -1 means "forever". -; Default: 0 -;sybct.deadlock_retry_count= - [bcmath] ; Number of decimal digits for all bcmath functions. ; http://php.net/bcmath.scale @@ -1510,19 +1446,6 @@ session.gc_maxlifetime = 1440 ; http://php.net/session.referer-check session.referer_check = -; How many bytes to read from the file. -; http://php.net/session.entropy-length -;session.entropy_length = 32 - -; Specified here to create the session id. -; http://php.net/session.entropy-file -; Defaults to /dev/urandom -; On systems that don't have /dev/urandom but do have /dev/arandom, this will default to /dev/arandom -; If neither are found at compile time, the default is no entropy file. -; On windows, setting the entropy_length setting will activate the -; Windows random source (using the CryptoAPI) -;session.entropy_file = /dev/urandom - ; Set to {nocache,private,public,} to determine HTTP caching aspects ; or leave this empty to avoid sending anti-caching headers. ; http://php.net/session.cache-limiter @@ -1544,15 +1467,39 @@ session.cache_expire = 180 ; http://php.net/session.use-trans-sid session.use_trans_sid = 0 -; Select a hash function for use in generating session ids. -; Possible Values -; 0 (MD5 128 bits) -; 1 (SHA-1 160 bits) -; This option may also be set to the name of any hash function supported by -; the hash extension. A list of available hashes is returned by the hash_algos() -; function. -; http://php.net/session.hash-function -session.hash_function = 0 +; Set session ID character length. This value could be between 22 to 256. +; Shorter length than default is supported only for compatibility reason. +; Users should use 32 or more chars. +; http://php.net/session.sid-length +; Default Value: 32 +; Development Value: 26 +; Production Value: 26 +session.sid_length = 26 + +; The URL rewriter will look for URLs in a defined set of HTML tags. +;
<form> is special; if you include them here, the rewriter will +; add a hidden field with the info which is otherwise appended +; to URLs. <form> tag's action attribute URL will not be modified +; unless it is specified. +; Note that all valid entries require a "=", even if no value follows. +; Default Value: "a=href,area=href,frame=src,form=" +; Development Value: "a=href,area=href,frame=src,form=" +; Production Value: "a=href,area=href,frame=src,form=" +; http://php.net/url-rewriter.tags +session.trans_sid_tags = "a=href,area=href,frame=src,form=" + +; URL rewriter does not rewrite absolute URLs by default. +; To enable rewrites for absolute pathes, target hosts must be specified +; at RUNTIME. i.e. use ini_set() +; <form> tags is special. PHP will check action attribute's URL regardless +; of session.trans_sid_tags setting. +; If no host is defined, HTTP_HOST will be used for allowed host. +; Example value: php.net,www.php.net,wiki.php.net +; Use "," for multiple hosts. No spaces are allowed. +; Default Value: "" +; Development Value: "" +; Production Value: "" +;session.trans_sid_hosts="" ; Define how many bits are stored in each character when converting ; the binary hash data to something readable. @@ -1564,18 +1511,7 @@ session.hash_function = 0 ; Development Value: 5 ; Production Value: 5 ; http://php.net/session.hash-bits-per-character -session.hash_bits_per_character = 5 - -; The URL rewriter will look for URLs in a defined set of HTML tags. -; form/fieldset are special; if you include them here, the rewriter will -; add a hidden field with the info which is otherwise appended -; to URLs. If you want XHTML conformity, remove the form entry. -; Note that all valid entries require a "=", even if no value follows. -; Default Value: "a=href,area=href,frame=src,form=,fieldset=" -; Development Value: "a=href,area=href,frame=src,input=src,form=fakeentry" -; Production Value: "a=href,area=href,frame=src,input=src,form=fakeentry" -; http://php.net/url-rewriter.tags -url_rewriter.tags = "a=href,area=href,frame=src,input=src,form=fakeentry" +session.sid_bits_per_character = 5 ; Enable upload progress tracking in $_SESSION ; Default Value: On @@ -1622,64 +1558,31 @@ url_rewriter.tags = "a=href,area=href,frame=src,input=src,form=fakeentry" ; http://php.net/session.upload-progress.min-freq ;session.upload_progress.min_freq = "1" -[MSSQL] -; Allow or prevent persistent links. -mssql.allow_persistent = On - -; Maximum number of persistent links. -1 means no limit. -mssql.max_persistent = -1 - -; Maximum number of links (persistent+non persistent). -1 means no limit. -mssql.max_links = -1 - -; Minimum error severity to display. -mssql.min_error_severity = 10 - -; Minimum message severity to display. -mssql.min_message_severity = 10 - -; Compatibility mode with old versions of PHP 3.0. -mssql.compatibility_mode = Off - -; Connect timeout -;mssql.connect_timeout = 5 - -; Query timeout -;mssql.timeout = 60 - -; Valid range 0 - 2147483647. Default = 4096. -;mssql.textlimit = 4096 - -; Valid range 0 - 2147483647. Default = 4096. -;mssql.textsize = 4096 - -; Limits the number of records in each batch. 0 = all records in one batch. -;mssql.batchsize = 0 - -; Specify how datetime and datetim4 columns are returned -; On => Returns data converted to SQL server settings -; Off => Returns values as YYYY-MM-DD hh:mm:ss -;mssql.datetimeconvert = On - -; Use NT authentication when connecting to the server -mssql.secure_connection = Off - -; Specify max number of processes. 
-1 = library default -; msdlib defaults to 25 -; FreeTDS defaults to 4096 -;mssql.max_procs = -1 - -; Specify client character set. -; If empty or not set the client charset from freetds.conf is used -; This is only used when compiled with FreeTDS -;mssql.charset = "ISO-8859-1" +; Only write session data when session data is changed. Enabled by default. +; http://php.net/session.lazy-write +;session.lazy_write = On [Assertion] +; Switch whether to compile assertions at all (to have no overhead at run-time) +; -1: Do not compile at all +; 0: Jump over assertion at run-time +; 1: Execute assertions +; Changing from or to a negative value is only possible in php.ini! (For turning assertions on and off at run-time, see assert.active, when zend.assertions = 1) +; Default Value: 1 +; Development Value: 1 +; Production Value: -1 +; http://php.net/zend.assertions +zend.assertions = -1 + ; Assert(expr); active by default. ; http://php.net/assert.active ;assert.active = On -; Issue a PHP warning for each failed assertion. +; Throw an AssertationException on failed assertions +; http://php.net/assert.exception +;assert.exception = On + +; Issue a PHP warning for each failed assertion. (Overridden by assert.exception if active) ; http://php.net/assert.warning ;assert.warning = On @@ -1723,7 +1626,7 @@ mssql.secure_connection = Off [mbstring] ; language for internal character representation. -; This affects mb_send_mail() and mbstrig.detect_order. +; This affects mb_send_mail() and mbstring.detect_order. ; http://php.net/mbstring.language ;mbstring.language = Japanese @@ -1795,7 +1698,7 @@ mssql.secure_connection = Off ; a gd image. The warning will then be displayed as notices ; disabled by default ; http://php.net/gd.jpeg-ignore-warning -;gd.jpeg_ignore_warning = 0 +;gd.jpeg_ignore_warning = 1 [exif] ; Exif UNICODE user comments are handled as UCS-2BE/UCS-2LE and JIS as JIS. @@ -1873,20 +1776,20 @@ ldap.max_links = -1 [opcache] ; Determines if Zend OPCache is enabled -;opcache.enable=0 +;opcache.enable=1 ; Determines if Zend OPCache is enabled for the CLI version of PHP ;opcache.enable_cli=0 ; The OPcache shared memory storage size. -;opcache.memory_consumption=64 +;opcache.memory_consumption=128 ; The amount of memory for interned strings in Mbytes. -;opcache.interned_strings_buffer=4 +;opcache.interned_strings_buffer=8 ; The maximum number of keys (scripts) in the OPcache hash table. -; Only numbers between 200 and 100000 are allowed. -;opcache.max_accelerated_files=2000 +; Only numbers between 200 and 1000000 are allowed. +;opcache.max_accelerated_files=10000 ; The maximum percentage of "wasted" memory until a restart is scheduled. ;opcache.max_wasted_percentage=5 @@ -1913,12 +1816,8 @@ ldap.max_links = -1 ; size of the optimized code. ;opcache.save_comments=1 -; If disabled, PHPDoc comments are not loaded from SHM, so "Doc Comments" -; may be always stored (save_comments=1), but not loaded by applications -; that don't need them anyway. -;opcache.load_comments=1 - ; If enabled, a fast shutdown sequence is used for the accelerated code +; Depending on the used Memory Manager this may cause some incompatibilities. ;opcache.fast_shutdown=0 ; Allow file existence override (file_exists, etc.) performance feature. @@ -1967,6 +1866,42 @@ ldap.max_links = -1 ; Useful for internal debugging only. ;opcache.protect_memory=0 +; Allows calling OPcache API functions only from PHP scripts which path is +; started from specified string. 
The default "" means no restriction +;opcache.restrict_api= + +; Mapping base of shared memory segments (for Windows only). All the PHP +; processes have to map shared memory into the same address space. This +; directive allows to manually fix the "Unable to reattach to base address" +; errors. +;opcache.mmap_base= + +; Enables and sets the second level cache directory. +; It should improve performance when SHM memory is full, at server restart or +; SHM reset. The default "" disables file based caching. +;opcache.file_cache= + +; Enables or disables opcode caching in shared memory. +;opcache.file_cache_only=0 + +; Enables or disables checksum validation when script loaded from file cache. +;opcache.file_cache_consistency_checks=1 + +; Implies opcache.file_cache_only=1 for a certain process that failed to +; reattach to the shared memory (for Windows only). Explicitly enabled file +; cache is required. +;opcache.file_cache_fallback=1 + +; Enables or disables copying of PHP code (text segment) into HUGE PAGES. +; This should improve performance, but requires appropriate OS configuration. +;opcache.huge_code_pages=1 + +; Validate cached file permissions. +;opcache.validate_permission=0 + +; Prevent name collisions in chroot'ed environment. +;opcache.validate_root=0 + [curl] ; A default value for the CURLOPT_CAINFO option. This is required to be an ; absolute path. diff --git a/states/roles/maintain/nfs/init.sls b/states/roles/maintain/nfs/init.sls deleted file mode 100644 index 7be0943..0000000 --- a/states/roles/maintain/nfs/init.sls +++ /dev/null @@ -1,14 +0,0 @@ -nfs-utils: - pkg.installed - -rpcbind: - service.running: - - enable: true - -nfs-client.target: - service.running: - - enable: true - -remote-fs.target: - service.running: - - enable: true diff --git a/states/roles/maintain/pepper/conf.d/pepper.conf b/states/roles/maintain/pepper/conf.d/pepper.conf deleted file mode 100644 index ad9e053..0000000 --- a/states/roles/maintain/pepper/conf.d/pepper.conf +++ /dev/null @@ -1,45 +0,0 @@ -server { - listen 80; - server_name salt.uwsp.edu; - - charset utf-8; - - #root /usr/share/nginx/html/abc.com/public; - root /opt/pepper/public; - index index.php index.html; - - location / { - try_files $uri $uri/ /index.php?$query_string; - } - - error_page 500 502 503 504 /50x.html; - location = /50x.html { - root /usr/share/nginx/html; - } - - location ~ \.php$ { - fastcgi_split_path_info ^(.+\.php)(/.+)$; - fastcgi_pass unix:/var/run/php-fpm/php-fpm.sock; - fastcgi_index index.php; - include fastcgi_params; - fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name; - fastcgi_intercept_errors off; - fastcgi_buffer_size 16k; - fastcgi_buffers 4 16k; - } - - location ~* \.(jpg|jpeg|gif|png|css|js|ico|xml)$ { - access_log off; - log_not_found off; - expires 30d; - } - - location ~ /\. 
{ - deny all; access_log off; log_not_found off; - } - - location = /favicon.ico { access_log off; log_not_found off; } - location = /robots.txt { access_log off; log_not_found off; } - - client_max_body_size 100m; -} diff --git a/states/roles/maintain/pepper/init.sls b/states/roles/maintain/pepper/init.sls deleted file mode 100644 index 0b17c00..0000000 --- a/states/roles/maintain/pepper/init.sls +++ /dev/null @@ -1,52 +0,0 @@ - -nginx: - service.running: - - enable: true - - watch: - - file: /etc/nginx/conf.d/* - -php-fpm: - service.running: - - enable: true - - watch: - - file: /etc/php-fpm.d/www.conf - -/etc/nginx/conf.d/: - file.recurse: - - source: salt://roles/maintain/pepper/conf.d/ - - user: root - - group: root - - dir_mode: 755 - - file_mode: 644 - - clean: true - -/etc/php-fpm.d/www.conf: - file.managed: - - source: salt://roles/maintain/pepper/www.conf - - user: root - - group: root - - mode: 644 - -/opt/pepper: - file.directory: - - user: nginx - - group: nginx - - recurse: - - user - - group - -/opt/pepper/storage: - file.directory: - - mode: 775 - - recurse: - - mode - -#/opt/saltpad/saltpad/modules/: -# file.recurse: -# - source: salt://roles/maintain/saltpad/modules/ -# - user: root -# - group: root -# - dir_mode: 755 -# - file_mode: 644 -# - clean: true - diff --git a/states/roles/maintain/pepper/www.conf b/states/roles/maintain/pepper/www.conf deleted file mode 100644 index e9a2137..0000000 --- a/states/roles/maintain/pepper/www.conf +++ /dev/null @@ -1,227 +0,0 @@ -; Start a new pool named 'www'. -[www] - -; The address on which to accept FastCGI requests. -; Valid syntaxes are: -; 'ip.add.re.ss:port' - to listen on a TCP socket to a specific address on -; a specific port; -; 'port' - to listen on a TCP socket to all addresses on a -; specific port; -; '/path/to/unix/socket' - to listen on a unix socket. -; Note: This value is mandatory. -listen = /var/run/php-fpm/php-fpm.sock - -; Set listen(2) backlog. A value of '-1' means unlimited. -; Default Value: -1 -;listen.backlog = -1 - -; List of ipv4 addresses of FastCGI clients which are allowed to connect. -; Equivalent to the FCGI_WEB_SERVER_ADDRS environment variable in the original -; PHP FCGI (5.2.2+). Makes sense only with a tcp listening socket. Each address -; must be separated by a comma. If this value is left blank, connections will be -; accepted from any ip address. -; Default Value: any -listen.allowed_clients = 127.0.0.1 - -; Set permissions for unix socket, if one is used. In Linux, read/write -; permissions must be set in order to allow connections from a web server. Many -; BSD-derived systems allow connections regardless of permissions. -; Default Values: user and group are set as the running user -; mode is set to 0666 -listen.owner = nginx -listen.group = nginx -;listen.mode = 0666 - -; Unix user/group of processes -; Note: The user is mandatory. If the group is not set, the default user's group -; will be used. -; RPM: apache Choosed to be able to access some dir as httpd -user = nginx -; RPM: Keep a group allowed to write in log dir. -group = nginx - -; Choose how the process manager will control the number of child processes. -; Possible Values: -; static - a fixed number (pm.max_children) of child processes; -; dynamic - the number of child processes are set dynamically based on the -; following directives: -; pm.max_children - the maximum number of children that can -; be alive at the same time. -; pm.start_servers - the number of children created on startup. 
-; pm.min_spare_servers - the minimum number of children in 'idle' -; state (waiting to process). If the number -; of 'idle' processes is less than this -; number then some children will be created. -; pm.max_spare_servers - the maximum number of children in 'idle' -; state (waiting to process). If the number -; of 'idle' processes is greater than this -; number then some children will be killed. -; Note: This value is mandatory. -pm = dynamic - -; The number of child processes to be created when pm is set to 'static' and the -; maximum number of child processes to be created when pm is set to 'dynamic'. -; This value sets the limit on the number of simultaneous requests that will be -; served. Equivalent to the ApacheMaxClients directive with mpm_prefork. -; Equivalent to the PHP_FCGI_CHILDREN environment variable in the original PHP -; CGI. -; Note: Used when pm is set to either 'static' or 'dynamic' -; Note: This value is mandatory. -pm.max_children = 50 - -; The number of child processes created on startup. -; Note: Used only when pm is set to 'dynamic' -; Default Value: min_spare_servers + (max_spare_servers - min_spare_servers) / 2 -pm.start_servers = 5 - -; The desired minimum number of idle server processes. -; Note: Used only when pm is set to 'dynamic' -; Note: Mandatory when pm is set to 'dynamic' -pm.min_spare_servers = 5 - -; The desired maximum number of idle server processes. -; Note: Used only when pm is set to 'dynamic' -; Note: Mandatory when pm is set to 'dynamic' -pm.max_spare_servers = 35 - -; The number of requests each child process should execute before respawning. -; This can be useful to work around memory leaks in 3rd party libraries. For -; endless request processing specify '0'. Equivalent to PHP_FCGI_MAX_REQUESTS. -; Default Value: 0 -;pm.max_requests = 500 - -; The URI to view the FPM status page. If this value is not set, no URI will be -; recognized as a status page. By default, the status page shows the following -; information: -; accepted conn - the number of request accepted by the pool; -; pool - the name of the pool; -; process manager - static or dynamic; -; idle processes - the number of idle processes; -; active processes - the number of active processes; -; total processes - the number of idle + active processes. -; The values of 'idle processes', 'active processes' and 'total processes' are -; updated each second. The value of 'accepted conn' is updated in real time. -; Example output: -; accepted conn: 12073 -; pool: www -; process manager: static -; idle processes: 35 -; active processes: 65 -; total processes: 100 -; By default the status page output is formatted as text/plain. Passing either -; 'html' or 'json' as a query string will return the corresponding output -; syntax. Example: -; http://www.foo.bar/status -; http://www.foo.bar/status?json -; http://www.foo.bar/status?html -; Note: The value must start with a leading slash (/). The value can be -; anything, but it may not be a good idea to use the .php extension or it -; may conflict with a real PHP file. -; Default Value: not set -;pm.status_path = /status - -; The ping URI to call the monitoring page of FPM. If this value is not set, no -; URI will be recognized as a ping page. This could be used to test from outside -; that FPM is alive and responding, or to -; - create a graph of FPM availability (rrd or such); -; - remove a server from a group if it is not responding (load balancing); -; - trigger alerts for the operating team (24/7). 
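; (Illustration, assuming the values in this pool file: with pm = dynamic, pm.max_children = 50, pm.start_servers = 5, pm.min_spare_servers = 5 and pm.max_spare_servers = 35, FPM starts 5 workers and then grows or shrinks the pool so that 5-35 workers sit idle, hard-capped at 50 in total.
;  Likewise, if pm.status_path = /status and ping.path = /ping are uncommented and the pool is served at localhost (an assumed host), a monitor can poll http://localhost/ping and expect the literal body "pong", or fetch http://localhost/status?json for machine-readable pool counters.)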
-; Note: The value must start with a leading slash (/). The value can be -; anything, but it may not be a good idea to use the .php extension or it -; may conflict with a real PHP file. -; Default Value: not set -;ping.path = /ping - -; This directive may be used to customize the response of a ping request. The -; response is formatted as text/plain with a 200 response code. -; Default Value: pong -;ping.response = pong - -; The timeout for serving a single request after which the worker process will -; be killed. This option should be used when the 'max_execution_time' ini option -; does not stop script execution for some reason. A value of '0' means 'off'. -; Available units: s(econds)(default), m(inutes), h(ours), or d(ays) -; Default Value: 0 -;request_terminate_timeout = 0 - -; The timeout for serving a single request after which a PHP backtrace will be -; dumped to the 'slowlog' file. A value of '0s' means 'off'. -; Available units: s(econds)(default), m(inutes), h(ours), or d(ays) -; Default Value: 0 -;request_slowlog_timeout = 0 - -; The log file for slow requests -; Default Value: not set -; Note: slowlog is mandatory if request_slowlog_timeout is set -slowlog = /var/log/php-fpm/www-slow.log - -; Set open file descriptor rlimit. -; Default Value: system defined value -;rlimit_files = 1024 - -; Set max core size rlimit. -; Possible Values: 'unlimited' or an integer greater or equal to 0 -; Default Value: system defined value -;rlimit_core = 0 - -; Chroot to this directory at the start. This value must be defined as an -; absolute path. When this value is not set, chroot is not used. -; Note: chrooting is a great security feature and should be used whenever -; possible. However, all PHP paths will be relative to the chroot -; (error_log, sessions.save_path, ...). -; Default Value: not set -;chroot = - -; Chdir to this directory at the start. This value must be an absolute path. -; Default Value: current directory or / when chroot -;chdir = /var/www - -; Redirect worker stdout and stderr into main error log. If not set, stdout and -; stderr will be redirected to /dev/null according to FastCGI specs. -; Default Value: no -;catch_workers_output = yes - -; Limits the extensions of the main script FPM will allow to parse. This can -; prevent configuration mistakes on the web server side. You should only limit -; FPM to .php extensions to prevent malicious users to use other extensions to -; exectute php code. -; Note: set an empty value to allow all extensions. -; Default Value: .php -;security.limit_extensions = .php .php3 .php4 .php5 - -; Pass environment variables like LD_LIBRARY_PATH. All $VARIABLEs are taken from -; the current environment. -; Default Value: clean env -;env[HOSTNAME] = $HOSTNAME -;env[PATH] = /usr/local/bin:/usr/bin:/bin -;env[TMP] = /tmp -;env[TMPDIR] = /tmp -;env[TEMP] = /tmp - -; Additional php.ini defines, specific to this pool of workers. These settings -; overwrite the values previously defined in the php.ini. The directives are the -; same as the PHP SAPI: -; php_value/php_flag - you can set classic ini defines which can -; be overwritten from PHP call 'ini_set'. -; php_admin_value/php_admin_flag - these directives won't be overwritten by -; PHP call 'ini_set' -; For php_*flag, valid values are on, off, 1, 0, true, false, yes or no. - -; Defining 'extension' will load the corresponding shared extension from -; extension_dir. 
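; (Illustration of the php_value/php_admin_value distinction described above, using this pool's settings below: a script may still override php_value[session.save_path] at runtime with ini_set('session.save_path', ...), while php_admin_value[error_log] and php_admin_flag[log_errors] are fixed by the pool and ignore ini_set() calls from PHP code.)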
Defining 'disable_functions' or 'disable_classes' will not -; overwrite previously defined php.ini values, but will append the new value -; instead. - -; Default Value: nothing is defined by default except the values in php.ini and -; specified at startup with the -d argument -;php_admin_value[sendmail_path] = /usr/sbin/sendmail -t -i -f www@my.domain.com -;php_flag[display_errors] = off -php_admin_value[error_log] = /var/log/php-fpm/www-error.log -php_admin_flag[log_errors] = on -;php_admin_value[memory_limit] = 128M - -; Set session path to a directory owned by process user -php_value[session.save_handler] = files -php_value[session.save_path] = /var/lib/php/session -php_value[soap.wsdl_cache_dir] = /var/lib/php/wsdlcache - diff --git a/states/roles/maintain/plexrequests/init.sls b/states/roles/maintain/plexrequests/init.sls deleted file mode 100644 index 91cbc29..0000000 --- a/states/roles/maintain/plexrequests/init.sls +++ /dev/null @@ -1,17 +0,0 @@ -/opt/plexrequests.sh: - file.managed: - - source: salt://roles/maintain/plexrequests/plexrequests.sh - - user: root - - group: root - - mode: 644 - -"/usr/lib/systemd/system/plexrequests.service": - file.managed: - - source: salt://roles/maintain/plexrequests/plexrequests.service - - user: root - - group: root - - mode: 644 - -plexrequests: - service.running: - - enable: true diff --git a/states/roles/maintain/plexrequests/plexrequests.service b/states/roles/maintain/plexrequests/plexrequests.service deleted file mode 100644 index 250919a..0000000 --- a/states/roles/maintain/plexrequests/plexrequests.service +++ /dev/null @@ -1,13 +0,0 @@ -[Unit] -Description=Systemd script to run PlexRequests as a service -After=network-online.target - -[Service] -User=root -Group=root -Type=simple -ExecStart=/bin/bash /opt/plexrequests.sh -TimeoutStopSec=20 - -[Install] -WantedBy=multi-user.target diff --git a/states/roles/maintain/plexrequests/plexrequests.sh b/states/roles/maintain/plexrequests/plexrequests.sh deleted file mode 100644 index 3d2dfbd..0000000 --- a/states/roles/maintain/plexrequests/plexrequests.sh +++ /dev/null @@ -1,2 +0,0 @@ -cd /opt/plexrequests -meteor diff --git a/states/roles/maintain/saltpad/conf.d/saltpad.conf b/states/roles/maintain/saltpad/conf.d/saltpad.conf deleted file mode 100644 index 0739efe..0000000 --- a/states/roles/maintain/saltpad/conf.d/saltpad.conf +++ /dev/null @@ -1,19 +0,0 @@ -server { - listen 80; - server_name csalt.s.mpp; -# ssl_certificate /opt/server.crt; -# ssl_certificate_key /opt/server.key; -# server_name YOURDNS.EXTENSION; -# ssl_certificate /etc/pki/tls/certs/wildcard.saltpad.net.crt; -# ssl_certificate_key /etc/pki/tls/certs/wildcard.saltpad.net.pem; - - location / { - proxy_pass http://localhost:8080/; -# proxy_pass http://localhost:5000/; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Host $http_host; - proxy_redirect off; - proxy_set_header X-Real-IP $remote_addr; - } -} - diff --git a/states/roles/maintain/saltpad/init.sls b/states/roles/maintain/saltpad/init.sls deleted file mode 100644 index 9369782..0000000 --- a/states/roles/maintain/saltpad/init.sls +++ /dev/null @@ -1,57 +0,0 @@ -salt-api: - pkg.installed: [] - service.running: - - enable: true - -saltpad: - service.running: - - enable: true - - watch: - - cmd: "sh /root/salt/scripts/merge.sh" - -nginx: - service.running: - - enable: true - - watch: - - file: /etc/nginx/conf.d/* - -/etc/nginx/conf.d/: - file.recurse: - - source: salt://roles/maintain/saltpad/conf.d/ - - user: root - - group: root - - 
dir_mode: 755 - - file_mode: 644 - - clean: true - -/root/salt/scripts/merge.sh: - file.managed: - - makedirs: true - - source: salt://roles/maintain/saltpad/merge.sh - - user: root - - group: root - - mode: 744 - -/opt/saltpad/saltpad/modules/: - file.recurse: - - source: salt://roles/maintain/saltpad/modules/ - - user: root - - group: root - - dir_mode: 755 - - file_mode: 644 - - clean: true - -"sh /root/salt/scripts/merge.sh": - cmd.wait: - - watch: - - file: /opt/saltpad/saltpad/modules/* - - require: - - file: /root/salt/scripts/merge.sh - -/opt/saltpad/saltpad/templates/: - file.recurse: - - source: salt://roles/maintain/saltpad/templates/ - - user: root - - group: root - - dir_mode: 755 - - file_mode: 644 diff --git a/states/roles/maintain/saltpad/merge.sh b/states/roles/maintain/saltpad/merge.sh deleted file mode 100644 index 58cc301..0000000 --- a/states/roles/maintain/saltpad/merge.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash -cd /opt/saltpad/saltpad/ -cp app.py merged.py - -for mod in modules/*.py -do - #insert each module's routes above the root ("/") route in merged.py - sed -i "/@app.route(\"\/\")/ {r $mod - N}" merged.py -done diff --git a/states/roles/maintain/saltpad/modules/firewalld.py b/states/roles/maintain/saltpad/modules/firewalld.py deleted file mode 100644 index b3f8405..0000000 --- a/states/roles/maintain/saltpad/modules/firewalld.py +++ /dev/null @@ -1,48 +0,0 @@ -import yaml - -@app.route("/firewalld/<role>") -@login_required -def firewalld(role): - env = "prod" - fname = "/srv/salt/" + env + "/pillars/roles/firewalld/" + role + ".sls" - if (os.path.exists(fname)): - firewalld = import_yaml(fname) - print(firewalld) - else: - firewalld = {'firewalld': {}} - return render_template('firewalld_edit.html', firewalld=firewalld,role=role, is_mnt=is_role_maintainer(role)) - -@app.route("/firewalld_save/<role>",methods=['POST']) -@login_required -def firewalld_save(role): - if is_role_maintainer(role): - env = "prod" - data=request.get_json() - print(data) - y = yaml.safe_dump(data,default_flow_style=False, indent=2) - # fix weird quirk - when not using default_flow_style, "-" lines aren't indented properly - y = y.replace("-"," -") - print(y) - fname = "/srv/salt/" + env + "/pillars/roles/firewalld/" + role + ".sls" - f = open(fname,"w") - f.write(y) - return redirect("/firewalld/" + role,302) - -#remove everything prior to the last "/" and then everything after the first "." in the name to ensure name isn't malicious -def cleanse_name(name): - name = re.sub('^.*/','',name)#remove everything up to and including final / - name = re.sub('^\.*','',name)#remove any .s at start of name that remain - print(name) - name = re.sub('\..*$','',name)#remove everything after and including first .
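# (Illustrative example, not from the original module: cleanse_name("roles/../web.bak.sls") returns "web" -- the path prefix through the last "/" is stripped first, then any leading dots, then everything from the first remaining "." onward.)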
- print(name) - return name - -def import_yaml(fname): - y = {} - if os.path.exists(fname): - f = open(fname,"r") - lines = f.read() - f.close() - y = yaml.load(lines) - return y - diff --git a/states/roles/maintain/saltpad/modules/roles.py b/states/roles/maintain/saltpad/modules/roles.py deleted file mode 100644 index a784474..0000000 --- a/states/roles/maintain/saltpad/modules/roles.py +++ /dev/null @@ -1,187 +0,0 @@ -import os - -@app.route("/roles") -@login_required -def roles(): - env = "prod" - host = os.uname()[1] - roles = client.run("pillar.items",client="local", tgt=host)[host]["roles"] - print(roles) - #add servers that have each role - dname = "/srv/salt/" + env + "/pillars/servers/roles/server/" - for role in roles: - roles[role]["servers"]=[] - for fname in os.listdir(dname): - f = open(dname+fname,"r") - lines = f.readlines() - f.close() - sname = os.path.splitext(fname)[0] - print(sname) - for line in lines: - role = line.replace("-","").replace("\n","").strip() - if role != "roles:" and role != "grains:": - roles[role]["servers"].append(sname) - print(roles) - - usr = session.get('username') - from subprocess import check_output - grpstr = check_output(["groups", usr]) - groups = grpstr.split(" : ")[1].split() - maintainers = [usr] - for grp in groups: - maintainers.append("%"+grp+"%") - print(maintainers) - return render_template('roles.html', roles=roles, mnts=maintainers) - -@app.route("/role_add/<role>") -@login_required -def role_add(role): - env = "prod" - #we need to create a file at pillars/roles/maintainer/[role].sls for this page. it should have the current user as initial maintainer - #Only do if file doesn't already exist - fname = "/srv/salt/" + env + "/pillars/roles/maintainer/" + role + ".sls" - if not (os.path.exists(fname)): - f = open(fname,"w") - f.write("roles:\n") - f.write(" "+role+":\n") - f.write(" maintainer:\n") - user = session.get('username') - f.write(" - "+user+"\n") - f.close() - #We need to add role to pillars/roles/maintainer/init.sls - fname = "/srv/salt/" + env + "/pillars/roles/maintainer/init.sls" - if (os.path.exists(fname)): - f = open(fname,"a") - f.write(" - roles.maintainer." + role + "\n") - f.close() - return redirect("/roles",302) - -@app.route("/role_del/<role>") -@login_required -def role_del(role): - if is_role_maintainer(role): - env = "prod" - #We need to remove role from pillars/roles/maintainer/init.sls - fname = "/srv/salt/" + env + "/pillars/roles/maintainer/init.sls" - if (os.path.exists(fname)): - f = open(fname,"r") - lines = f.readlines() - f.close() - f = open(fname,"w") - for line in lines: - if line != " - roles.maintainer."
+ role + "\n": - f.write(line) - f.close() - #we need to remove maintainer file for the role if it exists - fname = "/srv/salt/" + env + "/pillars/roles/maintainer/" + role + ".sls" - if (os.path.exists(fname)): - os.remove(fname) - #We need to remove references to the role next - dname = "/srv/salt/" + env + "/pillars/servers/roles/server/" - for fname in os.listdir(dname): - f = open(dname+fname,"r") - lines = f.readlines() - f.close() - f = open(dname+fname,"w") - for line in lines: - if line != " - " + role + "\n": - f.write(line) - f.close() - return redirect("/roles",302) - -@app.route("/role/<role>") -@login_required -def role_display(role): - #build json object - env = "prod" - rjson = {} - dname = "/srv/salt/" + env + "/pillars/roles/" - for folder in os.listdir(dname): - print(folder) - if os.path.isdir(dname + folder): - #grab matching file - fname = dname + folder + "/" + role + ".sls" - rjson[folder]=[] - if os.path.exists(fname): - f = open(fname,"r") - lines = f.readlines() - f.close() - for line in lines: - #add maintainers - if folder == "maintainer": - item = line.replace("-","").replace("\n","").strip() - if item != "roles:" and item != role+":" and item != "maintainer:": - rjson[folder].append(item) - else: - #add everything else - line=line.rstrip() - rjson[folder].append(line) - dname = "/srv/salt/" + env + "/pillars/servers/roles/server/" - rjson["servers"]=[] - for fname in os.listdir(dname): - f = open(dname+fname,"r") - lines = f.readlines() - f.close() - sname = os.path.splitext(fname)[0] - print(sname) - for line in lines: - thisrole = line.replace("-","").replace("\n","").strip() - if role == thisrole: - rjson["servers"].append(sname) - rjson["name"] = role - print(rjson) - return render_template('role_display.html', rjson=rjson, is_mnt=is_role_maintainer(rjson["name"])) - -@app.route("/role_add_server/<role>/<server>") -@login_required -def role_add_server(role, server): - if is_server_maintainer(server): - env = "prod" - #We need to add role to pillars/servers/roles/server/<server>.sls - fname = "/srv/salt/" + env + "/pillars/servers/roles/server/" + server + ".sls" - print(fname) - print(server) - print(role) - if (os.path.exists(fname)): - f = open(fname,"a") - f.write(" - " + role + "\n") - f.close() - return redirect("/role/" + role,302) - -@app.route("/role_rem_server/<role>/<server>") -@login_required -def role_rem_server(role, server): - if is_server_maintainer(server): - env = "prod" - #We need to remove role from pillars/servers/roles/server/<server>.sls - fname = "/srv/salt/" + env + "/pillars/servers/roles/server/" + server + ".sls" - print(fname) - print(server) - print(role) - if (os.path.exists(fname)): - f = open(fname,"r") - lines = f.readlines() - f.close() - f = open(fname,"w") - for line in lines: - if line != " - " + role + "\n": - f.write(line) - f.close() - return redirect("/role/" + role,302) - -def is_role_maintainer(role): - host = os.uname()[1] - maintainers = client.run("pillar.items",client="local", tgt=host)[host]["roles"][role]["maintainer"] - usr = session.get('username') - from subprocess import check_output - grpstr = check_output(["groups", usr]) - groups = grpstr.split(" : ")[1].split() - is_maintainer = False - if usr in maintainers: - is_maintainer = True - else: - for grp in groups: - if "%"+grp+"%" in maintainers: - is_maintainer = True - break - return is_maintainer diff --git a/states/roles/maintain/saltpad/modules/servers.py b/states/roles/maintain/saltpad/modules/servers.py deleted file mode 100644 index 65b5a9e..0000000 ---
a/states/roles/maintain/saltpad/modules/servers.py +++ /dev/null @@ -1,121 +0,0 @@ -@app.route("/servers") -@login_required -def servers(): - env = "prod" - host = os.uname()[1] - accepted = client.run('key.list_all', client='wheel')['data']['return']['minions'] - print(accepted) - #add maintainers for each server - dname = "/srv/salt/" + env + "/pillars/servers/maintainer/server/" - servers={} - for server in accepted: - servers[server]={} - servers[server]["maintainer"]=[] - servers[server]["roles"]=[] - print(servers) - for fname in os.listdir(dname): - f = open(dname+fname,"r") - lines = f.readlines() - f.close() - sname = os.path.splitext(fname)[0] - print(sname) - for line in lines: - maintainer = line.replace("-","").replace("\n","").strip() - if maintainer != "maintainer:": - servers[sname]["maintainer"].append(maintainer) - #add roles for each server - dname = "/srv/salt/" + env + "/pillars/servers/roles/server/" - for fname in os.listdir(dname): - f = open(dname+fname,"r") - lines = f.readlines() - f.close() - sname = os.path.splitext(fname)[0] - print(sname) - for line in lines: - role = line.replace("-","").replace("\n","").strip() - if role != "roles:" and role != "grains:": - servers[sname]["roles"].append(role) - print(servers) - return render_template('servers.html', servers=servers) - -@app.route("/servers/<server>") -@login_required -def server_display(server): - env = "prod" - host = os.uname()[1] - aroles = client.run("pillar.items",client="local", tgt=host)[host]["roles"] - sroles=[] - dname = "/srv/salt/" + env + "/pillars/servers/roles/server/" - fname = server + ".sls" - f = open(dname+fname,"r") - lines = f.readlines() - f.close() - for line in lines: - role = line.replace("-","").replace("\n","").strip() - if role != "roles:" and role != "grains:": - sroles.append(role) - del aroles[role] - return render_template('server_display.html', server=server, aroles=aroles, sroles=sroles,is_mnt=is_server_maintainer(server)) - -@app.route("/server_add_role/<server>/<role>") -@login_required -def server_add_role(server, role): - if is_server_maintainer(server): - env = "prod" - #We need to add role to pillars/servers/roles/server/<server>.sls - fname = "/srv/salt/" + env + "/pillars/servers/roles/server/" + server + ".sls" - print(fname) - print(server) - print(role) - if (os.path.exists(fname)): - f = open(fname,"a") - f.write(" - " + role + "\n") - f.close() - return redirect("/servers/" + server,302) - -@app.route("/server_rem_role/<server>/<role>") -@login_required -def server_rem_role(server, role): - if is_server_maintainer(server): - env = "prod" - #We need to remove role from pillars/servers/roles/server/<server>.sls - fname = "/srv/salt/" + env + "/pillars/servers/roles/server/" + server + ".sls" - print(fname) - print(server) - print(role) - if (os.path.exists(fname)): - f = open(fname,"r") - lines = f.readlines() - f.close() - f = open(fname,"w") - for line in lines: - if line != " - " + role + "\n": - f.write(line) - f.close() - return redirect("/servers/" + server,302) - -def is_server_maintainer(server): - env = "prod" - fname = "/srv/salt/" + env + "/pillars/servers/maintainer/server/"+server+".sls" - f = open(fname,"r") - lines = f.readlines() - f.close() - maintainers = [] - for line in lines: - maintainer = line.replace("-","").replace("\n","").strip() - if maintainer != "maintainer:": - maintainers.append(maintainer) - usr = session.get('username') - from subprocess import check_output - grpstr = check_output(["groups", usr]) - groups = grpstr.split(" : ")[1].split() - is_maintainer = False - if usr in
maintainers: - is_maintainer = True - else: - for grp in groups: - if "%"+grp+"%" in maintainers: - is_maintainer = True - break - return is_maintainer - diff --git a/states/roles/maintain/saltpad/templates/base.html b/states/roles/maintain/saltpad/templates/base.html deleted file mode 100644 index 28d2fe7..0000000 --- a/states/roles/maintain/saltpad/templates/base.html +++ /dev/null @@ -1,49 +0,0 @@ - - - - - - - - - Dashboard - SaltPad - - - - - - - - - - - - - {% block head %} - {% endblock %} - - - - - {% block body %} - {% endblock %} - - - - - - - - - - - - - - {% block scripts %} - {% endblock %} - - - diff --git a/states/roles/maintain/saltpad/templates/base_logged.html b/states/roles/maintain/saltpad/templates/base_logged.html deleted file mode 100644 index d863218..0000000 --- a/states/roles/maintain/saltpad/templates/base_logged.html +++ /dev/null @@ -1,137 +0,0 @@ -{% extends "base.html" %} -{% from "macros.jinja" import print_flash_messages %} -{% block body %} - -
- - - - -
- - {{ print_flash_messages()|safe }} - - {% block page %} - {% endblock %} - -
- -
- -{% endblock %} diff --git a/states/roles/maintain/saltpad/templates/firewalld_edit.html b/states/roles/maintain/saltpad/templates/firewalld_edit.html deleted file mode 100644 index f494195..0000000 --- a/states/roles/maintain/saltpad/templates/firewalld_edit.html +++ /dev/null @@ -1,359 +0,0 @@ -{% extends "base_logged.html" %} -{% block head %} - -{% endblock %} -{% block page %} -
- -
-
-

Module Firewalld

- -
-
- -{{firewalld["firewalld"].update(base_zone={})}} -
-
- -
- - - - {% for zone in firewalld["firewalld"] %} -
- -
-
- - - - - - - - - {% if firewalld["firewalld"][zone] is defined %} - {% for source in firewalld["firewalld"][zone]["source"] %} - - - - - {% endfor %} - {% endif %} - -
SourceRemove
{{ source }}
-
-
- - -
-
- - -
-
- - - - - - - - - {% if firewalld["firewalld"][zone] is defined %} - {% for service in firewalld["firewalld"][zone]["service"] %} - - - - - {% endfor %} - {% endif %} - -
ServiceRemove
{{ service }}
-
-
- - -
-
- - -
-
- - - - - - - - - {% if firewalld["firewalld"][zone] is defined %} - {% for port in firewalld["firewalld"][zone]["port"] %} - - - - - {% endfor %} - {% endif %} - -
PortRemove
{{ port }}
-
-
- - -
-
- -
-

Custom Rules


-
- - - - - - - - - - - - {% if firewalld["firewalld"][zone] is defined %} - {% for rule in firewalld["firewalld"][zone]["rule"] %} - - - - - - - - {% endfor %} - {% endif %} - -
NameSourcePortActionRemove
{{ rule }}{{ firewalld["firewalld"][zone]["rule"][rule]["source"] }}{{ firewalld["firewalld"][zone]["rule"][rule]["port"] }}{{ firewalld["firewalld"][zone]["rule"][rule]["action"] }}
-
-
- Name: - Source: - Port: - Action: - -
-
- - -
-

Include

-

include data from other zones to avoid rewriting it

-
- - - - - - - - - {% if firewalld["firewalld"][zone] is defined %} - {% for include in firewalld["firewalld"][zone]["include"] %} - - - - - {% endfor %} - {% endif %} - -
ZoneRemove
{{ include }}
-
-
- - -
-
-
-{% endfor %} - -
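{# Illustration, shape assumed from the loops above and from firewalld.py: each zone block rendered here edits pillar data of the form firewalld: {zone_name: {source: [...], service: [...], port: [...], rule: {rule_name: {source: ..., port: ..., action: ...}}, include: [...]}}, where zone_name and rule_name are placeholders. #}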
-{% endblock %} - -{% block scripts %} - -{% endblock %} \ No newline at end of file diff --git a/states/roles/maintain/saltpad/templates/mods.html b/states/roles/maintain/saltpad/templates/mods.html deleted file mode 100644 index b254e5b..0000000 --- a/states/roles/maintain/saltpad/templates/mods.html +++ /dev/null @@ -1,2 +0,0 @@ -
  • Roles
  • -
  • Servers
  • \ No newline at end of file diff --git a/states/roles/maintain/saltpad/templates/role_display.html b/states/roles/maintain/saltpad/templates/role_display.html deleted file mode 100644 index fad6ad2..0000000 --- a/states/roles/maintain/saltpad/templates/role_display.html +++ /dev/null @@ -1,136 +0,0 @@ -{% extends "base_logged.html" %} -{% block page %} -
    - -
    -
    -

    Module Roles

    - -
    -
    - -
    -
    -

    Maintainers

    -
    - - - - - {% if is_mnt %} - - {% endif %} - - - - {% for maintainer in rjson["maintainer"] %} - - - - - {% endfor %} - -
    MaintainerRemove
    {{ maintainer }} - {% if is_mnt %} - - {% endif %} -
    -
    -
    - {% if is_mnt%} - - - {% endif %} -
    -
    -
    -

    Servers

    -
    - - - - - {% if is_mnt %} - - {% endif %} - - - - {% for server in rjson["servers"] %} - - - - - {% endfor %} - -
    ServerRemove
    {{ server }} - {% if is_mnt %} - - {% endif %} -
    -
    -
    - {% if is_mnt %} - - - {% endif %} -
    -
    - -{% for category in rjson %} - {% if category != "maintainer" and category != "servers" and category != "name" %} -
    -

    {{ category }}

    -
    - - - - - - - - - -
    {{ category }}
    -
    {% for line in rjson[category] %}
    -{{ line }}{% endfor %}
    -
    -
    - -
    - {% endif %} -{% endfor %} - - -
    -
    -{% endblock %} - -{% block scripts %} - -{% endblock %} diff --git a/states/roles/maintain/saltpad/templates/roles.html b/states/roles/maintain/saltpad/templates/roles.html deleted file mode 100644 index 6ef7341..0000000 --- a/states/roles/maintain/saltpad/templates/roles.html +++ /dev/null @@ -1,92 +0,0 @@ -{% extends "base_logged.html" %} -{% block page %} -
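{# Note, behavior taken from roles.py: maintainer entries wrapped in percent signs, e.g. "%admins%" (hypothetical), name a unix group rather than a single user, and membership in that group counts as maintainership; that is why the Delete column below checks each maintainer against mnts. #}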
    - -
    -
    -

    Modules Roles

    - -
    -
    - -
    -
    -

    Roles

    -
    - - - - - - - - - - - {% for role in roles %} - - - - - - - {% endfor %} - -
    RoleMaintainersServersDelete
    {{ role }} - {% for maintainer in roles[role]["maintainer"] %} - {{ maintainer }}
    - {% endfor %} -
    - {% for server in roles[role]["servers"] %} - {{ server }}
    - {% endfor %} -
    - {% for maintainer in roles[role]["maintainer"] %} - {% if maintainer in mnts %} - - {% endif %} - {% endfor %} -
    -
    -
    - - -
    -
    -
    -
    -{% endblock %} - -{% block scripts %} - -{% endblock %} \ No newline at end of file diff --git a/states/roles/maintain/saltpad/templates/server_display.html b/states/roles/maintain/saltpad/templates/server_display.html deleted file mode 100644 index ae8718b..0000000 --- a/states/roles/maintain/saltpad/templates/server_display.html +++ /dev/null @@ -1,74 +0,0 @@ -{% extends "base_logged.html" %} -{% block page %} -
    - -
    -
    -

    Modules Servers

    - -
    -
    - -
    -
    - {% if not is_mnt %} -

    Note: Server is maintained by someone else

    - {% endif %} -

    Server Roles

    -
    - - - - - {% if is_mnt %} - - {% endif %} - - - - {% for role in sroles %} - - - - - {% endfor %} - -
    RolesRemove
    {{ role }} - {% if is_mnt %} - - {% endif %} -
    -
    - {% if is_mnt %} -
    - - -
    - {% endif %} -
    -
    -
    -{% endblock %} - -{% block scripts %} - -{% endblock %} \ No newline at end of file diff --git a/states/roles/maintain/saltpad/templates/servers.html b/states/roles/maintain/saltpad/templates/servers.html deleted file mode 100644 index d37bf92..0000000 --- a/states/roles/maintain/saltpad/templates/servers.html +++ /dev/null @@ -1,49 +0,0 @@ -{% extends "base_logged.html" %} -{% block page %} -
    - -
    -
    -

    Modules Servers

    - -
    -
    - -
    -
    -

    Servers

    -
    - - - - - - - - - - {% for server in servers %} - - - - - - {% endfor %} - -
    ServerMaintainersRoles
    {{ server }} - {% for maintainer in servers[server]["maintainer"] %} - {{ maintainer }}
    - {% endfor %} -
    - {% for role in servers[server]["roles"] %} - {{ role }}
    - {% endfor %} -
    -
    -
    -
    -
    -{% endblock %} \ No newline at end of file diff --git a/states/roles/maintain/saltpad/templates/yaml.js b/states/roles/maintain/saltpad/templates/yaml.js deleted file mode 100644 index 981455c..0000000 --- a/states/roles/maintain/saltpad/templates/yaml.js +++ /dev/null @@ -1,1866 +0,0 @@ -(function e(t,n,r){function s(o,u){if(!n[o]){if(!t[o]){var a=typeof require=="function"&&require;if(!u&&a)return a(o,!0);if(i)return i(o,!0);var f=new Error("Cannot find module '"+o+"'");throw f.code="MODULE_NOT_FOUND",f}var l=n[o]={exports:{}};t[o][0].call(l.exports,function(e){var n=t[o][1][e];return s(n?n:e)},l,l.exports,e,t,n,r)}return n[o].exports}var i=typeof require=="function"&&require;for(var o=0;o ref; i = 0 <= ref ? ++j : --j) { - mapping[Escaper.LIST_ESCAPEES[i]] = Escaper.LIST_ESCAPED[i]; - } - return mapping; - })(); - - Escaper.PATTERN_CHARACTERS_TO_ESCAPE = new Pattern('[\\x00-\\x1f]|\xc2\x85|\xc2\xa0|\xe2\x80\xa8|\xe2\x80\xa9'); - - Escaper.PATTERN_MAPPING_ESCAPEES = new Pattern(Escaper.LIST_ESCAPEES.join('|')); - - Escaper.PATTERN_SINGLE_QUOTING = new Pattern('[\\s\'":{}[\\],&*#?]|^[-?|<>=!%@`]'); - - Escaper.requiresDoubleQuoting = function(value) { - return this.PATTERN_CHARACTERS_TO_ESCAPE.test(value); - }; - - Escaper.escapeWithDoubleQuotes = function(value) { - var result; - result = this.PATTERN_MAPPING_ESCAPEES.replace(value, (function(_this) { - return function(str) { - return _this.MAPPING_ESCAPEES_TO_ESCAPED[str]; - }; - })(this)); - return '"' + result + '"'; - }; - - Escaper.requiresSingleQuoting = function(value) { - return this.PATTERN_SINGLE_QUOTING.test(value); - }; - - Escaper.escapeWithSingleQuotes = function(value) { - return "'" + value.replace(/'/g, "''") + "'"; - }; - - return Escaper; - -})(); - -module.exports = Escaper; - - - -},{"./Pattern":7}],3:[function(require,module,exports){ -var DumpException, - extend = function(child, parent) { for (var key in parent) { if (hasProp.call(parent, key)) child[key] = parent[key]; } function ctor() { this.constructor = child; } ctor.prototype = parent.prototype; child.prototype = new ctor(); child.__super__ = parent.prototype; return child; }, - hasProp = {}.hasOwnProperty; - -DumpException = (function(superClass) { - extend(DumpException, superClass); - - function DumpException(message, parsedLine, snippet) { - this.message = message; - this.parsedLine = parsedLine; - this.snippet = snippet; - } - - DumpException.prototype.toString = function() { - if ((this.parsedLine != null) && (this.snippet != null)) { - return ' ' + this.message + ' (line ' + this.parsedLine + ': \'' + this.snippet + '\')'; - } else { - return ' ' + this.message; - } - }; - - return DumpException; - -})(Error); - -module.exports = DumpException; - - - -},{}],4:[function(require,module,exports){ -var ParseException, - extend = function(child, parent) { for (var key in parent) { if (hasProp.call(parent, key)) child[key] = parent[key]; } function ctor() { this.constructor = child; } ctor.prototype = parent.prototype; child.prototype = new ctor(); child.__super__ = parent.prototype; return child; }, - hasProp = {}.hasOwnProperty; - -ParseException = (function(superClass) { - extend(ParseException, superClass); - - function ParseException(message, parsedLine, snippet) { - this.message = message; - this.parsedLine = parsedLine; - this.snippet = snippet; - } - - ParseException.prototype.toString = function() { - if ((this.parsedLine != null) && (this.snippet != null)) { - return ' ' + this.message + ' (line ' + this.parsedLine + ': \'' + this.snippet + 
'\')'; - } else { - return ' ' + this.message; - } - }; - - return ParseException; - -})(Error); - -module.exports = ParseException; - - - -},{}],5:[function(require,module,exports){ -var DumpException, Escaper, Inline, ParseException, Pattern, Unescaper, Utils, - indexOf = [].indexOf || function(item) { for (var i = 0, l = this.length; i < l; i++) { if (i in this && this[i] === item) return i; } return -1; }; - -Pattern = require('./Pattern'); - -Unescaper = require('./Unescaper'); - -Escaper = require('./Escaper'); - -Utils = require('./Utils'); - -ParseException = require('./Exception/ParseException'); - -DumpException = require('./Exception/DumpException'); - -Inline = (function() { - function Inline() {} - - Inline.REGEX_QUOTED_STRING = '(?:"(?:[^"\\\\]*(?:\\\\.[^"\\\\]*)*)"|\'(?:[^\']*(?:\'\'[^\']*)*)\')'; - - Inline.PATTERN_TRAILING_COMMENTS = new Pattern('^\\s*#.*$'); - - Inline.PATTERN_QUOTED_SCALAR = new Pattern('^' + Inline.REGEX_QUOTED_STRING); - - Inline.PATTERN_THOUSAND_NUMERIC_SCALAR = new Pattern('^(-|\\+)?[0-9,]+(\\.[0-9]+)?$'); - - Inline.PATTERN_SCALAR_BY_DELIMITERS = {}; - - Inline.settings = {}; - - Inline.configure = function(exceptionOnInvalidType, objectDecoder) { - if (exceptionOnInvalidType == null) { - exceptionOnInvalidType = null; - } - if (objectDecoder == null) { - objectDecoder = null; - } - this.settings.exceptionOnInvalidType = exceptionOnInvalidType; - this.settings.objectDecoder = objectDecoder; - }; - - Inline.parse = function(value, exceptionOnInvalidType, objectDecoder) { - var context, result; - if (exceptionOnInvalidType == null) { - exceptionOnInvalidType = false; - } - if (objectDecoder == null) { - objectDecoder = null; - } - this.settings.exceptionOnInvalidType = exceptionOnInvalidType; - this.settings.objectDecoder = objectDecoder; - if (value == null) { - return ''; - } - value = Utils.trim(value); - if (0 === value.length) { - return ''; - } - context = { - exceptionOnInvalidType: exceptionOnInvalidType, - objectDecoder: objectDecoder, - i: 0 - }; - switch (value.charAt(0)) { - case '[': - result = this.parseSequence(value, context); - ++context.i; - break; - case '{': - result = this.parseMapping(value, context); - ++context.i; - break; - default: - result = this.parseScalar(value, null, ['"', "'"], context); - } - if (this.PATTERN_TRAILING_COMMENTS.replace(value.slice(context.i), '') !== '') { - throw new ParseException('Unexpected characters near "' + value.slice(context.i) + '".'); - } - return result; - }; - - Inline.dump = function(value, exceptionOnInvalidType, objectEncoder) { - var ref, result, type; - if (exceptionOnInvalidType == null) { - exceptionOnInvalidType = false; - } - if (objectEncoder == null) { - objectEncoder = null; - } - if (value == null) { - return 'null'; - } - type = typeof value; - if (type === 'object') { - if (value instanceof Date) { - return value.toISOString(); - } else if (objectEncoder != null) { - result = objectEncoder(value); - if (typeof result === 'string' || (result != null)) { - return result; - } - } - return this.dumpObject(value); - } - if (type === 'boolean') { - return (value ? 'true' : 'false'); - } - if (Utils.isDigits(value)) { - return (type === 'string' ? "'" + value + "'" : String(parseInt(value))); - } - if (Utils.isNumeric(value)) { - return (type === 'string' ? "'" + value + "'" : String(parseFloat(value))); - } - if (type === 'number') { - return (value === Infinity ? '.Inf' : (value === -Infinity ? '-.Inf' : (isNaN(value) ? 
'.NaN' : value))); - } - if (Escaper.requiresDoubleQuoting(value)) { - return Escaper.escapeWithDoubleQuotes(value); - } - if (Escaper.requiresSingleQuoting(value)) { - return Escaper.escapeWithSingleQuotes(value); - } - if ('' === value) { - return '""'; - } - if (Utils.PATTERN_DATE.test(value)) { - return "'" + value + "'"; - } - if ((ref = value.toLowerCase()) === 'null' || ref === '~' || ref === 'true' || ref === 'false') { - return "'" + value + "'"; - } - return value; - }; - - Inline.dumpObject = function(value, exceptionOnInvalidType, objectSupport) { - var j, key, len1, output, val; - if (objectSupport == null) { - objectSupport = null; - } - if (value instanceof Array) { - output = []; - for (j = 0, len1 = value.length; j < len1; j++) { - val = value[j]; - output.push(this.dump(val)); - } - return '[' + output.join(', ') + ']'; - } else { - output = []; - for (key in value) { - val = value[key]; - output.push(this.dump(key) + ': ' + this.dump(val)); - } - return '{' + output.join(', ') + '}'; - } - }; - - Inline.parseScalar = function(scalar, delimiters, stringDelimiters, context, evaluate) { - var i, joinedDelimiters, match, output, pattern, ref, ref1, strpos, tmp; - if (delimiters == null) { - delimiters = null; - } - if (stringDelimiters == null) { - stringDelimiters = ['"', "'"]; - } - if (context == null) { - context = null; - } - if (evaluate == null) { - evaluate = true; - } - if (context == null) { - context = { - exceptionOnInvalidType: this.settings.exceptionOnInvalidType, - objectDecoder: this.settings.objectDecoder, - i: 0 - }; - } - i = context.i; - if (ref = scalar.charAt(i), indexOf.call(stringDelimiters, ref) >= 0) { - output = this.parseQuotedScalar(scalar, context); - i = context.i; - if (delimiters != null) { - tmp = Utils.ltrim(scalar.slice(i), ' '); - if (!(ref1 = tmp.charAt(0), indexOf.call(delimiters, ref1) >= 0)) { - throw new ParseException('Unexpected characters (' + scalar.slice(i) + ').'); - } - } - } else { - if (!delimiters) { - output = scalar.slice(i); - i += output.length; - strpos = output.indexOf(' #'); - if (strpos !== -1) { - output = Utils.rtrim(output.slice(0, strpos)); - } - } else { - joinedDelimiters = delimiters.join('|'); - pattern = this.PATTERN_SCALAR_BY_DELIMITERS[joinedDelimiters]; - if (pattern == null) { - pattern = new Pattern('^(.+?)(' + joinedDelimiters + ')'); - this.PATTERN_SCALAR_BY_DELIMITERS[joinedDelimiters] = pattern; - } - if (match = pattern.exec(scalar.slice(i))) { - output = match[1]; - i += output.length; - } else { - throw new ParseException('Malformed inline YAML string (' + scalar + ').'); - } - } - if (evaluate) { - output = this.evaluateScalar(output, context); - } - } - context.i = i; - return output; - }; - - Inline.parseQuotedScalar = function(scalar, context) { - var i, match, output; - i = context.i; - if (!(match = this.PATTERN_QUOTED_SCALAR.exec(scalar.slice(i)))) { - throw new ParseException('Malformed inline YAML string (' + scalar.slice(i) + ').'); - } - output = match[0].substr(1, match[0].length - 2); - if ('"' === scalar.charAt(i)) { - output = Unescaper.unescapeDoubleQuotedString(output); - } else { - output = Unescaper.unescapeSingleQuotedString(output); - } - i += match[0].length; - context.i = i; - return output; - }; - - Inline.parseSequence = function(sequence, context) { - var e, i, isQuoted, len, output, ref, value; - output = []; - len = sequence.length; - i = context.i; - i += 1; - while (i < len) { - context.i = i; - switch (sequence.charAt(i)) { - case '[': - 
output.push(this.parseSequence(sequence, context)); - i = context.i; - break; - case '{': - output.push(this.parseMapping(sequence, context)); - i = context.i; - break; - case ']': - return output; - case ',': - case ' ': - case "\n": - break; - default: - isQuoted = ((ref = sequence.charAt(i)) === '"' || ref === "'"); - value = this.parseScalar(sequence, [',', ']'], ['"', "'"], context); - i = context.i; - if (!isQuoted && typeof value === 'string' && (value.indexOf(': ') !== -1 || value.indexOf(":\n") !== -1)) { - try { - value = this.parseMapping('{' + value + '}'); - } catch (_error) { - e = _error; - } - } - output.push(value); - --i; - } - ++i; - } - throw new ParseException('Malformed inline YAML string ' + sequence); - }; - - Inline.parseMapping = function(mapping, context) { - var done, i, key, len, output, shouldContinueWhileLoop, value; - output = {}; - len = mapping.length; - i = context.i; - i += 1; - shouldContinueWhileLoop = false; - while (i < len) { - context.i = i; - switch (mapping.charAt(i)) { - case ' ': - case ',': - case "\n": - ++i; - context.i = i; - shouldContinueWhileLoop = true; - break; - case '}': - return output; - } - if (shouldContinueWhileLoop) { - shouldContinueWhileLoop = false; - continue; - } - key = this.parseScalar(mapping, [':', ' ', "\n"], ['"', "'"], context, false); - i = context.i; - done = false; - while (i < len) { - context.i = i; - switch (mapping.charAt(i)) { - case '[': - value = this.parseSequence(mapping, context); - i = context.i; - if (output[key] === void 0) { - output[key] = value; - } - done = true; - break; - case '{': - value = this.parseMapping(mapping, context); - i = context.i; - if (output[key] === void 0) { - output[key] = value; - } - done = true; - break; - case ':': - case ' ': - case "\n": - break; - default: - value = this.parseScalar(mapping, [',', '}'], ['"', "'"], context); - i = context.i; - if (output[key] === void 0) { - output[key] = value; - } - done = true; - --i; - } - ++i; - if (done) { - break; - } - } - } - throw new ParseException('Malformed inline YAML string ' + mapping); - }; - - Inline.evaluateScalar = function(scalar, context) { - var cast, date, exceptionOnInvalidType, firstChar, firstSpace, firstWord, objectDecoder, raw, scalarLower, subValue, trimmedScalar; - scalar = Utils.trim(scalar); - scalarLower = scalar.toLowerCase(); - switch (scalarLower) { - case 'null': - case '': - case '~': - return null; - case 'true': - return true; - case 'false': - return false; - case '.inf': - return Infinity; - case '.nan': - return NaN; - case '-.inf': - return Infinity; - default: - firstChar = scalarLower.charAt(0); - switch (firstChar) { - case '!': - firstSpace = scalar.indexOf(' '); - if (firstSpace === -1) { - firstWord = scalarLower; - } else { - firstWord = scalarLower.slice(0, firstSpace); - } - switch (firstWord) { - case '!': - if (firstSpace !== -1) { - return parseInt(this.parseScalar(scalar.slice(2))); - } - return null; - case '!str': - return Utils.ltrim(scalar.slice(4)); - case '!!str': - return Utils.ltrim(scalar.slice(5)); - case '!!int': - return parseInt(this.parseScalar(scalar.slice(5))); - case '!!bool': - return Utils.parseBoolean(this.parseScalar(scalar.slice(6)), false); - case '!!float': - return parseFloat(this.parseScalar(scalar.slice(7))); - case '!!timestamp': - return Utils.stringToDate(Utils.ltrim(scalar.slice(11))); - default: - if (context == null) { - context = { - exceptionOnInvalidType: this.settings.exceptionOnInvalidType, - objectDecoder: this.settings.objectDecoder, - i: 
0 - }; - } - objectDecoder = context.objectDecoder, exceptionOnInvalidType = context.exceptionOnInvalidType; - if (objectDecoder) { - trimmedScalar = Utils.rtrim(scalar); - firstSpace = trimmedScalar.indexOf(' '); - if (firstSpace === -1) { - return objectDecoder(trimmedScalar, null); - } else { - subValue = Utils.ltrim(trimmedScalar.slice(firstSpace + 1)); - if (!(subValue.length > 0)) { - subValue = null; - } - return objectDecoder(trimmedScalar.slice(0, firstSpace), subValue); - } - } - if (exceptionOnInvalidType) { - throw new ParseException('Custom object support when parsing a YAML file has been disabled.'); - } - return null; - } - break; - case '0': - if ('0x' === scalar.slice(0, 2)) { - return Utils.hexDec(scalar); - } else if (Utils.isDigits(scalar)) { - return Utils.octDec(scalar); - } else if (Utils.isNumeric(scalar)) { - return parseFloat(scalar); - } else { - return scalar; - } - break; - case '+': - if (Utils.isDigits(scalar)) { - raw = scalar; - cast = parseInt(raw); - if (raw === String(cast)) { - return cast; - } else { - return raw; - } - } else if (Utils.isNumeric(scalar)) { - return parseFloat(scalar); - } else if (this.PATTERN_THOUSAND_NUMERIC_SCALAR.test(scalar)) { - return parseFloat(scalar.replace(',', '')); - } - return scalar; - case '-': - if (Utils.isDigits(scalar.slice(1))) { - if ('0' === scalar.charAt(1)) { - return -Utils.octDec(scalar.slice(1)); - } else { - raw = scalar.slice(1); - cast = parseInt(raw); - if (raw === String(cast)) { - return -cast; - } else { - return -raw; - } - } - } else if (Utils.isNumeric(scalar)) { - return parseFloat(scalar); - } else if (this.PATTERN_THOUSAND_NUMERIC_SCALAR.test(scalar)) { - return parseFloat(scalar.replace(',', '')); - } - return scalar; - default: - if (date = Utils.stringToDate(scalar)) { - return date; - } else if (Utils.isNumeric(scalar)) { - return parseFloat(scalar); - } else if (this.PATTERN_THOUSAND_NUMERIC_SCALAR.test(scalar)) { - return parseFloat(scalar.replace(',', '')); - } - return scalar; - } - } - }; - - return Inline; - -})(); - -module.exports = Inline; - - - -},{"./Escaper":2,"./Exception/DumpException":3,"./Exception/ParseException":4,"./Pattern":7,"./Unescaper":8,"./Utils":9}],6:[function(require,module,exports){ -var Inline, ParseException, Parser, Pattern, Utils; - -Inline = require('./Inline'); - -Pattern = require('./Pattern'); - -Utils = require('./Utils'); - -ParseException = require('./Exception/ParseException'); - -Parser = (function() { - Parser.prototype.PATTERN_FOLDED_SCALAR_ALL = new Pattern('^(?:(?<type>![^\\|>]*)\\s+)?(?<separator>\\||>)(?<modifiers>\\+|\\-|\\d+|\\+\\d+|\\-\\d+|\\d+\\+|\\d+\\-)?(?<comments> +#.*)?$'); - - Parser.prototype.PATTERN_FOLDED_SCALAR_END = new Pattern('(?<separator>\\||>)(?<modifiers>\\+|\\-|\\d+|\\+\\d+|\\-\\d+|\\d+\\+|\\d+\\-)?(?<comments> +#.*)?$'); - - Parser.prototype.PATTERN_SEQUENCE_ITEM = new Pattern('^\\-((?<leadspaces>\\s+)(?<value>.+?))?\\s*$'); - - Parser.prototype.PATTERN_ANCHOR_VALUE = new Pattern('^&(?<ref>[^ ]+) *(?<value>.*)'); - - Parser.prototype.PATTERN_COMPACT_NOTATION = new Pattern('^(?<key>' + Inline.REGEX_QUOTED_STRING + '|[^ \'"\\{\\[].*?) *\\:(\\s+(?<value>.+?))?\\s*$'); - - Parser.prototype.PATTERN_MAPPING_ITEM = new Pattern('^(?<key>' + Inline.REGEX_QUOTED_STRING + '|[^ \'"\\[\\{].*?) *\\:(\\s+(?<value>.+?))?\\s*$'); - - Parser.prototype.PATTERN_DECIMAL = new Pattern('\\d+'); - - Parser.prototype.PATTERN_INDENT_SPACES = new Pattern('^ +'); - - Parser.prototype.PATTERN_TRAILING_LINES = new Pattern('(\n*)$'); - - Parser.prototype.PATTERN_YAML_HEADER = new Pattern('^\\%YAML[: ][\\d\\.]+.*\n'); - - Parser.prototype.PATTERN_LEADING_COMMENTS = new Pattern('^(\\#.*?\n)+'); - - Parser.prototype.PATTERN_DOCUMENT_MARKER_START = new Pattern('^\\-\\-\\-.*?\n'); - - Parser.prototype.PATTERN_DOCUMENT_MARKER_END = new Pattern('^\\.\\.\\.\\s*$'); - - Parser.prototype.PATTERN_FOLDED_SCALAR_BY_INDENTATION = {}; - - Parser.prototype.CONTEXT_NONE = 0; - - Parser.prototype.CONTEXT_SEQUENCE = 1; - - Parser.prototype.CONTEXT_MAPPING = 2; - - function Parser(offset) { - this.offset = offset != null ? offset : 0; - this.lines = []; - this.currentLineNb = -1; - this.currentLine = ''; - this.refs = {}; - } - - Parser.prototype.parse = function(value, exceptionOnInvalidType, objectDecoder) { - var alias, allowOverwrite, block, c, context, data, e, first, i, indent, isRef, j, k, key, l, lastKey, len, len1, len2, len3, lineCount, m, matches, mergeNode, n, name, parsed, parsedItem, parser, ref, ref1, ref2, refName, refValue, val, values; - if (exceptionOnInvalidType == null) { - exceptionOnInvalidType = false; - } - if (objectDecoder == null) { - objectDecoder = null; - } - this.currentLineNb = -1; - this.currentLine = ''; - this.lines = this.cleanup(value).split("\n"); - data = null; - context = this.CONTEXT_NONE; - allowOverwrite = false; - while (this.moveToNextLine()) { - if (this.isCurrentLineEmpty()) { - continue; - } - if ("\t" === this.currentLine[0]) { - throw new ParseException('A YAML file cannot contain tabs as indentation.', this.getRealCurrentLineNb() + 1, this.currentLine); - } - isRef = mergeNode = false; - if (values = this.PATTERN_SEQUENCE_ITEM.exec(this.currentLine)) { - if (this.CONTEXT_MAPPING === context) { - throw new ParseException('You cannot define a sequence item when in a mapping'); - } - context = this.CONTEXT_SEQUENCE; - if (data == null) { - data = []; - } - if ((values.value != null) && (matches = this.PATTERN_ANCHOR_VALUE.exec(values.value))) { - isRef = matches.ref; - values.value = matches.value; - } - if (!(values.value != null) || '' === Utils.trim(values.value, ' ') || Utils.ltrim(values.value, ' ').indexOf('#') === 0) { - if (this.currentLineNb < this.lines.length - 1 && !this.isNextLineUnIndentedCollection()) { - c = this.getRealCurrentLineNb() + 1; - parser = new Parser(c); - parser.refs = this.refs; - data.push(parser.parse(this.getNextEmbedBlock(null, true), exceptionOnInvalidType, objectDecoder)); - } else { - data.push(null); - } - } else { - if (((ref = values.leadspaces) != null ?
ref.length : void 0) && (matches = this.PATTERN_COMPACT_NOTATION.exec(values.value))) { - c = this.getRealCurrentLineNb(); - parser = new Parser(c); - parser.refs = this.refs; - block = values.value; - indent = this.getCurrentLineIndentation(); - if (this.isNextLineIndented(false)) { - block += "\n" + this.getNextEmbedBlock(indent + values.leadspaces.length + 1, true); - } - data.push(parser.parse(block, exceptionOnInvalidType, objectDecoder)); - } else { - data.push(this.parseValue(values.value, exceptionOnInvalidType, objectDecoder)); - } - } - } else if ((values = this.PATTERN_MAPPING_ITEM.exec(this.currentLine)) && values.key.indexOf(' #') === -1) { - if (this.CONTEXT_SEQUENCE === context) { - throw new ParseException('You cannot define a mapping item when in a sequence'); - } - context = this.CONTEXT_MAPPING; - if (data == null) { - data = {}; - } - Inline.configure(exceptionOnInvalidType, objectDecoder); - try { - key = Inline.parseScalar(values.key); - } catch (_error) { - e = _error; - e.parsedLine = this.getRealCurrentLineNb() + 1; - e.snippet = this.currentLine; - throw e; - } - if ('<<' === key) { - mergeNode = true; - allowOverwrite = true; - if (((ref1 = values.value) != null ? ref1.indexOf('*') : void 0) === 0) { - refName = values.value.slice(1); - if (this.refs[refName] == null) { - throw new ParseException('Reference "' + refName + '" does not exist.', this.getRealCurrentLineNb() + 1, this.currentLine); - } - refValue = this.refs[refName]; - if (typeof refValue !== 'object') { - throw new ParseException('YAML merge keys used with a scalar value instead of an object.', this.getRealCurrentLineNb() + 1, this.currentLine); - } - if (refValue instanceof Array) { - for (i = j = 0, len = refValue.length; j < len; i = ++j) { - value = refValue[i]; - if (data[name = String(i)] == null) { - data[name] = value; - } - } - } else { - for (key in refValue) { - value = refValue[key]; - if (data[key] == null) { - data[key] = value; - } - } - } - } else { - if ((values.value != null) && values.value !== '') { - value = values.value; - } else { - value = this.getNextEmbedBlock(); - } - c = this.getRealCurrentLineNb() + 1; - parser = new Parser(c); - parser.refs = this.refs; - parsed = parser.parse(value, exceptionOnInvalidType); - if (typeof parsed !== 'object') { - throw new ParseException('YAML merge keys used with a scalar value instead of an object.', this.getRealCurrentLineNb() + 1, this.currentLine); - } - if (parsed instanceof Array) { - for (l = 0, len1 = parsed.length; l < len1; l++) { - parsedItem = parsed[l]; - if (typeof parsedItem !== 'object') { - throw new ParseException('Merge items must be objects.', this.getRealCurrentLineNb() + 1, parsedItem); - } - if (parsedItem instanceof Array) { - for (i = m = 0, len2 = parsedItem.length; m < len2; i = ++m) { - value = parsedItem[i]; - k = String(i); - if (!data.hasOwnProperty(k)) { - data[k] = value; - } - } - } else { - for (key in parsedItem) { - value = parsedItem[key]; - if (!data.hasOwnProperty(key)) { - data[key] = value; - } - } - } - } - } else { - for (key in parsed) { - value = parsed[key]; - if (!data.hasOwnProperty(key)) { - data[key] = value; - } - } - } - } - } else if ((values.value != null) && (matches = this.PATTERN_ANCHOR_VALUE.exec(values.value))) { - isRef = matches.ref; - values.value = matches.value; - } - if (mergeNode) { - - } else if (!(values.value != null) || '' === Utils.trim(values.value, ' ') || Utils.ltrim(values.value, ' ').indexOf('#') === 0) { - if (!(this.isNextLineIndented()) && 
!(this.isNextLineUnIndentedCollection())) { - if (allowOverwrite || data[key] === void 0) { - data[key] = null; - } - } else { - c = this.getRealCurrentLineNb() + 1; - parser = new Parser(c); - parser.refs = this.refs; - val = parser.parse(this.getNextEmbedBlock(), exceptionOnInvalidType, objectDecoder); - if (allowOverwrite || data[key] === void 0) { - data[key] = val; - } - } - } else { - val = this.parseValue(values.value, exceptionOnInvalidType, objectDecoder); - if (allowOverwrite || data[key] === void 0) { - data[key] = val; - } - } - } else { - lineCount = this.lines.length; - if (1 === lineCount || (2 === lineCount && Utils.isEmpty(this.lines[1]))) { - try { - value = Inline.parse(this.lines[0], exceptionOnInvalidType, objectDecoder); - } catch (_error) { - e = _error; - e.parsedLine = this.getRealCurrentLineNb() + 1; - e.snippet = this.currentLine; - throw e; - } - if (typeof value === 'object') { - if (value instanceof Array) { - first = value[0]; - } else { - for (key in value) { - first = value[key]; - break; - } - } - if (typeof first === 'string' && first.indexOf('*') === 0) { - data = []; - for (n = 0, len3 = value.length; n < len3; n++) { - alias = value[n]; - data.push(this.refs[alias.slice(1)]); - } - value = data; - } - } - return value; - } else if ((ref2 = Utils.ltrim(value).charAt(0)) === '[' || ref2 === '{') { - try { - return Inline.parse(value, exceptionOnInvalidType, objectDecoder); - } catch (_error) { - e = _error; - e.parsedLine = this.getRealCurrentLineNb() + 1; - e.snippet = this.currentLine; - throw e; - } - } - throw new ParseException('Unable to parse.', this.getRealCurrentLineNb() + 1, this.currentLine); - } - if (isRef) { - if (data instanceof Array) { - this.refs[isRef] = data[data.length - 1]; - } else { - lastKey = null; - for (key in data) { - lastKey = key; - } - this.refs[isRef] = data[lastKey]; - } - } - } - if (Utils.isEmpty(data)) { - return null; - } else { - return data; - } - }; - - Parser.prototype.getRealCurrentLineNb = function() { - return this.currentLineNb + this.offset; - }; - - Parser.prototype.getCurrentLineIndentation = function() { - return this.currentLine.length - Utils.ltrim(this.currentLine, ' ').length; - }; - - Parser.prototype.getNextEmbedBlock = function(indentation, includeUnindentedCollection) { - var data, indent, isItUnindentedCollection, newIndent, removeComments, removeCommentsPattern, unindentedEmbedBlock; - if (indentation == null) { - indentation = null; - } - if (includeUnindentedCollection == null) { - includeUnindentedCollection = false; - } - this.moveToNextLine(); - if (indentation == null) { - newIndent = this.getCurrentLineIndentation(); - unindentedEmbedBlock = this.isStringUnIndentedCollectionItem(this.currentLine); - if (!(this.isCurrentLineEmpty()) && 0 === newIndent && !unindentedEmbedBlock) { - throw new ParseException('Indentation problem.', this.getRealCurrentLineNb() + 1, this.currentLine); - } - } else { - newIndent = indentation; - } - data = [this.currentLine.slice(newIndent)]; - if (!includeUnindentedCollection) { - isItUnindentedCollection = this.isStringUnIndentedCollectionItem(this.currentLine); - } - removeCommentsPattern = this.PATTERN_FOLDED_SCALAR_END; - removeComments = !removeCommentsPattern.test(this.currentLine); - while (this.moveToNextLine()) { - indent = this.getCurrentLineIndentation(); - if (indent === newIndent) { - removeComments = !removeCommentsPattern.test(this.currentLine); - } - if (isItUnindentedCollection && !this.isStringUnIndentedCollectionItem(this.currentLine) && 
indent === newIndent) { - this.moveToPreviousLine(); - break; - } - if (this.isCurrentLineBlank()) { - data.push(this.currentLine.slice(newIndent)); - continue; - } - if (removeComments && this.isCurrentLineComment()) { - if (indent === newIndent) { - continue; - } - } - if (indent >= newIndent) { - data.push(this.currentLine.slice(newIndent)); - } else if (Utils.ltrim(this.currentLine).charAt(0) === '#') { - - } else if (0 === indent) { - this.moveToPreviousLine(); - break; - } else { - throw new ParseException('Indentation problem.', this.getRealCurrentLineNb() + 1, this.currentLine); - } - } - return data.join("\n"); - }; - - Parser.prototype.moveToNextLine = function() { - if (this.currentLineNb >= this.lines.length - 1) { - return false; - } - this.currentLine = this.lines[++this.currentLineNb]; - return true; - }; - - Parser.prototype.moveToPreviousLine = function() { - this.currentLine = this.lines[--this.currentLineNb]; - }; - - Parser.prototype.parseValue = function(value, exceptionOnInvalidType, objectDecoder) { - var e, foldedIndent, matches, modifiers, pos, ref, ref1, val; - if (0 === value.indexOf('*')) { - pos = value.indexOf('#'); - if (pos !== -1) { - value = value.substr(1, pos - 2); - } else { - value = value.slice(1); - } - if (this.refs[value] === void 0) { - throw new ParseException('Reference "' + value + '" does not exist.', this.currentLine); - } - return this.refs[value]; - } - if (matches = this.PATTERN_FOLDED_SCALAR_ALL.exec(value)) { - modifiers = (ref = matches.modifiers) != null ? ref : ''; - foldedIndent = Math.abs(parseInt(modifiers)); - if (isNaN(foldedIndent)) { - foldedIndent = 0; - } - val = this.parseFoldedScalar(matches.separator, this.PATTERN_DECIMAL.replace(modifiers, ''), foldedIndent); - if (matches.type != null) { - Inline.configure(exceptionOnInvalidType, objectDecoder); - return Inline.parseScalar(matches.type + ' ' + val); - } else { - return val; - } - } - try { - return Inline.parse(value, exceptionOnInvalidType, objectDecoder); - } catch (_error) { - e = _error; - if (((ref1 = value.charAt(0)) === '[' || ref1 === '{') && e instanceof ParseException && this.isNextLineIndented()) { - value += "\n" + this.getNextEmbedBlock(); - try { - return Inline.parse(value, exceptionOnInvalidType, objectDecoder); - } catch (_error) { - e = _error; - e.parsedLine = this.getRealCurrentLineNb() + 1; - e.snippet = this.currentLine; - throw e; - } - } else { - e.parsedLine = this.getRealCurrentLineNb() + 1; - e.snippet = this.currentLine; - throw e; - } - } - }; - - Parser.prototype.parseFoldedScalar = function(separator, indicator, indentation) { - var isCurrentLineBlank, j, len, line, matches, newText, notEOF, pattern, ref, text; - if (indicator == null) { - indicator = ''; - } - if (indentation == null) { - indentation = 0; - } - notEOF = this.moveToNextLine(); - if (!notEOF) { - return ''; - } - isCurrentLineBlank = this.isCurrentLineBlank(); - text = ''; - while (notEOF && isCurrentLineBlank) { - if (notEOF = this.moveToNextLine()) { - text += "\n"; - isCurrentLineBlank = this.isCurrentLineBlank(); - } - } - if (0 === indentation) { - if (matches = this.PATTERN_INDENT_SPACES.exec(this.currentLine)) { - indentation = matches[0].length; - } - } - if (indentation > 0) { - pattern = this.PATTERN_FOLDED_SCALAR_BY_INDENTATION[indentation]; - if (pattern == null) { - pattern = new Pattern('^ {' + indentation + '}(.*)$'); - Parser.prototype.PATTERN_FOLDED_SCALAR_BY_INDENTATION[indentation] = pattern; - } - while (notEOF && (isCurrentLineBlank || (matches = 
pattern.exec(this.currentLine)))) { - if (isCurrentLineBlank) { - text += this.currentLine.slice(indentation); - } else { - text += matches[1]; - } - if (notEOF = this.moveToNextLine()) { - text += "\n"; - isCurrentLineBlank = this.isCurrentLineBlank(); - } - } - } else if (notEOF) { - text += "\n"; - } - if (notEOF) { - this.moveToPreviousLine(); - } - if ('>' === separator) { - newText = ''; - ref = text.split("\n"); - for (j = 0, len = ref.length; j < len; j++) { - line = ref[j]; - if (line.length === 0 || line.charAt(0) === ' ') { - newText = Utils.rtrim(newText, ' ') + line + "\n"; - } else { - newText += line + ' '; - } - } - text = newText; - } - if ('+' !== indicator) { - text = Utils.rtrim(text); - } - if ('' === indicator) { - text = this.PATTERN_TRAILING_LINES.replace(text, "\n"); - } else if ('-' === indicator) { - text = this.PATTERN_TRAILING_LINES.replace(text, ''); - } - return text; - }; - - Parser.prototype.isNextLineIndented = function(ignoreComments) { - var EOF, currentIndentation, ret; - if (ignoreComments == null) { - ignoreComments = true; - } - currentIndentation = this.getCurrentLineIndentation(); - EOF = !this.moveToNextLine(); - if (ignoreComments) { - while (!EOF && this.isCurrentLineEmpty()) { - EOF = !this.moveToNextLine(); - } - } else { - while (!EOF && this.isCurrentLineBlank()) { - EOF = !this.moveToNextLine(); - } - } - if (EOF) { - return false; - } - ret = false; - if (this.getCurrentLineIndentation() > currentIndentation) { - ret = true; - } - this.moveToPreviousLine(); - return ret; - }; - - Parser.prototype.isCurrentLineEmpty = function() { - var trimmedLine; - trimmedLine = Utils.trim(this.currentLine, ' '); - return trimmedLine.length === 0 || trimmedLine.charAt(0) === '#'; - }; - - Parser.prototype.isCurrentLineBlank = function() { - return '' === Utils.trim(this.currentLine, ' '); - }; - - Parser.prototype.isCurrentLineComment = function() { - var ltrimmedLine; - ltrimmedLine = Utils.ltrim(this.currentLine, ' '); - return ltrimmedLine.charAt(0) === '#'; - }; - - Parser.prototype.cleanup = function(value) { - var count, i, indent, j, l, len, len1, line, lines, ref, ref1, ref2, smallestIndent, trimmedValue; - if (value.indexOf("\r") !== -1) { - value = value.split("\r\n").join("\n").split("\r").join("\n"); - } - count = 0; - ref = this.PATTERN_YAML_HEADER.replaceAll(value, ''), value = ref[0], count = ref[1]; - this.offset += count; - ref1 = this.PATTERN_LEADING_COMMENTS.replaceAll(value, '', 1), trimmedValue = ref1[0], count = ref1[1]; - if (count === 1) { - this.offset += Utils.subStrCount(value, "\n") - Utils.subStrCount(trimmedValue, "\n"); - value = trimmedValue; - } - ref2 = this.PATTERN_DOCUMENT_MARKER_START.replaceAll(value, '', 1), trimmedValue = ref2[0], count = ref2[1]; - if (count === 1) { - this.offset += Utils.subStrCount(value, "\n") - Utils.subStrCount(trimmedValue, "\n"); - value = trimmedValue; - value = this.PATTERN_DOCUMENT_MARKER_END.replace(value, ''); - } - lines = value.split("\n"); - smallestIndent = -1; - for (j = 0, len = lines.length; j < len; j++) { - line = lines[j]; - indent = line.length - Utils.ltrim(line).length; - if (smallestIndent === -1 || indent < smallestIndent) { - smallestIndent = indent; - } - } - if (smallestIndent > 0) { - for (i = l = 0, len1 = lines.length; l < len1; i = ++l) { - line = lines[i]; - lines[i] = line.slice(smallestIndent); - } - value = lines.join("\n"); - } - return value; - }; - - Parser.prototype.isNextLineUnIndentedCollection = function(currentIndentation) { - var notEOF, ret; - if 
(currentIndentation == null) { - currentIndentation = null; - } - if (currentIndentation == null) { - currentIndentation = this.getCurrentLineIndentation(); - } - notEOF = this.moveToNextLine(); - while (notEOF && this.isCurrentLineEmpty()) { - notEOF = this.moveToNextLine(); - } - if (false === notEOF) { - return false; - } - ret = false; - if (this.getCurrentLineIndentation() === currentIndentation && this.isStringUnIndentedCollectionItem(this.currentLine)) { - ret = true; - } - this.moveToPreviousLine(); - return ret; - }; - - Parser.prototype.isStringUnIndentedCollectionItem = function() { - return this.currentLine === '-' || this.currentLine.slice(0, 2) === '- '; - }; - - return Parser; - -})(); - -module.exports = Parser; - - - -},{"./Exception/ParseException":4,"./Inline":5,"./Pattern":7,"./Utils":9}],7:[function(require,module,exports){ -var Pattern; - -Pattern = (function() { - Pattern.prototype.regex = null; - - Pattern.prototype.rawRegex = null; - - Pattern.prototype.cleanedRegex = null; - - Pattern.prototype.mapping = null; - - function Pattern(rawRegex, modifiers) { - var _char, capturingBracketNumber, cleanedRegex, i, len, mapping, name, part, subChar; - if (modifiers == null) { - modifiers = ''; - } - cleanedRegex = ''; - len = rawRegex.length; - mapping = null; - capturingBracketNumber = 0; - i = 0; - while (i < len) { - _char = rawRegex.charAt(i); - if (_char === '\\') { - cleanedRegex += rawRegex.slice(i, +(i + 1) + 1 || 9e9); - i++; - } else if (_char === '(') { - if (i < len - 2) { - part = rawRegex.slice(i, +(i + 2) + 1 || 9e9); - if (part === '(?:') { - i += 2; - cleanedRegex += part; - } else if (part === '(?<') { - capturingBracketNumber++; - i += 2; - name = ''; - while (i + 1 < len) { - subChar = rawRegex.charAt(i + 1); - if (subChar === '>') { - cleanedRegex += '('; - i++; - if (name.length > 0) { - if (mapping == null) { - mapping = {}; - } - mapping[name] = capturingBracketNumber; - } - break; - } else { - name += subChar; - } - i++; - } - } else { - cleanedRegex += _char; - capturingBracketNumber++; - } - } else { - cleanedRegex += _char; - } - } else { - cleanedRegex += _char; - } - i++; - } - this.rawRegex = rawRegex; - this.cleanedRegex = cleanedRegex; - this.regex = new RegExp(this.cleanedRegex, 'g' + modifiers.replace('g', '')); - this.mapping = mapping; - } - - Pattern.prototype.exec = function(str) { - var index, matches, name, ref; - this.regex.lastIndex = 0; - matches = this.regex.exec(str); - if (matches == null) { - return null; - } - if (this.mapping != null) { - ref = this.mapping; - for (name in ref) { - index = ref[name]; - matches[name] = matches[index]; - } - } - return matches; - }; - - Pattern.prototype.test = function(str) { - this.regex.lastIndex = 0; - return this.regex.test(str); - }; - - Pattern.prototype.replace = function(str, replacement) { - this.regex.lastIndex = 0; - return str.replace(this.regex, replacement); - }; - - Pattern.prototype.replaceAll = function(str, replacement, limit) { - var count; - if (limit == null) { - limit = 0; - } - this.regex.lastIndex = 0; - count = 0; - while (this.regex.test(str) && (limit === 0 || count < limit)) { - this.regex.lastIndex = 0; - str = str.replace(this.regex, ''); - count++; - } - return [str, count]; - }; - - return Pattern; - -})(); - -module.exports = Pattern; - - - -},{}],8:[function(require,module,exports){ -var Pattern, Unescaper, Utils; - -Utils = require('./Utils'); - -Pattern = require('./Pattern'); - -Unescaper = (function() { - function Unescaper() {} - - 
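// A minimal usage sketch of the Pattern helper defined above (illustrative only, not part of the original bundle): the constructor rewrites PCRE-style named groups such as (?<name>...) into plain capturing groups and records a name -> group-index mapping, which exec() then replays onto the match object, since JavaScript RegExp at the time had no native named groups.
//
//   var p = new Pattern('^(?<key>\\w+): *(?<value>.*)$');
//   var m = p.exec('port: 5666');
//   // m[1] === 'port' and, via the mapping, m.key === 'port' and
//   // m.value === '5666' -- the same mechanism PATTERN_DATE relies on below.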
Unescaper.PATTERN_ESCAPED_CHARACTER = new Pattern('\\\\([0abt\tnvfre "\\/\\\\N_LP]|x[0-9a-fA-F]{2}|u[0-9a-fA-F]{4}|U[0-9a-fA-F]{8})'); - - Unescaper.unescapeSingleQuotedString = function(value) { - return value.replace(/\'\'/g, '\''); - }; - - Unescaper.unescapeDoubleQuotedString = function(value) { - if (this._unescapeCallback == null) { - this._unescapeCallback = (function(_this) { - return function(str) { - return _this.unescapeCharacter(str); - }; - })(this); - } - return this.PATTERN_ESCAPED_CHARACTER.replace(value, this._unescapeCallback); - }; - - Unescaper.unescapeCharacter = function(value) { - var ch; - ch = String.fromCharCode; - switch (value.charAt(1)) { - case '0': - return ch(0); - case 'a': - return ch(7); - case 'b': - return ch(8); - case 't': - return "\t"; - case "\t": - return "\t"; - case 'n': - return "\n"; - case 'v': - return ch(11); - case 'f': - return ch(12); - case 'r': - return ch(13); - case 'e': - return ch(27); - case ' ': - return ' '; - case '"': - return '"'; - case '/': - return '/'; - case '\\': - return '\\'; - case 'N': - return ch(0x0085); - case '_': - return ch(0x00A0); - case 'L': - return ch(0x2028); - case 'P': - return ch(0x2029); - case 'x': - return Utils.utf8chr(Utils.hexDec(value.substr(2, 2))); - case 'u': - return Utils.utf8chr(Utils.hexDec(value.substr(2, 4))); - case 'U': - return Utils.utf8chr(Utils.hexDec(value.substr(2, 8))); - default: - return ''; - } - }; - - return Unescaper; - -})(); - -module.exports = Unescaper; - - - -},{"./Pattern":7,"./Utils":9}],9:[function(require,module,exports){ -var Pattern, Utils; - -Pattern = require('./Pattern'); - -Utils = (function() { - function Utils() {} - - Utils.REGEX_LEFT_TRIM_BY_CHAR = {}; - - Utils.REGEX_RIGHT_TRIM_BY_CHAR = {}; - - Utils.REGEX_SPACES = /\s+/g; - - Utils.REGEX_DIGITS = /^\d+$/; - - Utils.REGEX_OCTAL = /[^0-7]/gi; - - Utils.REGEX_HEXADECIMAL = /[^a-f0-9]/gi; - - Utils.PATTERN_DATE = new Pattern('^' + '(?<year>[0-9][0-9][0-9][0-9])' + '-(?<month>[0-9][0-9]?)' + '-(?<day>[0-9][0-9]?)' + '(?:(?:[Tt]|[ \t]+)' + '(?<hour>[0-9][0-9]?)' + ':(?<minute>[0-9][0-9])' + ':(?<second>[0-9][0-9])' + '(?:\.(?<fraction>[0-9]*))?' + '(?:[ \t]*(?<tz>Z|(?<tz_sign>[-+])(?<tz_hour>[0-9][0-9]?)' + '(?::(?<tz_minute>[0-9][0-9]))?))?)?' 
+ '$', 'i'); - - Utils.LOCAL_TIMEZONE_OFFSET = new Date().getTimezoneOffset() * 60 * 1000; - - Utils.trim = function(str, _char) { - var regexLeft, regexRight; - if (_char == null) { - _char = '\\s'; - } - return str.trim(); - regexLeft = this.REGEX_LEFT_TRIM_BY_CHAR[_char]; - if (regexLeft == null) { - this.REGEX_LEFT_TRIM_BY_CHAR[_char] = regexLeft = new RegExp('^' + _char + '' + _char + '*'); - } - regexLeft.lastIndex = 0; - regexRight = this.REGEX_RIGHT_TRIM_BY_CHAR[_char]; - if (regexRight == null) { - this.REGEX_RIGHT_TRIM_BY_CHAR[_char] = regexRight = new RegExp(_char + '' + _char + '*$'); - } - regexRight.lastIndex = 0; - return str.replace(regexLeft, '').replace(regexRight, ''); - }; - - Utils.ltrim = function(str, _char) { - var regexLeft; - if (_char == null) { - _char = '\\s'; - } - regexLeft = this.REGEX_LEFT_TRIM_BY_CHAR[_char]; - if (regexLeft == null) { - this.REGEX_LEFT_TRIM_BY_CHAR[_char] = regexLeft = new RegExp('^' + _char + '' + _char + '*'); - } - regexLeft.lastIndex = 0; - return str.replace(regexLeft, ''); - }; - - Utils.rtrim = function(str, _char) { - var regexRight; - if (_char == null) { - _char = '\\s'; - } - regexRight = this.REGEX_RIGHT_TRIM_BY_CHAR[_char]; - if (regexRight == null) { - this.REGEX_RIGHT_TRIM_BY_CHAR[_char] = regexRight = new RegExp(_char + '' + _char + '*$'); - } - regexRight.lastIndex = 0; - return str.replace(regexRight, ''); - }; - - Utils.isEmpty = function(value) { - return !value || value === '' || value === '0' || (value instanceof Array && value.length === 0); - }; - - Utils.subStrCount = function(string, subString, start, length) { - var c, i, j, len, ref, sublen; - c = 0; - string = '' + string; - subString = '' + subString; - if (start != null) { - string = string.slice(start); - } - if (length != null) { - string = string.slice(0, length); - } - len = string.length; - sublen = subString.length; - for (i = j = 0, ref = len; 0 <= ref ? j < ref : j > ref; i = 0 <= ref ? 
++j : --j) { - if (subString === string.slice(i, sublen)) { - c++; - i += sublen - 1; - } - } - return c; - }; - - Utils.isDigits = function(input) { - this.REGEX_DIGITS.lastIndex = 0; - return this.REGEX_DIGITS.test(input); - }; - - Utils.octDec = function(input) { - this.REGEX_OCTAL.lastIndex = 0; - return parseInt((input + '').replace(this.REGEX_OCTAL, ''), 8); - }; - - Utils.hexDec = function(input) { - this.REGEX_HEXADECIMAL.lastIndex = 0; - input = this.trim(input); - if ((input + '').slice(0, 2) === '0x') { - input = (input + '').slice(2); - } - return parseInt((input + '').replace(this.REGEX_HEXADECIMAL, ''), 16); - }; - - Utils.utf8chr = function(c) { - var ch; - ch = String.fromCharCode; - if (0x80 > (c %= 0x200000)) { - return ch(c); - } - if (0x800 > c) { - return ch(0xC0 | c >> 6) + ch(0x80 | c & 0x3F); - } - if (0x10000 > c) { - return ch(0xE0 | c >> 12) + ch(0x80 | c >> 6 & 0x3F) + ch(0x80 | c & 0x3F); - } - return ch(0xF0 | c >> 18) + ch(0x80 | c >> 12 & 0x3F) + ch(0x80 | c >> 6 & 0x3F) + ch(0x80 | c & 0x3F); - }; - - Utils.parseBoolean = function(input, strict) { - var lowerInput; - if (strict == null) { - strict = true; - } - if (typeof input === 'string') { - lowerInput = input.toLowerCase(); - if (!strict) { - if (lowerInput === 'no') { - return false; - } - } - if (lowerInput === '0') { - return false; - } - if (lowerInput === 'false') { - return false; - } - if (lowerInput === '') { - return false; - } - return true; - } - return !!input; - }; - - Utils.isNumeric = function(input) { - this.REGEX_SPACES.lastIndex = 0; - return typeof input === 'number' || typeof input === 'string' && !isNaN(input) && input.replace(this.REGEX_SPACES, '') !== ''; - }; - - Utils.stringToDate = function(str) { - var date, day, fraction, hour, info, minute, month, second, tz_hour, tz_minute, tz_offset, year; - if (!(str != null ? 
str.length : void 0)) { - return null; - } - info = this.PATTERN_DATE.exec(str); - if (!info) { - return null; - } - year = parseInt(info.year, 10); - month = parseInt(info.month, 10) - 1; - day = parseInt(info.day, 10); - if (info.hour == null) { - date = new Date(Date.UTC(year, month, day)); - return date; - } - hour = parseInt(info.hour, 10); - minute = parseInt(info.minute, 10); - second = parseInt(info.second, 10); - if (info.fraction != null) { - fraction = info.fraction.slice(0, 3); - while (fraction.length < 3) { - fraction += '0'; - } - fraction = parseInt(fraction, 10); - } else { - fraction = 0; - } - if (info.tz != null) { - tz_hour = parseInt(info.tz_hour, 10); - if (info.tz_minute != null) { - tz_minute = parseInt(info.tz_minute, 10); - } else { - tz_minute = 0; - } - tz_offset = (tz_hour * 60 + tz_minute) * 60000; - if ('-' === info.tz_sign) { - tz_offset *= -1; - } - } - date = new Date(Date.UTC(year, month, day, hour, minute, second, fraction)); - if (tz_offset) { - date.setTime(date.getTime() + tz_offset); - } - return date; - }; - - Utils.strRepeat = function(str, number) { - var i, res; - res = ''; - i = 0; - while (i < number) { - res += str; - i++; - } - return res; - }; - - Utils.getStringFromFile = function(path, callback) { - var data, fs, j, len1, name, ref, req, xhr; - if (callback == null) { - callback = null; - } - xhr = null; - if (typeof window !== "undefined" && window !== null) { - if (window.XMLHttpRequest) { - xhr = new XMLHttpRequest(); - } else if (window.ActiveXObject) { - ref = ["Msxml2.XMLHTTP.6.0", "Msxml2.XMLHTTP.3.0", "Msxml2.XMLHTTP", "Microsoft.XMLHTTP"]; - for (j = 0, len1 = ref.length; j < len1; j++) { - name = ref[j]; - try { - xhr = new ActiveXObject(name); - } catch (_error) {} - } - } - } - if (xhr != null) { - if (callback != null) { - xhr.onreadystatechange = function() { - if (xhr.readyState === 4) { - if (xhr.status === 200 || xhr.status === 0) { - return callback(xhr.responseText); - } else { - return callback(null); - } - } - }; - xhr.open('GET', path, true); - return xhr.send(null); - } else { - xhr.open('GET', path, false); - xhr.send(null); - if (xhr.status === 200 || xhr.status === 0) { - return xhr.responseText; - } - return null; - } - } else { - req = require; - fs = req('fs'); - if (callback != null) { - return fs.readFile(path, function(err, data) { - if (err) { - return callback(null); - } else { - return callback(String(data)); - } - }); - } else { - data = fs.readFileSync(path); - if (data != null) { - return String(data); - } - return null; - } - } - }; - - return Utils; - -})(); - -module.exports = Utils; - - - -},{"./Pattern":7}],10:[function(require,module,exports){ -var Dumper, Parser, Utils, Yaml; - -Parser = require('./Parser'); - -Dumper = require('./Dumper'); - -Utils = require('./Utils'); - -Yaml = (function() { - function Yaml() {} - - Yaml.parse = function(input, exceptionOnInvalidType, objectDecoder) { - if (exceptionOnInvalidType == null) { - exceptionOnInvalidType = false; - } - if (objectDecoder == null) { - objectDecoder = null; - } - return new Parser().parse(input, exceptionOnInvalidType, objectDecoder); - }; - - Yaml.parseFile = function(path, callback, exceptionOnInvalidType, objectDecoder) { - var input; - if (callback == null) { - callback = null; - } - if (exceptionOnInvalidType == null) { - exceptionOnInvalidType = false; - } - if (objectDecoder == null) { - objectDecoder = null; - } - if (callback != null) { - return Utils.getStringFromFile(path, (function(_this) { - return function(input) { - 
var result; - result = null; - if (input != null) { - result = _this.parse(input, exceptionOnInvalidType, objectDecoder); - } - callback(result); - }; - })(this)); - } else { - input = Utils.getStringFromFile(path); - if (input != null) { - return this.parse(input, exceptionOnInvalidType, objectDecoder); - } - return null; - } - }; - - Yaml.dump = function(input, inline, indent, exceptionOnInvalidType, objectEncoder) { - var yaml; - if (inline == null) { - inline = 2; - } - if (indent == null) { - indent = 4; - } - if (exceptionOnInvalidType == null) { - exceptionOnInvalidType = false; - } - if (objectEncoder == null) { - objectEncoder = null; - } - yaml = new Dumper(); - yaml.indentation = indent; - return yaml.dump(input, inline, 0, exceptionOnInvalidType, objectEncoder); - }; - - Yaml.register = function() { - var require_handler; - require_handler = function(module, filename) { - return module.exports = YAML.parseFile(filename); - }; - if ((typeof require !== "undefined" && require !== null ? require.extensions : void 0) != null) { - require.extensions['.yml'] = require_handler; - return require.extensions['.yaml'] = require_handler; - } - }; - - Yaml.stringify = function(input, inline, indent, exceptionOnInvalidType, objectEncoder) { - return this.dump(input, inline, indent, exceptionOnInvalidType, objectEncoder); - }; - - Yaml.load = function(path, callback, exceptionOnInvalidType, objectDecoder) { - return this.parseFile(path, callback, exceptionOnInvalidType, objectDecoder); - }; - - return Yaml; - -})(); - -if (typeof window !== "undefined" && window !== null) { - window.YAML = Yaml; -} - -if (typeof window === "undefined" || window === null) { - this.YAML = Yaml; -} - -module.exports = Yaml; - - - -},{"./Dumper":1,"./Parser":6,"./Utils":9}]},{},[10]); diff --git a/states/roles/maintain/sendmail/highstate.service b/states/roles/maintain/sendmail/highstate.service deleted file mode 100644 index e2e9c7b..0000000 --- a/states/roles/maintain/sendmail/highstate.service +++ /dev/null @@ -1,10 +0,0 @@ -[Unit] -Description=Runs state.highstate - -[Service] -Type=oneshot -RemainAfterExit=no -ExecStart=/bin/bash /root/scripts/highstate.sh - -[Install] -WantedBy=multi-user.target diff --git a/states/roles/maintain/sendmail/highstate.sh b/states/roles/maintain/sendmail/highstate.sh deleted file mode 100644 index 0168baf..0000000 --- a/states/roles/maintain/sendmail/highstate.sh +++ /dev/null @@ -1,3 +0,0 @@ -echo "Running highstate `date`" > /root/scripts/highstate.log -/usr/bin/salt-call state.highstate >> /root/scripts/highstate.log -echo "Finished highstate" >> /root/scripts/highstate.log diff --git a/states/roles/maintain/sendmail/highstate.timer b/states/roles/maintain/sendmail/highstate.timer deleted file mode 100644 index 2abbcdd..0000000 --- a/states/roles/maintain/sendmail/highstate.timer +++ /dev/null @@ -1,13 +0,0 @@ -[Unit] -Description=Runs state.highstate every hour - -[Timer] -# Time to wait after booting before we run first time -OnBootSec=10min -# Time between running each consecutive time -OnUnitActiveSec=1h -Unit=highstate.service - -[Install] -WantedBy=multi-user.target - diff --git a/states/roles/maintain/sendmail/init.sls b/states/roles/maintain/sendmail/init.sls deleted file mode 100644 index b81d0b0..0000000 --- a/states/roles/maintain/sendmail/init.sls +++ /dev/null @@ -1,16 +0,0 @@ -{%- set os=grains['os'] -%} - -postfix: - pkg.installed: - - pkgs: - - postfix -# service.running: -# - enable: true - -#/etc/salt/minion: -# file.managed: -# - source: 
salt://roles/maintain/saltminion/minion -# - user: root -# - group: root -# - mode: 644 -# - template: jinja diff --git a/states/roles/maintain/sendmail/minion b/states/roles/maintain/sendmail/minion deleted file mode 100644 index 6fedb22..0000000 --- a/states/roles/maintain/sendmail/minion +++ /dev/null @@ -1,629 +0,0 @@ -##### Primary configuration settings ##### -########################################## -# This configuration file is used to manage the behavior of the Salt Minion. -# With the exception of the location of the Salt Master Server, values that are -# commented out but have an empty line after the comment are defaults that need -# not be set in the config. If there is no blank line after the comment, the -# value is presented as an example and is not the default. - -# Per default the minion will automatically include all config files -# from minion.d/*.conf (minion.d is a directory in the same directory -# as the main minion config file). -#default_include: minion.d/*.conf - -# Set the location of the salt master server. If the master server cannot be -# resolved, then the minion will fail to start. -master: salt.actcur.com - -# If multiple masters are specified in the 'master' setting, the default behavior -# is to always try to connect to them in the order they are listed. If random_master is -# set to True, the order will be randomized instead. This can be helpful in distributing -# the load of many minions executing salt-call requests, for example, from a cron job. -# If only one master is listed, this setting is ignored and a warning will be logged. -#random_master: False - -# Set whether the minion should connect to the master via IPv6: -#ipv6: False - -# Set the number of seconds to wait before attempting to resolve -# the master hostname if name resolution fails. Defaults to 30 seconds. -# Set to zero if the minion should shutdown and not retry. -# retry_dns: 30 - -# Set the port used by the master reply and authentication server. -#master_port: 4506 - -# The user to run salt. -#user: root - -# Specify the location of the daemon process ID file. -#pidfile: /var/run/salt-minion.pid - -# The root directory prepended to these options: pki_dir, cachedir, log_file, -# sock_dir, pidfile. -#root_dir: / - -# The directory to store the pki information in -#pki_dir: /etc/salt/pki/minion - -# Explicitly declare the id for this minion to use, if left commented the id -# will be the hostname as returned by the python call: socket.getfqdn() -# Since salt uses detached ids it is possible to run multiple minions on the -# same machine but with different ids, this can be useful for salt compute -# clusters. -#id: - -# Append a domain to a hostname in the event that it does not exist. This is -# useful for systems where socket.getfqdn() does not actually result in a -# FQDN (for instance, Solaris). -#append_domain: - -# Custom static grains for this minion can be specified here and used in SLS -# files just like all other grains. This example sets 4 custom grains, with -# the 'roles' grain having two values that can be matched against. -#grains: -# roles: -# - webserver -# - memcache -# deployment: datacenter4 -# cabinet: 13 -# cab_u: 14-15 -# -# Where cache data goes. -#cachedir: /var/cache/salt/minion - -# Verify and set permissions on configuration directories at startup. -#verify_env: True - -# The minion can locally cache the return data from jobs sent to it, this -# can be a good way to keep track of jobs the minion has executed -# (on the minion side). 
By default this feature is disabled; to enable, set -# cache_jobs to True. -#cache_jobs: False - -# Set the directory used to hold unix sockets. -#sock_dir: /var/run/salt/minion - -# Set the default outputter used by the salt-call command. The default is -# "nested". -#output: nested -# -# By default output is colored. To disable colored output, set the color value -# to False. -#color: True - -# Do not strip off the colored output from nested results and state outputs -# (true by default). -# strip_colors: False - -# Backup files that are replaced by file.managed and file.recurse under -# 'cachedir'/file_backups relative to their original location and appended -# with a timestamp. The only valid setting is "minion". Disabled by default. -# -# Alternatively this can be specified for each file in state files: -# /etc/ssh/sshd_config: -# file.managed: -# - source: salt://ssh/sshd_config -# - backup: minion -# -#backup_mode: minion - -# When waiting for a master to accept the minion's public key, salt will -# continuously attempt to reconnect until successful. This is the time, in -# seconds, between those reconnection attempts. -#acceptance_wait_time: 10 - -# If this is nonzero, the time between reconnection attempts will increase by -# acceptance_wait_time seconds per iteration, up to this maximum. If this is -# set to zero, the time between reconnection attempts will stay constant. -#acceptance_wait_time_max: 0 - -# If the master rejects the minion's public key, retry instead of exiting. -# Rejected keys will be handled the same as waiting on acceptance. -#rejected_retry: False - -# When the master key changes, the minion will try to re-auth itself to receive -# the new master key. In larger environments this can cause a SYN flood on the -# master because all minions try to re-auth immediately. To prevent this and -# have a minion wait for a random amount of time, use this optional parameter. -# The wait-time will be a random number of seconds between 0 and the defined value. -#random_reauth_delay: 60 - -# When waiting for a master to accept the minion's public key, salt will -# continuously attempt to reconnect until successful. This is the timeout value, -# in seconds, for each individual attempt. After this timeout expires, the minion -# will wait for acceptance_wait_time seconds before trying again. Unless your master -# is under unusually heavy load, this should be left at the default. -#auth_timeout: 60 - -# Number of consecutive SaltReqTimeoutError that are acceptable when trying to -# authenticate. -#auth_tries: 7 - -# If authentication fails due to SaltReqTimeoutError during a ping_interval, -# cause sub minion process to restart. -#auth_safemode: False - -# Ping Master to ensure connection is alive (minutes). -#ping_interval: 0 - -# To auto recover minions if master changes IP address (DDNS) -# auth_tries: 10 -# auth_safemode: False -# ping_interval: 90 -# -# Minions won't know master is missing until a ping fails. After the ping fails, -# the minion will attempt authentication, likely fail, and cause a restart. -# When the minion restarts it will resolve the master's IP and attempt to reconnect. - -# If you don't have any problems with syn-floods, don't bother with the -# three recon_* settings described below, just leave the defaults! -# -# The ZeroMQ pull-socket that binds to the master's publishing interface tries -# to reconnect immediately if the socket is disconnected (for example if -# the master processes are restarted). 
In large setups this will have all -# minions reconnect immediately which might flood the master (the ZeroMQ-default -# is usually a 100ms delay). To prevent this, these three recon_* settings -# can be used. -# recon_default: the interval in milliseconds that the socket should wait before -# trying to reconnect to the master (1000ms = 1 second) -# -# recon_max: the maximum time a socket should wait. Each interval the time to wait -# is calculated by doubling the previous time. If recon_max is reached, -# it starts again at recon_default. Short example: -# -# reconnect 1: the socket will wait 'recon_default' milliseconds -# reconnect 2: 'recon_default' * 2 -# reconnect 3: ('recon_default' * 2) * 2 -# reconnect 4: value from previous interval * 2 -# reconnect 5: value from previous interval * 2 -# reconnect x: if value >= recon_max, it starts again with recon_default -# -# recon_randomize: generate a random wait time on minion start. The wait time will -# be a random value between recon_default and recon_default + -# recon_max. Having all minions reconnect with the same recon_default -# and recon_max value kind of defeats the purpose of being able to -# change these settings. If all minions have the same values and your -# setup is quite large (several thousand minions), they will still -# flood the master. The desired behavior is to have a timeframe within -# which all minions try to reconnect. -# -# An example of how to use these settings. The goal: have all minions reconnect within a -# 60 second timeframe on a disconnect. -# recon_default: 1000 -# recon_max: 59000 -# recon_randomize: True -# -# Each minion will have a randomized reconnect value between 'recon_default' -# and 'recon_default + recon_max', which in this example means between 1000ms -# and 60000ms (or between 1 and 60 seconds). The generated random value will be -# doubled after each attempt to reconnect. Let's say the generated random -# value is 11 seconds (or 11000ms). -# reconnect 1: wait 11 seconds -# reconnect 2: wait 22 seconds -# reconnect 3: wait 33 seconds -# reconnect 4: wait 44 seconds -# reconnect 5: wait 55 seconds -# reconnect 6: wait time is bigger than 60 seconds (recon_default + recon_max) -# reconnect 7: wait 11 seconds -# reconnect 8: wait 22 seconds -# reconnect 9: wait 33 seconds -# reconnect x: etc. -# -# In a setup with ~6000 hosts these settings would average the reconnects -# to about 100 per second and all hosts would be reconnected within 60 seconds. -# recon_default: 100 -# recon_max: 5000 -# recon_randomize: False -# -# -# The loop_interval sets how long in seconds the minion will wait between -# evaluating the scheduler and running cleanup tasks. This defaults to a -# sane 60 seconds, but if the minion scheduler needs to be evaluated more -# often, lower this value -#loop_interval: 60 - -# The grains_refresh_every setting allows for a minion to periodically check -# its grains to see if they have changed and, if so, to inform the master -# of the new grains. This operation is moderately expensive, therefore -# care should be taken not to set this value too low. -# -# Note: This value is expressed in __minutes__! -# -# A value of 10 minutes is a reasonable default. -# -# If the value is set to zero, this check is disabled. -#grains_refresh_every: 1 - -# Cache grains on the minion. Default is False. -#grains_cache: False - -# Grains cache expiration, in seconds. If the cache file is older than this -# number of seconds then the grains cache will be dumped and fully re-populated -# with fresh data. 
Defaults to 5 minutes. Will have no effect if 'grains_cache' -# is not enabled. -# grains_cache_expiration: 300 - -# Windows platforms lack posix IPC and must rely on slower TCP based inter- -# process communications. Set ipc_mode to 'tcp' on such systems -#ipc_mode: ipc - -# Overwrite the default tcp ports used by the minion when in tcp mode -#tcp_pub_port: 4510 -#tcp_pull_port: 4511 - -# Passing very large events can cause the minion to consume large amounts of -# memory. This value tunes the maximum size of a message allowed onto the -# minion event bus. The value is expressed in bytes. -#max_event_size: 1048576 - -# To detect failed master(s) and fire events on connect/disconnect, set -# master_alive_interval to the number of seconds to poll the masters for -# connection events. -# -#master_alive_interval: 30 - -# The minion can include configuration from other files. To enable this, -# pass a list of paths to this option. The paths can be either relative or -# absolute; if relative, they are considered to be relative to the directory -# the main minion configuration file lives in (this file). Paths can make use -# of shell-style globbing. If no files are matched by a path passed to this -# option then the minion will log a warning message. -# -# Include a config file from some other path: -# include: /etc/salt/extra_config -# -# Include config from several files and directories: -#include: -# - /etc/salt/extra_config -# - /etc/roles/webserver -# -# -# -##### Minion module management ##### -########################################## -# Disable specific modules. This allows the admin to limit the level of -# access the master has to the minion. -#disable_modules: [cmd,test] -#disable_returners: [] -# -# Modules can be loaded from arbitrary paths. This enables the easy deployment -# of third party modules. Modules for returners and minions can be loaded. -# Specify a list of extra directories to search for minion modules and -# returners. These paths must be fully qualified! -#module_dirs: [] -#returner_dirs: [] -#states_dirs: [] -#render_dirs: [] -#utils_dirs: [] -# -# A module provider can be statically overwritten or extended for the minion -# via the providers option, in this case the default module will be -# overwritten by the specified module. In this example the pkg module will -# be provided by the yumpkg5 module instead of the system default. -#providers: -# pkg: yumpkg5 -# -# Enable Cython modules searching and loading. (Default: False) -#cython_enable: False -# -# Specify a max size (in bytes) for modules on import. This feature is currently -# only supported on *nix operating systems and requires psutil. -# modules_max_memory: -1 - - -##### State Management Settings ##### -########################################### -# The state management system executes all of the state templates on the minion -# to enable more granular control of system state management. The type of -# template and serialization used for state management needs to be configured -# on the minion, the default renderer is yaml_jinja. This is a yaml file -# rendered from a jinja template, the available options are: -# yaml_jinja -# yaml_mako -# yaml_wempy -# json_jinja -# json_mako -# json_wempy -# -#renderer: yaml_jinja -# -# The failhard option tells the minions to stop immediately after the first -# failure detected in the state execution. Defaults to False. -#failhard: False -# -# autoload_dynamic_modules turns on automatic loading of modules found in the -# environments on the master. 
This is turned on by default. To turn off -# autoloading modules when states run, set this value to False. -#autoload_dynamic_modules: True -# -# clean_dynamic_modules keeps the dynamic modules on the minion in sync with -# the dynamic modules on the master, this means that if a dynamic module is -# not on the master it will be deleted from the minion. By default, this is -# enabled and can be disabled by changing this value to False. -#clean_dynamic_modules: True -# -# Normally, the minion is not isolated to any single environment on the master -# when running states, but the environment can be isolated on the minion side -# by statically setting it. Remember that the recommended way to manage -# environments is to isolate via the top file. -{%- set env="dev" -%} -{%- if pillar['env'] is defined -%} - {%- set env=pillar['env'] -%} -{%- endif %} -environment: {{ env }} -# -# If using the local file directory, then the state top file name needs to be -# defined, by default this is top.sls. -#state_top: top.sls -# -# Run states when the minion daemon starts. To enable, set startup_states to: -# 'highstate' -- Execute state.highstate -# 'sls' -- Read in the sls_list option and execute the named sls files -# 'top' -- Read top_file option and execute based on that file on the Master -#startup_states: '' -# -# List of states to run when the minion starts up if startup_states is 'sls': -#sls_list: -# - edit.vim -# - hyper -# -# Top file to execute if startup_states is 'top': -#top_file: '' - -# Automatically aggregate all states that have support for mod_aggregate by -# setting to True. Or pass a list of state module names to automatically -# aggregate just those types. -# -# state_aggregate: -# - pkg -# -#state_aggregate: False - -##### File Directory Settings ##### -########################################## -# The Salt Minion can redirect all file server operations to a local directory, -# this allows for the same state tree that is on the master to be used if -# copied completely onto the minion. This is a literal copy of the settings on -# the master but used to reference a local directory on the minion. - -# Set the file client. The client defaults to looking on the master server for -# files, but can be directed to look at the local file directory setting -# defined below by setting it to local. -#file_client: remote - -# The file directory works on environments passed to the minion, each environment -# can have multiple root directories, the subdirectories in the multiple file -# roots cannot match, otherwise the downloaded files will not be able to be -# reliably ensured. A base environment is required to house the top file. -# Example: -# file_roots: -# base: -# - /srv/salt/ -# dev: -# - /srv/salt/dev/services -# - /srv/salt/dev/states -# prod: -# - /srv/salt/prod/services -# - /srv/salt/prod/states -# -#file_roots: -# base: -# - /srv/salt -file_roots: - base: - - /srv/salt/prod/states - prod: - - /srv/salt/prod/states - dev: - - /srv/salt/dev/states - -# By default, the Salt fileserver recurses fully into all defined environments -# to attempt to find files. To limit this behavior so that the fileserver only -# traverses directories with SLS files and special Salt directories like _modules, -# enable the option below. This might be useful for installations where a file root -# has a very large number of files and performance is negatively impacted. Default -# is False. 
-#fileserver_limit_traversal: False - -# The hash_type is the hash to use when discovering the hash of a file in -# the local fileserver. The default is md5, but sha1, sha224, sha256, sha384 -# and sha512 are also supported. -# -# Warning: Prior to changing this value, the minion should be stopped and all -# Salt caches should be cleared. -#hash_type: md5 - -# The Salt pillar is searched for locally if file_client is set to local. If -# this is the case, and pillar data is defined, then the pillar_roots need to -# also be configured on the minion: -#pillar_roots: -# base: -# - /srv/pillar -# -pillar_roots: - base: - - /srv/salt/prod/pillar - prod: - - /srv/salt/prod/pillar - dev: - - /srv/salt/dev/pillar -# -###### Security settings ##### -########################################### -# Enable "open mode", this mode still maintains encryption, but turns off -# authentication, this is only intended for highly secure environments or for -# the situation where your keys end up in a bad state. If you run in open mode -# you do so at your own risk! -#open_mode: False - -# Enable permissive access to the salt keys. This allows you to run the -# master or minion as root, but have a non-root group be given access to -# your pki_dir. To make the access explicit, root must belong to the group -# you've given access to. This is potentially quite insecure. -#permissive_pki_access: False - -# The state_verbose and state_output settings can be used to change the way -# state system data is printed to the display. By default all data is printed. -# The state_verbose setting can be set to True or False, when set to False -# all data that has a result of True and no changes will be suppressed. -#state_verbose: True - -# The state_output setting changes if the output is the full multi line -# output for each changed state if set to 'full', but if set to 'terse' -# the output will be shortened to a single line. -state_output: mixed - -# The state_output_diff setting changes whether or not the output from -# successful states is returned. Useful when even the terse output of these -# states is cluttering the logs. Set it to True to ignore them. -#state_output_diff: False - -# Fingerprint of the master public key to double verify the master is valid, -# the master fingerprint can be found by running "salt-key -F master" on the -# salt master. -#master_finger: '' - - -###### Thread settings ##### -########################################### -# Disable multiprocessing support, by default when a minion receives a -# publication a new process is spawned and the command is executed therein. -#multiprocessing: True - - -##### Logging settings ##### -########################################## -# The location of the minion log file -# The minion log can be sent to a regular file, local path name, or network -# location. Remote logging works best when configured to use rsyslogd(8) (e.g.: -# ``file:///dev/log``), with rsyslogd(8) configured for network logging. The URI -# format is: <file|udp|tcp>://<host|socketpath>:<port-if-required>/<log-facility> -#log_file: /var/log/salt/minion -#log_file: file:///dev/log -#log_file: udp://loghost:10514 -# -#log_file: /var/log/salt/minion -#key_logfile: /var/log/salt/key - -# The level of messages to send to the console. -# One of 'garbage', 'trace', 'debug', 'info', 'warning', 'error', 'critical'. -# Default: 'warning' -#log_level: warning - -# The level of messages to send to the log file. -# One of 'garbage', 'trace', 'debug', 'info', 'warning', 'error', 'critical'. 
-# If using 'log_granular_levels' this must be set to the highest desired level. -# Default: 'warning' -#log_level_logfile: - -# The date and time format used in log messages. Allowed date/time formatting -# can be seen here: http://docs.python.org/library/time.html#time.strftime -#log_datefmt: '%H:%M:%S' -#log_datefmt_logfile: '%Y-%m-%d %H:%M:%S' - -# The format of the console logging messages. Allowed formatting options can -# be seen here: http://docs.python.org/library/logging.html#logrecord-attributes -#log_fmt_console: '[%(levelname)-8s] %(message)s' -#log_fmt_logfile: '%(asctime)s,%(msecs)03.0f [%(name)-17s][%(levelname)-8s] %(message)s' - -# This can be used to control logging levels more specifically. This -# example sets the main salt library at the 'warning' level, but sets -# 'salt.modules' to log at the 'debug' level: -# log_granular_levels: -# 'salt': 'warning' -# 'salt.modules': 'debug' -# -#log_granular_levels: {} - -# To diagnose issues with minions disconnecting or missing returns, ZeroMQ -# supports the use of monitor sockets to log connection events. This -# feature requires ZeroMQ 4.0 or higher. -# -# To enable ZeroMQ monitor sockets, set 'zmq_monitor' to 'True' and log at a -# debug level or higher. -# -# A sample log event is as follows: -# -# [DEBUG ] ZeroMQ event: {'endpoint': 'tcp://127.0.0.1:4505', 'event': 512, -# 'value': 27, 'description': 'EVENT_DISCONNECTED'} -# -# All events logged will include the string 'ZeroMQ event'. A connection event -# should be logged as the minion starts up and initially connects to the -# master. If not, check for debug log level and that the necessary version of -# ZeroMQ is installed. -# -#zmq_monitor: False - -###### Module configuration ##### -########################################### -# Salt allows for modules to be passed arbitrary configuration data, any data -# passed here in valid yaml format will be passed on to the salt minion modules -# for use. It is STRONGLY recommended that a naming convention be used in which -# the module name is followed by a . and then the value. Also, all top level -# data must be applied via the yaml dict construct, some examples: -# -# You can specify that all modules should run in test mode: -#test: True -# -# A simple value for the test module: -#test.foo: foo -# -# A list for the test module: -#test.bar: [baz,quo] -# -# A dict for the test module: -#test.baz: {spam: sausage, cheese: bread} -# -# -###### Update settings ###### -########################################### -# Using the features in Esky, a salt minion can both run as a frozen app and -# be updated on the fly. These options control how the update process -# (saltutil.update()) behaves. -# -# The url for finding and downloading updates. Disabled by default. -#update_url: False -# -# The list of services to restart after a successful update. Empty by default. -#update_restart_services: [] - - -###### Keepalive settings ###### -############################################ -# ZeroMQ now includes support for configuring SO_KEEPALIVE if supported by -# the OS. If connections between the minion and the master pass through -# a state tracking device such as a firewall or VPN gateway, there is -# the risk that it could tear down the connection between the master and minion -# without informing either party that their connection has been taken away. -# Enabling TCP Keepalives prevents this from happening. 
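# Illustrative example (not part of the original file): a minion behind a
# stateful firewall that drops idle flows after ten minutes could keep the
# master connection alive by probing well inside that window, using the two
# options documented below:
# tcp_keepalive: True
# tcp_keepalive_idle: 300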
- -# Overall state of TCP Keepalives, enable (1 or True), disable (0 or False) -# or leave to the OS defaults (-1), on Linux, typically disabled. Default True, enabled. -#tcp_keepalive: True - -# How long before the first keepalive should be sent in seconds. Default 300 -# to send the first keepalive after 5 minutes, OS default (-1) is typically 7200 seconds -# on Linux see /proc/sys/net/ipv4/tcp_keepalive_time. -#tcp_keepalive_idle: 300 - -# How many lost probes are needed to consider the connection lost. Default -1 -# to use OS defaults, typically 9 on Linux, see /proc/sys/net/ipv4/tcp_keepalive_probes. -#tcp_keepalive_cnt: -1 - -# How often, in seconds, to send keepalives after the first one. Default -1 to -# use OS defaults, typically 75 seconds on Linux, see -# /proc/sys/net/ipv4/tcp_keepalive_intvl. -#tcp_keepalive_intvl: -1 - - -###### Windows Software settings ###### -############################################ -# Location of the repository cache file on the master: -#win_repo_cachefile: 'salt://win/repo/winrepo.p' - - -###### Returner settings ###### -############################################ -# Which returner(s) will be used for minion's result: -#return: mysql diff --git a/states/roles/maintain/sshserver/init.sls b/states/roles/maintain/sshserver/init.sls index aa9495a..06e1b6c 100644 --- a/states/roles/maintain/sshserver/init.sls +++ b/states/roles/maintain/sshserver/init.sls @@ -30,6 +30,6 @@ ssh-server-config: ssh-server-service: service.running: - name: sshd - - enabled: true + - enable: true - watch: - file: ssh-server-config diff --git a/states/roles/maintain/ytdownloader/feedsbase.csv b/states/roles/maintain/ytdownloader/feedsbase.csv deleted file mode 100644 index ca17661..0000000 --- a/states/roles/maintain/ytdownloader/feedsbase.csv +++ /dev/null @@ -1,4 +0,0 @@ -RoosterTeeth http://podsync.net/sv8x /mnt/video/torrents/Process/ RWBY RWBY.S[Volume ]E[Chapter ] -RWBY http://podsync.net/8dzz /mnt/video/torrents/Process/ RWBY RWBY.S[Volume ]E[Chapter ] -RvB http://podsync.net/sv8x /mnt/video/torrents/Process/ Red vs. 
Blue Red.vs.Blue.S[Season ]E[Episode ] -RvB1-14 http://podsync.net/8jvQ /mnt/video/torrents/Process/ Season 1 Red.vs.Blue.S[Season ]E[Episode ] diff --git a/states/roles/maintain/ytdownloader/init.sls b/states/roles/maintain/ytdownloader/init.sls index 7c88d58..c9b545d 100644 --- a/states/roles/maintain/ytdownloader/init.sls +++ b/states/roles/maintain/ytdownloader/init.sls @@ -6,22 +6,6 @@ yt-python: - python - python-requests -"/root/scripts/ytdownloader/feedsbase.csv": - file.managed: - - source: salt://roles/maintain/ytdownloader/feedsbase.csv - - user: root - - group: root - - mode: 644 - - makedirs: true - -"/root/scripts/ytdownloader/ytdownloader.py": - file.managed: - - source: salt://roles/maintain/ytdownloader/ytdownloader.py - - user: root - - group: root - - mode: 644 - - makedirs: true - "/usr/lib/systemd/system/ytdownloader.service": file.managed: - source: salt://roles/maintain/ytdownloader/ytdownloader.service diff --git a/states/roles/maintain/ytdownloader/ytdownloader.py b/states/roles/maintain/ytdownloader/ytdownloader.py deleted file mode 100644 index 959549c..0000000 --- a/states/roles/maintain/ytdownloader/ytdownloader.py +++ /dev/null @@ -1,100 +0,0 @@ -#download files from youtube using rss feeds -import urllib.request -import xml.etree.ElementTree as ET -import requests -import csv -import os.path -import re -import shutil -from datetime import datetime - -def getname(name,scheme): - #find matching [*] - locs=re.findall('(?<=\[)[^\[\]]+(?=\])',scheme) - digits=[] - for loc in locs: - mstr='(?<='+loc+')[0-9]+' - dig=re.findall(mstr,name) - if(len(dig)>0): - digits.append(dig[0]) - schema=re.split('(\[[^\[\]]+\])',scheme) - nname='' - count=0 - for bit in schema: - if(re.match('\[.*\]',bit)): - if(len(digits)>count): - nname+=digits[count].zfill(2) - count+=1 - else: - nname+=bit - if(count == len(locs)): - return nname - else: - return name - -def downloadfile(name,dest,url): - name=name+".mp4" - print('Saving to: '+name) - urllib.request.urlretrieve(url,name) - print('Moving to: '+dest+name) - shutil.move(name,dest+name) - print('Done') - -def downloadfromfeed(feed): - url=feed[1] - dest=feed[2] - filt=feed[3] - scheme=feed[4] - lastdl=datetime.strptime(feed[5], '%Y-%m-%d %H:%M:%S') - response = urllib.request.urlopen(url) - data = response.read() - text = data.decode('utf-8') - root = ET.fromstring(text) - for child in root.findall('.//item'): - title=child.find('.//title') - date=datetime.strptime(child.find('.//pubDate').text, '%a, %d %b %Y %H:%M:%S %Z') - if(date>lastdl): - if(filt in title.text): - if(date>datetime.strptime(feed[5], '%Y-%m-%d %H:%M:%S')): - feed[5]=date.strftime('%Y-%m-%d %H:%M:%S') - enc=child.find('.//enclosure') - name=title.text - target=enc.attrib.get('url') - print('Downloading: '+name) - fname=getname(name,scheme) - downloadfile(fname,dest,target) - -def downloadfeeds(feeds): - for feed in feeds: - downloadfromfeed(feed) - -def readcsv(f,base): - data=[] - if(os.path.exists(f)): - with open(f) as csvfile: - reader=csv.reader(csvfile, delimiter='\t') - for row in reader: - data.append(row) - dnames=[row[0] for row in data] - with open(base) as basefile: - reader=csv.reader(basefile, delimiter='\t') - for row in reader: - if(row[0] not in dnames): - row.append('2000-01-01 01:01:01') - data.append(row) - dnames.append(row[0]) - return data - -def savecsv(data,f): - with open(f, 'w') as csvfile: - writer=csv.writer(csvfile, delimiter='\t') - writer.writerows(data) - -os.chdir('/root/scripts/ytdownloader') -csvfile='feeds.csv' 
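# An illustrative walk-through of getname() above (not in the original script):
# with the scheme 'RWBY.S[Volume ]E[Chapter ]' from feedsbase.csv, the bracketed
# labels 'Volume ' and 'Chapter ' are looked up in the episode title, and the
# digits that follow each are captured and zero-padded, so a feed item titled
# 'RWBY Volume 4, Chapter 11' becomes 'RWBY.S04E11' before downloadfile()
# appends '.mp4' and moves the file into the configured Process directory.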
-csvbase='feedsbase.csv' -data=readcsv(csvfile,csvbase) -downloadfeeds(data) -print(data) -savecsv(data,csvfile) - diff --git a/states/systems/core/ldap.sss/init.sls b/states/systems/core/ldap.sss/init.sls deleted file mode 100644 index 63e862c..0000000 --- a/states/systems/core/ldap.sss/init.sls +++ /dev/null @@ -1,48 +0,0 @@ -openldap_client: - pkg.installed: - - pkgs: - - openldap - -/etc/openldap/ldap.conf: - file.managed: - - source: salt://systems/core/ldap/ldap.conf - - user: root - - group: root - - mode: 644 - -sssd: - pkg.installed: [] - service.running: - - enable: true - - watch: - - file: /etc/sssd/sssd.conf - -/etc/sssd/sssd.conf: - file.managed: - - source: salt://systems/core/ldap/sssd.conf - - user: root - - group: root - - mode: 600 - -/etc/nscd.conf: - file.managed: - - source: salt://systems/core/ldap/nscd.conf - - user: root - - group: root - - mode: 644 - -/etc/nsswitch.conf: - file.managed: - - source: salt://systems/core/ldap/nsswitch.conf - - user: root - - group: root - - mode: 644 - - -/etc/pam.d: - file.recurse: - - source: salt://systems/core/ldap/pam.d - - user: root - - group: root - - dir_mode: 755 - - file_mode: 644 diff --git a/states/systems/core/ldap.sss/ldap.conf b/states/systems/core/ldap.sss/ldap.conf deleted file mode 100644 index 2fe899c..0000000 --- a/states/systems/core/ldap.sss/ldap.conf +++ /dev/null @@ -1,21 +0,0 @@ -# -# LDAP Defaults -# - -# See ldap.conf(5) for details -# This file should be world readable but not world writable. - -#BASE dc=example,dc=com -#URI ldap://ldap.example.com ldap://ldap-master.example.com:666 - -#SIZELIMIT 12 -#TIMELIMIT 15 -#DEREF never -BASE dc=actcur,dc=com - -TLS_REQCERT demand - -URI ldaps://ldap.actcur.com:636 -TLS_CACERT /etc/openldap/certs/chain.pem -TLS_CACERTDIR /etc/openldap/certs/ - diff --git a/states/systems/core/ldap.sss/nscd.conf b/states/systems/core/ldap.sss/nscd.conf deleted file mode 100644 index d60b39f..0000000 --- a/states/systems/core/ldap.sss/nscd.conf +++ /dev/null @@ -1,88 +0,0 @@ -# -# /etc/nscd.conf -# -# An example Name Service Cache config file. This file is needed by nscd. -# -# Legal entries are: -# -# logfile <file> -# debug-level <level> -# threads <initial #threads to use> -# max-threads <maximum #threads to use> -# server-user <user to run server as instead of root> -# server-user is ignored if nscd is started with -S parameters -# stat-user <user who is allowed to request statistics> -# reload-count unlimited|<number> -# paranoia <yes|no> -# restart-interval <time in seconds>