diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml new file mode 100644 index 00000000..61a5c983 --- /dev/null +++ b/.github/FUNDING.yml @@ -0,0 +1,6 @@ +# DO NOT CHANGE THIS FILE PLEASE. + +open_collective: laradock +ko_fi: laradock +issuehunt: laradock +custom: ['beerpay.io/laradock/laradock', 'paypal.me/mzmmzz'] diff --git a/.github/README.md b/.github/README.md index 2de44626..c3b5c842 100644 --- a/.github/README.md +++ b/.github/README.md @@ -2,7 +2,7 @@ Laradock Logo

-

A Docker PHP development environment that facilitates running PHP Apps on Docker

+

PHP development environment that runs on Docker

Build status @@ -13,10 +13,10 @@ contributions welcome

-

Use Docker First And Learn About It Later

+

Use Docker First - Then Learn About It Later

- forthebadge + forthebadge

@@ -24,16 +24,12 @@

- Laradock Docs + Laradock Docs

-## Sponsors - -Support this project by becoming a sponsor. - -Your logo will show up on the [github repository](https://github.com/laradock/laradock/) index page and the [documentation](http://laradock.io/) main page, with a link to your website. [[Become a sponsor](https://opencollective.com/laradock#sponsor)] +## Sponsors @@ -43,13 +39,14 @@ Your logo will show up on the [github repository](https://github.com/laradock/la - - +For basic sponsorships go to [Open Collective](https://opencollective.com/laradock#sponsor), for golden sponsorships contact support@laradock.io. -## Contributors +*Your logo will show up on the [github repository](https://github.com/laradock/laradock/) index page and the [documentation](http://laradock.io/) main page, with a link to your website.* -#### Core contributors: +## People + +#### Maintainers: - [Mahmoud Zalt](https://github.com/Mahmoudz) @mahmoudz | [Twitter](https://twitter.com/Mahmoud_Zalt) | [Site](http://zalt.me) - [Bo-Yi Wu](https://github.com/appleboy) @appleboy | [Twitter](https://twitter.com/appleboy) - [Philippe Trépanier](https://github.com/philtrep) @philtrep @@ -62,9 +59,11 @@ Your logo will show up on the [github repository](https://github.com/laradock/la - [Milan Urukalo](https://github.com/urukalo) @urukalo - [Vince Chu](https://github.com/vwchu) @vwchu - [Huadong Zuo](https://github.com/zuohuadong) @zuohuadong -- Join us, by submitting 20 useful PR's. +- [Lan Phan](https://github.com/lanphan) @lanphan +- [Ahkui](https://github.com/ahkui) @ahkui +- Join us. -#### Awesome contributors: +#### Awesome Contributors: @@ -74,18 +73,18 @@ Your logo will show up on the [github repository](https://github.com/laradock/la > Help keeping the project development going, by [contributing](http://laradock.io/contributing) or donating a little. > Thanks in advance. 
-Donate directly via [Paypal](https://www.paypal.me/mzalt) +Donate directly via [Paypal](https://paypal.me/mzmmzz) -[![Donate](https://img.shields.io/badge/Donate-PayPal-green.svg)](https://www.paypal.me/mzalt) - -or become a backer on [Open Collective](https://opencollective.com/laradock#backer) - - +[![Donate](https://img.shields.io/badge/Donate-PayPal-green.svg)](https://paypal.me/mzmmzz) or show your support via [Beerpay](https://beerpay.io/laradock/laradock) [![Beerpay](https://beerpay.io/laradock/laradock/badge.svg?style=flat)](https://beerpay.io/laradock/laradock) +or become a backer on [Open Collective](https://opencollective.com/laradock#backer) + + + ## License diff --git a/.github/home-page-images/documentation-button.png b/.github/home-page-images/documentation-button.png new file mode 100644 index 00000000..4ab17161 Binary files /dev/null and b/.github/home-page-images/documentation-button.png differ diff --git a/.gitignore b/.gitignore index 4235e773..954290e7 100644 --- a/.gitignore +++ b/.gitignore @@ -5,4 +5,12 @@ /.project .docker-sync /jenkins/jenkins_home -/nginx/ssl/ + +/logstash/pipeline/*.conf +/logstash/config/pipelines.yml + +/nginx/ssl/*.crt +/nginx/ssl/*.key +/nginx/ssl/*.csr + +.DS_Store diff --git a/.travis.yml b/.travis.yml index 53e43cea..8773aa32 100644 --- a/.travis.yml +++ b/.travis.yml @@ -11,11 +11,13 @@ env: - PHP_VERSION=7.0 BUILD_SERVICE=workspace - PHP_VERSION=7.1 BUILD_SERVICE=workspace - PHP_VERSION=7.2 BUILD_SERVICE=workspace + - PHP_VERSION=7.3 BUILD_SERVICE=workspace - PHP_VERSION=5.6 BUILD_SERVICE=php-fpm - PHP_VERSION=7.0 BUILD_SERVICE=php-fpm - PHP_VERSION=7.1 BUILD_SERVICE=php-fpm - PHP_VERSION=7.2 BUILD_SERVICE=php-fpm + - PHP_VERSION=7.3 BUILD_SERVICE=php-fpm - PHP_VERSION=hhvm BUILD_SERVICE=hhvm @@ -23,13 +25,15 @@ env: - PHP_VERSION=7.0 BUILD_SERVICE=php-worker - PHP_VERSION=7.1 BUILD_SERVICE=php-worker - PHP_VERSION=7.2 BUILD_SERVICE=php-worker + - PHP_VERSION=7.3 BUILD_SERVICE=php-worker - PHP_VERSION=NA BUILD_SERVICE=solr - PHP_VERSION=NA BUILD_SERVICE="mssql rethinkdb aerospike" - - PHP_VERSION=NA BUILD_SERVICE="blackfire minio percona nginx caddy apache2 mysql mariadb postgres postgres-postgis neo4j mongo redis" + - PHP_VERSION=NA BUILD_SERVICE="blackfire minio percona nginx caddy apache2 mysql mariadb postgres postgres-postgis neo4j mongo redis cassandra" - PHP_VERSION=NA BUILD_SERVICE="adminer phpmyadmin pgadmin" - PHP_VERSION=NA BUILD_SERVICE="memcached beanstalkd beanstalkd-console rabbitmq elasticsearch certbot mailhog maildev selenium jenkins proxy proxy2 haproxy" - PHP_VERSION=NA BUILD_SERVICE="kibana grafana laravel-echo-server" + - PHP_VERSION=NA BUILD_SERVICE="ipython-controller manticore" # - PHP_VERSION=NA BUILD_SERVICE="aws" # Installing a newer Docker version diff --git a/DOCUMENTATION/config.toml b/DOCUMENTATION/config.toml index 91fd0724..8fd60626 100644 --- a/DOCUMENTATION/config.toml +++ b/DOCUMENTATION/config.toml @@ -48,42 +48,42 @@ googleAnalytics = "UA-37514928-9" # ------- MENU START ----------------------------------------- [[menu.main]] - name = "Introduction" + name = "1. Introduction" url = "introduction/" weight = 1 [[menu.main]] - name = "Getting Started" + name = "2. Getting Started" url = "getting-started/" weight = 2 [[menu.main]] - name = "Documentation" + name = "3. Documentation" url = "documentation/" weight = 3 [[menu.main]] - name = "Guides" + name = "4. Guides" url = "guides/" weight = 4 [[menu.main]] - name = "Help & Questions" + name = "5. 
Help & Questions" url = "help/" weight = 5 [[menu.main]] - name = "Related Projects" + name = "6. Related Projects" url = "related-projects/" weight = 6 [[menu.main]] - name = "Contributing" + name = "7. Contributing" url = "contributing/" weight = 7 [[menu.main]] - name = "License" + name = "8. License" url = "license/" weight = 8 diff --git a/DOCUMENTATION/content/contributing/index.md b/DOCUMENTATION/content/contributing/index.md index 24ab6335..1d58dcfc 100644 --- a/DOCUMENTATION/content/contributing/index.md +++ b/DOCUMENTATION/content/contributing/index.md @@ -1,5 +1,5 @@ --- -title: Contributing +title: 7. Contributing type: index weight: 7 --- diff --git a/DOCUMENTATION/content/documentation/index.md b/DOCUMENTATION/content/documentation/index.md index 38337d8b..a9aeb8a1 100644 --- a/DOCUMENTATION/content/documentation/index.md +++ b/DOCUMENTATION/content/documentation/index.md @@ -1,5 +1,5 @@ --- -title: Documentation +title: 3. Documentation type: index weight: 3 --- @@ -297,6 +297,24 @@ e) set it to `true` For information on how to configure xDebug with your IDE and work it out, check this [Repository](https://github.com/LarryEitel/laravel-laradock-phpstorm) or follow up on the next section if you use linux and PhpStorm. + + +
+ +## Start/Stop xDebug: + +By installing xDebug, you are enabling it to run on startup by default. + +To control the behavior of xDebug (in the `php-fpm` Container), you can run the following commands from the Laradock root folder (at the same prompt where you run docker-compose): + +- Stop xDebug from running by default: `./php-fpm/xdebug stop`. +- Start xDebug by default: `./php-fpm/xdebug start`. +- See the status: `./php-fpm/xdebug status`. + +Note: If `./php-fpm/xdebug` doesn't execute and gives a `Permission Denied` error, the `xdebug` file probably lacks execute permission. This can be fixed by granting it execute permission with `chmod`. + +
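For instance, a minimal fix from the Laradock root folder (assuming the helper script sits at `php-fpm/xdebug`, as above):

```bash
# give the xdebug helper script execute permission
chmod +x php-fpm/xdebug

# verify it now runs
./php-fpm/xdebug status
```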
## Install phpdbg @@ -320,37 +338,6 @@ PHP_FPM_INSTALL_PHPDBG=true ``` - -## Setup remote debugging for PhpStorm on Linux - - - Make sure you have followed the steps above in the [Install Xdebug section](#install-xdebug). - - - Make sure Xdebug accepts connections and listens on port 9000. (Should be default configuration). - -![Debug Configuration](/images/photos/PHPStorm/linux/configuration/debugConfiguration.png "Debug Configuration"). - - - Create a server with name `laradock` (matches **PHP_IDE_CONFIG** key in environment file) and make sure to map project root path with server correctly. - -![Server Configuration](/images/photos/PHPStorm/linux/configuration/serverConfiguration.png "Server Configuration"). - - - Start listening for debug connections, place a breakpoint and you are good to go ! - - -
- -## Start/Stop xDebug: - -By installing xDebug, you are enabling it to run on startup by default. - -To control the behavior of xDebug (in the `php-fpm` Container), you can run the following commands from the Laradock root folder, (at the same prompt where you run docker-compose): - -- Stop xDebug from running by default: `.php-fpm/xdebug stop`. -- Start xDebug by default: `.php-fpm/xdebug start`. -- See the status: `.php-fpm/xdebug status`. - -Note: If `.php-fpm/xdebug` doesn't execute and gives `Permission Denied` error the problem can be that file `xdebug` doesn't have execution access. This can be fixed by running `chmod` command with desired access permissions. - -
@@ -394,6 +381,37 @@ Always download the latest version of [Loaders for ionCube ](http://www.ioncube. +
+ + +## Install SonarQube (automatic code review tool) +SonarQube® is an automatic code review tool to detect bugs, vulnerabilities and code smells in your code. It can integrate with your existing workflow to enable continuous code inspection across your project branches and pull requests. +
+1 - Open the `.env` file +
+2 - Search for the `SONARQUBE_HOSTNAME=sonar.example.com` argument +
+3 - Set it to your domain, e.g. `sonar.example.com` +
+4 - `docker-compose up -d sonarqube` +
+5 - Open your browser: http://localhost:9000/ + +Troubleshooting: + +If you encounter a database error: +``` +docker-compose exec --user=root postgres bash +# then, inside the container: +source docker-entrypoint-initdb.d/init_sonarqube_db.sh +``` + +If you encounter a logs error: +``` +docker-compose run --user=root --rm sonarqube chown sonarqube:sonarqube /opt/sonarqube/logs +``` +[**SonarQube Documentation Here**](https://docs.sonarqube.org/latest/) + + @@ -409,7 +427,9 @@ Always download the latest version of [Loaders for ionCube ](http://www.ioncube. ## Prepare Laradock for Production -It's recommended for production to create a custom `docker-compose.yml` file. For that reason, Laradock is shipped with `production-docker-compose.yml` which should contain only the containers you are planning to run on production (usage example: `docker-compose -f production-docker-compose.yml up -d nginx mysql redis ...`). +It's recommended for production to create a custom `docker-compose.yml` file, for example `production-docker-compose.yml`. + +Your new production `docker-compose.yml` file should contain only the containers you are planning to run in production (usage example: `docker-compose -f production-docker-compose.yml up -d nginx mysql redis ...`). Note: The Database (MySQL/MariaDB/...) ports should not be forwarded on production, because Docker will automatically publish the port on the host, which is quite insecure, unless specifically told not to. So make sure to remove these lines: @@ -532,29 +552,8 @@ phpunit ## Run Laravel Queue Worker -1 - First add `php-worker` container. It will be similar as like PHP-FPM Container. -
-a) open the `docker-compose.yml` file -
-b) add a new service container by simply copy-paste this section below PHP-FPM container +1 - Create a supervisor configuration file (e.g. named `laravel-worker.conf`) for the Laravel Queue Worker in `php-worker/supervisord.d/` by simply copying the shipped `laravel-worker.conf.example` (see the command below) -```yaml - php-worker: - build: - context: ./php-worker - args: - - INSTALL_PGSQL=${PHP_WORKER_INSTALL_PGSQL} #Optionally install PGSQL PHP drivers - - INSTALL_BCMATH=${PHP_WORKER_INSTALL_BCMATH} #Optionally install BCMath php package - - INSTALL_SOAP=${PHP_WORKER_INSTALL_SOAP} #Optionally install Soap php package - volumes_from: - - applications - depends_on: - - workspace - extra_hosts: - - "dockerhost:${DOCKER_HOST_IP}" - networks: - - backend -``` 2 - Start everything up ```bash docker-compose up -d php-worker ``` @@ -566,6 +565,34 @@ + +
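For step 1 above, copying the shipped example into place can look like this (paths as stated in that step; if your setup copies `supervisord.d/` into the image at build time, rebuild `php-worker` before starting it):

```bash
cp php-worker/supervisord.d/laravel-worker.conf.example php-worker/supervisord.d/laravel-worker.conf

# rebuild in case the config is baked into the image
docker-compose build php-worker
```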
+ +## Run Laravel Scheduler + +Laradock provides 2 ways to run the Laravel Scheduler: +1 - Using cron in the workspace container. Most of the time, when you start Laradock, it will automatically start the workspace container with cron inside, configured to run the `schedule:run` command every minute. + +2 - Using Supervisord in php-worker to run `schedule:run`. This approach is suggested when you don't want to start the workspace container in a production environment. +
+a) Comment out the cron entry in the workspace container's crontab file `workspace/crontab/laradock` + +```bash +# * * * * * laradock /usr/bin/php /var/www/artisan schedule:run >> /dev/null 2>&1 +``` +
+b) Create a supervisor configuration file (e.g. named `laravel-scheduler.conf`) for the Laravel Scheduler in `php-worker/supervisord.d/` by simply copying the shipped `laravel-scheduler.conf.example` (see the sketch after these steps) +
+c) Start php-worker container + +```bash +docker-compose up -d php-worker +``` + + + + + +
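For reference, a minimal sketch of what such a `laravel-scheduler.conf` supervisor program could look like (illustrative values only; start from the shipped `laravel-scheduler.conf.example`):

```
[program:laravel-scheduler]
process_name=%(program_name)s_%(process_num)02d
command=/bin/sh -c "while true; do php /var/www/artisan schedule:run --verbose --no-interaction; sleep 60; done"
autostart=true
autorestart=true
numprocs=1
redirect_stderr=true
```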
## Use Mailu @@ -624,12 +651,12 @@ docker-compose up -d metabase 1) Boot the container `docker-compose up -d jenkins`. To enter the container type `docker-compose exec jenkins bash`. -2) Go to `http://localhost:8090/` (if you didn't chanhed your default port mapping) +2) Go to `http://localhost:8090/` (if you didn't change your default port mapping) 3) Authenticate from the web app. - Default username is `admin`. -- Default password is `docker-compose exec jenkins cat /var/jenkins_home/secrets/initialAdminPassword`. +- The default password can be retrieved with `docker-compose exec jenkins cat /var/jenkins_home/secrets/initialAdminPassword`. (To enter the container as root, type `docker-compose exec --user root jenkins bash`). @@ -701,6 +728,44 @@ composer require predis/predis:^1.0 + +
+ +## Use Redis Cluster + +1 - First make sure you run the Redis-Cluster Container (`redis-cluster`) with the `docker-compose up` command. + +```bash +docker-compose up -d redis-cluster +``` + +2 - Open your Laravel `config/database.php` and set the redis cluster configuration. Below is an example configuration using phpredis. + +Read the [Laravel official documentation](https://laravel.com/docs/5.7/redis#configuration) for more details. + +```php +'redis' => [ + 'client' => 'phpredis', + 'options' => [ + 'cluster' => 'redis', + ], + 'clusters' => [ + 'default' => [ + [ + 'host' => 'redis-cluster', + 'password' => null, + 'port' => 7000, + 'database' => 0, + ], + ], + ], +], +``` + + + + + +
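To quickly check that the cluster is reachable, you can ping one node in cluster mode (assuming `redis-cli` is available inside the `redis-cluster` container and a node listens on port 7000, as configured above):

```bash
# -c enables cluster mode so redirects are followed
docker-compose exec redis-cluster redis-cli -c -p 7000 ping
```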
## Use Mongo @@ -817,6 +882,67 @@ docker-compose up -d gitlab +
+ +## Use Gitlab Runner + +1 - Retrieve the registration token in your gitlab project (Settings > CI / CD > Runners > Set up a specific Runner manually) + +2 - Open the `.env` file and make the following changes: +``` +# so that gitlab container will pass the correct domain to gitlab-runner container +GITLAB_DOMAIN_NAME=http://gitlab + +GITLAB_RUNNER_REGISTRATION_TOKEN= + +# so that gitlab-runner container will send POST request for registration to correct domain +GITLAB_CI_SERVER_URL=http://gitlab +``` + +3 - Open the `docker-compose.yml` file and add the following changes: +```yml + gitlab-runner: + environment: # these values will be used during `gitlab-runner register` + - RUNNER_EXECUTOR=docker # change from shell (default) + - DOCKER_IMAGE=alpine + - DOCKER_NETWORK_MODE=laradock_backend + networks: + - backend # connect to network where gitlab service is connected +``` + +4 - Run the Gitlab-Runner Container (`gitlab-runner`) with the `docker-compose up` command. Example: + +```bash +docker-compose up -d gitlab-runner +``` + +5 - Register the gitlab-runner with the gitlab container + +```bash +docker-compose exec gitlab-runner bash +gitlab-runner register +``` + +6 - Create a `.gitlab-ci.yml` file for your pipeline + +```yml +before_script: + - echo Hello! + +job1: + script: + - echo job1 +``` + +7 - Push the changes to gitlab + +8 - Verify that the pipeline ran successfully + + + + + +
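After registering, a quick way to confirm the runner is attached is the standard `gitlab-runner list` subcommand:

```bash
docker-compose exec gitlab-runner gitlab-runner list
```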
## Use Adminer @@ -917,8 +1043,21 @@ _Note: You can customize the port on which beanstalkd console is listening by ch +
+ +## Use Confluence +1 - Run the Confluence Container (`confluence`) with the `docker-compose up` command. Example: +```bash +docker-compose up -d confluence +``` + +2 - Open your browser and visit the localhost on port **8090**: `http://localhost:8090` + +**Note:** You can use the trial version, but then you have to buy a licence to keep using it. + +You can set a custom Confluence version in `CONFLUENCE_VERSION`. [Find more info in the 'Versioning' section](https://hub.docker.com/r/atlassian/confluence-server/)
@@ -1011,8 +1150,9 @@ docker-compose up -d rethinkdb - set the `DB_DATABASE` to `database`. +#### Additional Notes - +- You can back up your data by following the official guide: [backing up your data](https://www.rethinkdb.com/docs/backup/).
@@ -1114,6 +1254,140 @@ docker-compose up -d grafana +
+ +## Use Graylog + +1 - Boot the container `docker-compose up -d graylog` + +2 - Open Laradock's `.env` file and set `GRAYLOG_PASSWORD` to some password, and `GRAYLOG_SHA256_PASSWORD` to the sha256 representation of your password (`GRAYLOG_SHA256_PASSWORD` is what matters, `GRAYLOG_PASSWORD` is just a reminder of your password). + +> Your password must be at least 16 characters long +> You can generate the sha256 of a password with the following command: `echo -n somesupersecretpassword | sha256sum` + +```env +GRAYLOG_PASSWORD=somesupersecretpassword +GRAYLOG_SHA256_PASSWORD=b1cb6e31e172577918c9e7806c572b5ed8477d3f57aa737bee4b5b1db3696f09 +``` + +3 - Go to `http://localhost:9000/` (if you haven't changed the port) + +4 - Authenticate from the app. + +> Username: admin +> Password: somesupersecretpassword (if you haven't changed the password) + +5 - Go to System -> Inputs and launch a new input + + + + + +
+ +## Use Traefik + +To use Traefik you need to make some changes in `traefik/traefik.toml` and `docker-compose.yml`. + +1 - Open `traefik.toml` and change the `email` property in the `acme` section. + +2 - Change your domain in `acme.domains`. For example: `main = "example.org"` + +2.1 - If you have subdomains, you must add them to the `sans` property in the `acme.domains` section. + +```toml +[[acme.domains]] + main = "example.org" + sans = ["monitor.example.org", "pma.example.org"] +``` + +3 - If you need to add basic authentication (https://docs.traefik.io/configuration/entrypoints/#basic-authentication), you just need to add the following text after `[entryPoints.https.tls]`: + +```toml +[entryPoints.https.auth.basic] + users = ["user:password"] +``` + +4 - You need to change the `docker-compose.yml` file to match Traefik's needs. If you want to use Traefik, you must not expose the ports of each container to the internet, but specify some labels instead. + +4.1 For example, let's try with NGINX. You must have: + +```yaml +nginx: + build: + context: ./nginx + args: + - PHP_UPSTREAM_CONTAINER=${NGINX_PHP_UPSTREAM_CONTAINER} + - PHP_UPSTREAM_PORT=${NGINX_PHP_UPSTREAM_PORT} + - CHANGE_SOURCE=${CHANGE_SOURCE} + volumes: + - ${APP_CODE_PATH_HOST}:${APP_CODE_PATH_CONTAINER} + - ${NGINX_HOST_LOG_PATH}:/var/log/nginx + - ${NGINX_SITES_PATH}:/etc/nginx/sites-available + depends_on: + - php-fpm + networks: + - frontend + - backend + labels: + - traefik.backend=nginx + - traefik.frontend.rule=Host:example.org + - traefik.port=80 +``` + +instead of + +```yaml +nginx: + build: + context: ./nginx + args: + - PHP_UPSTREAM_CONTAINER=${NGINX_PHP_UPSTREAM_CONTAINER} + - PHP_UPSTREAM_PORT=${NGINX_PHP_UPSTREAM_PORT} + - CHANGE_SOURCE=${CHANGE_SOURCE} + volumes: + - ${APP_CODE_PATH_HOST}:${APP_CODE_PATH_CONTAINER} + - ${NGINX_HOST_LOG_PATH}:/var/log/nginx + - ${NGINX_SITES_PATH}:/etc/nginx/sites-available + - ${NGINX_SSL_PATH}:/etc/nginx/ssl + ports: + - "${NGINX_HOST_HTTP_PORT}:80" + - "${NGINX_HOST_HTTPS_PORT}:443" + depends_on: + - php-fpm + networks: + - frontend + - backend +``` + + + + +
+ +## Use Mosquitto (MQTT Broker) + +1 - Configure Mosquitto: change the port using `MOSQUITTO_PORT` if you wish to. The default is port 9001. + +2 - Run the Mosquitto Container (`mosquitto`) with the `docker-compose up` command: + +```bash +docker-compose up -d mosquitto +``` + +3 - Open your command line and use an MQTT client (e.g. https://github.com/mqttjs/MQTT.js) to subscribe to a topic and publish a message. + +4 - Subscribe: `mqtt sub -t 'test' -h localhost -p 9001 -C 'ws' -v` + +5 - Publish: `mqtt pub -t 'test' -h localhost -p 9001 -C 'ws' -m 'Hello!'` + + + + + +
@@ -1139,6 +1413,21 @@ To install CodeIgniter 3 on Laradock all you have to do is the following simple +
+ +## Install Powerline + +1 - Open the `.env` file and set `WORKSPACE_INSTALL_POWERLINE` and `WORKSPACE_INSTALL_PYTHON` to `true`. + +2 - Run `docker-compose build workspace`, after the step above. + +Powerline requires Python, which is why `WORKSPACE_INSTALL_PYTHON` must be enabled as well. + + + + + +
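The relevant `.env` entries would then look like this:

```dotenv
WORKSPACE_INSTALL_PYTHON=true
WORKSPACE_INSTALL_POWERLINE=true
```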
## Install Symfony @@ -1188,6 +1477,19 @@ We also recommend [setting the timezone in Laravel](http://www.camroncade.com/ma +
+ +## Add locales to PHP-FPM + +To add locales to the container: + +1 - Open the `.env` file and set `PHP_FPM_INSTALL_ADDITIONAL_LOCALES` to `true`. + +2 - Add locale codes to `PHP_FPM_ADDITIONAL_LOCALES`. + +3 - Re-build your PHP-FPM Container `docker-compose build php-fpm`. + +4 - Check enabled locales with `docker-compose exec php-fpm locale -a` @@ -1198,7 +1500,7 @@ We also recommend [setting the timezone in Laravel](http://www.camroncade.com/ma You can add your cron jobs to `workspace/crontab/root` after the `php artisan` line. ``` -* * * * * php /var/www/artisan schedule:run >> /dev/null 2>&1 +* * * * * laradock /usr/bin/php /var/www/artisan schedule:run >> /dev/null 2>&1 # Custom cron * * * * * root echo "Every Minute" > /var/log/cron.log 2>&1 @@ -1266,22 +1568,6 @@ Available versions are: 5.5, 5.6, 5.7, 8.0, or latest. See https://store.docker -
- -## MySQL access from host - -You can forward the MySQL/MariaDB port to your host by making sure these lines are added to the `mysql` or `mariadb` section of the `docker-compose.yml` or in your [environment specific Compose](https://docs.docker.com/compose/extends/) file. - -``` -ports: - - "3306:3306" -``` - - - - - -
## MySQL root access @@ -1380,6 +1666,23 @@ Enabling Global Composer Install during the build for the container allows you t +
+ +## Add authentication credentials for Magento 2 + +1 - Open the `.env` file + +2 - Search for the `WORKSPACE_COMPOSER_AUTH` argument under the Workspace Container and set it to `true` + +3 - Now add your credentials to `workspace/auth.json` (see the sketch below) + +4 - Re-build the Workspace Container `docker-compose build workspace` + + + + +
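For reference, Composer's standard `auth.json` shape for Magento's repository looks roughly like this (the username/password are placeholders for the public/private keys from your Magento Marketplace account):

```
{
    "http-basic": {
        "repo.magento.com": {
            "username": "<your-public-key>",
            "password": "<your-private-key>"
        }
    }
}
```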
## Install Prestissimo @@ -1488,6 +1791,22 @@ To install NPM VUE CLI in the Workspace container +
+ +## Install NPM ANGULAR CLI + +To install NPM ANGULAR CLI in the Workspace container + +1 - Open the `.env` file + +2 - Search for the `WORKSPACE_INSTALL_NPM_ANGULAR_CLI` argument under the Workspace Container and set it to `true` + +3 - Re-build the container `docker-compose build workspace` + + + + +
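After rebuilding, you can verify the CLI is available inside the workspace (assuming the package installs the standard `ng` binary):

```bash
docker-compose exec workspace ng --version
```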
@@ -1505,6 +1824,47 @@ Linuxbrew is a package manager for Linux. It is the Linux version of MacOS Homeb +
+ +## Install FFMPEG + +To install FFMPEG in the Workspace container + +1 - Open the `.env` file + +2 - Search for the `WORKSPACE_INSTALL_FFMPEG` argument under the Workspace Container and set it to `true` + +3 - Re-build the container `docker-compose build workspace` + +4 - If you use the `php-worker` container too, please follow the same steps above, especially if you have queued conversions. + +**PS** Don't forget to install the binary in the `php-fpm` container too by applying the same steps above to it, otherwise you'll get an error when using `php-ffmpeg`. + + + + + + +
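Once the containers are rebuilt, a quick sanity check from the host (assuming the binary was installed into the workspace image):

```bash
docker-compose exec workspace ffmpeg -version
```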
+ +## Install GNU Parallel + +GNU Parallel is a command line tool to run multiple processes in parallel. + +(see https://www.gnu.org/software/parallel/parallel_tutorial.html) + +To install GNU Parallel in the Workspace container + +1 - Open the `.env` file + +2 - Search for the `WORKSPACE_INSTALL_GNU_PARALLEL` argument under the Workspace Container and set it to `true` + +3 - Re-build the container `docker-compose build workspace` + + + + +
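As a quick smoke test inside the workspace container, the following runs three jobs concurrently using GNU Parallel's `:::` argument syntax:

```bash
docker-compose exec workspace parallel echo ::: one two three
```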
@@ -1640,22 +2000,6 @@ Remote debug Laravel web and phpunit tests. -
- -## Upgrading Laradock - -Moving from Docker Toolbox (VirtualBox) to Docker Native (for Mac/Windows). Requires upgrading Laradock from v3.* to v4.*: - -1. Stop the docker VM `docker-machine stop {default}` -2. Install Docker for [Mac](https://docs.docker.com/docker-for-mac/) or [Windows](https://docs.docker.com/docker-for-windows/). -3. Upgrade Laradock to `v4.*.*` (`git pull origin master`) -4. Use Laradock as you used to do: `docker-compose up -d nginx mysql`. - -**Note:** If you face any problem with the last step above: rebuild all your containers -`docker-compose build --no-cache` -"Warning Containers Data might be lost!" - - @@ -1720,7 +2064,7 @@ Laradock comes with `sync.sh`, an optional bash script, that automates installin DOCKER_SYNC_STRATEGY=native_osx ``` -3) set `APP_CODE_PATH_CONTAINER=/var/www` to `APP_CODE_PATH_CONTAINER=/var/www:nocopy` in the .env file +3) set `APP_CODE_CONTAINER_FLAG` to `APP_CODE_CONTAINER_FLAG=:nocopy` in the .env file 4) Install the docker-sync gem on the host-machine: ```bash @@ -1823,126 +2167,17 @@ docker-compose up ... - - -
- -## Common Problems + +## Upgrade Laradock -*Here's a list of the common problems you might face, and the possible solutions.* +Moving from Docker Toolbox (VirtualBox) to Docker Native (for Mac/Windows). Requires upgrading Laradock from v3.* to v4.*: +1. Stop the docker VM `docker-machine stop {default}` +2. Install Docker for [Mac](https://docs.docker.com/docker-for-mac/) or [Windows](https://docs.docker.com/docker-for-windows/). +3. Upgrade Laradock to `v4.*.*` (`git pull origin master`) +4. Use Laradock as you used to do: `docker-compose up -d nginx mysql`. - - - - -
-## I see a blank (white) page instead of the Laravel 'Welcome' page! - -Run the following command from the Laravel root directory: - -```bash -sudo chmod -R 777 storage bootstrap/cache -``` - - - - - - -
-## I see "Welcome to nginx" instead of the Laravel App! - -Use `http://127.0.0.1` instead of `http://localhost` in your browser. - - - - - - -
-## I see an error message containing `address already in use` or `port is already allocated` - -Make sure the ports for the services that you are trying to run (22, 80, 443, 3306, etc.) are not being used already by other programs on the host, such as a built in `apache`/`httpd` service or other development tools you have installed. - - - - - - -
-## I get NGINX error 404 Not Found on Windows. - -1. Go to docker Settings on your Windows machine. -2. Click on the `Shared Drives` tab and check the drive that contains your project files. -3. Enter your windows username and password. -4. Go to the `reset` tab and click restart docker. - - - - - - -
-## The time in my services does not match the current time - -1. Make sure you've [changed the timezone](#Change-the-timezone). -2. Stop and rebuild the containers (`docker-compose up -d --build `) - - - - - - -
-## I get MySQL connection refused - -This error sometimes happens because your Laravel application isn't running on the container localhost IP (Which is 127.0.0.1). Steps to fix it: - -* Option A - 1. Check your running Laravel application IP by dumping `Request::ip()` variable using `dd(Request::ip())` anywhere on your application. The result is the IP of your Laravel container. - 2. Change the `DB_HOST` variable on env with the IP that you received from previous step. -* Option B - 1. Change the `DB_HOST` value to the same name as the MySQL docker container. The Laradock docker-compose file currently has this as `mysql` - -## I get stuck when building nginx on `fetch http://mirrors.aliyun.com/alpine/v3.5/main/x86_64/APKINDEX.tar.gz` - -As stated on [#749](https://github.com/laradock/laradock/issues/749#issuecomment-419652646), Already fixed,just set `CHANGE_SOURCE` to false. - -## Custom composer repo packagist url and npm registry url - -In China, the origin source of composer and npm is very slow. You can add `WORKSPACE_NPM_REGISTRY` and `WORKSPACE_COMPOSER_REPO_PACKAGIST` config in `.env` to use your custom source. - -Example: -```bash -WORKSPACE_NPM_REGISTRY=https://registry.npm.taobao.org -WORKSPACE_COMPOSER_REPO_PACKAGIST=https://packagist.phpcomposer.com -``` - -
- -## I get `Module build failed: Error: write EPIPE` while compiling react application - -When you run `npm build` or `yarn dev` building a react application using webpack with elixir you may receive a `Error: write EPIPE` while processing .jpg images. - -This is caused of an outdated library for processing **.jpg files** in ubuntu 16.04. - -To fix the problem you can follow those steps - -1 - Open the `.env`. - -2 - Search for `WORKSPACE_INSTALL_LIBPNG` or add the key if missing. - -3 - Set the value to true: - -```dotenv -WORKSPACE_INSTALL_LIBPNG=true -``` - -4 - Finally rebuild the workspace image - -```bash -docker-compose build workspace -``` - diff --git a/DOCUMENTATION/content/getting-started/index.md index 8262ead1..f71661da 100644 --- a/DOCUMENTATION/content/getting-started/index.md +++ b/DOCUMENTATION/content/getting-started/index.md @@ -1,10 +1,10 @@ --- -title: Getting Started +title: 2. Getting Started type: index weight: 2 --- -## Requirements +## 2.1 Requirements - [Git](https://git-scm.com/downloads) - [Docker](https://www.docker.com/products/docker/) `>= 17.12` @@ -12,10 +12,7 @@ weight: 2 - - - -## Installation +## 2.2 Installation Choose the setup the best suits your needs. @@ -44,7 +41,7 @@ Note: If you are not using Git yet for your project, you can use `git clone` ins *To keep track of your Laradock changes, between your projects and also keep Laradock updated [check these docs](/documentation/#keep-track-of-your-laradock-changes)* -Your folder structure should look like this: +2 - Make sure your folder structure looks like this: ``` + project-a + laradock-a + project-b + laradock-b ``` *(It's important to rename the laradock folders to unique name in each project, if you want to run laradock per project).* -> **Now jump to the [Usage](#Usage) section.** +3 - Go to the [Usage](#Usage) section. ### A.2) Don't have a PHP project yet: @@ -89,7 +86,7 @@ APP_CODE_PATH_HOST=../project-z/ Make sure to replace `project-z` with your project folder name. -> **Now jump to the [Usage](#Usage) section.** +3 - Go to the [Usage](#Usage) section. @@ -110,9 +107,11 @@ Your folder structure should look like this: + project-1 + project-2 ``` -2 - Go to `nginx/sites` and create config files to point to different project directory when visiting different domains. +2 - Go to your web server folder and create config files that point to the different project directories when visiting the different domains: -Laradock by default includes `app.conf.example`, `laravel.conf.example` and `symfony.conf.example` as working samples. +For **Nginx** go to `nginx/sites`, for **Apache2** `apache2/sites`. + +Laradock by default includes some sample files for you to copy: `app.conf.example`, `laravel.conf.example` and `symfony.conf.example`. 3 - change the default names `*.conf`: @@ -125,9 +124,10 @@ You can rename the config files, project folders and domains as you like, just m 127.0.0.1 project-2.test ... ``` + If you use Chrome 63 or above for development, don't use `.dev`. [Why?](https://laravel-news.com/chrome-63-now-forces-dev-domains-https). Instead use `.localhost`, `.invalid`, `.test`, or `.example`. -> **Now jump to the [Usage](#Usage) section.** +4 - Go to the [Usage](#Usage) section. @@ -136,7 +136,7 @@ If you use Chrome 63 or above for development, don't use `.dev`. 
[Why?](https://laravel-news.com/chrome-63-now-forces-dev-domains-https). Instead use `.localhost`, `.invalid`, `.test`, or `.example`. -## Usage +## 2.3 Usage **Read Before starting:** @@ -213,7 +213,16 @@ Open your PHP project's `.env` file or whichever configuration file you are read DB_HOST=mysql ``` +You need to use Laradock's default DB credentials, which can be found in the `.env` file (ex: `MYSQL_USER=`). +Or you can change them and rebuild the container. + *If you want to install Laravel as PHP project, see [How to Install Laravel in a Docker Container](#Install-Laravel).*
-5 - Open your browser and visit your localhost address `http://localhost/`. If you followed the multiple projects setup, you can visit `http://project-1.test/` and `http://project-2.test/`. +5 - Open your browser and visit your localhost address. + +If you followed the multiple projects setup, you can visit `http://project-1.test/` and `http://project-2.test/`. + +[http://localhost:8080](http://localhost:8080) + +Make sure you use the right port number as provided by your running server. Ex: NGINX uses port 8080 by default, while Apache2 uses 80. diff --git a/DOCUMENTATION/content/guides/index.md index 03f0b960..451c0027 100644 --- a/DOCUMENTATION/content/guides/index.md +++ b/DOCUMENTATION/content/guides/index.md @@ -1,21 +1,14 @@ --- -title: Guides +title: 4. Guides type: index weight: 4 --- - -* [Production Setup on Digital Ocean](#Digital-Ocean) -* [PHPStorm XDebug Setup](#PHPStorm-Debugging) -* [Running Laravel Dusk Test](#Laravel-Dusk) - - - -# Production Setup on Digital Ocean +## Production Setup on Digital Ocean -## Install Docker +### Install Docker - Visit [DigitalOcean](https://cloud.digitalocean.com/login) and login. - Click the `Create Droplet` button. @@ -24,7 +17,7 @@ weight: 4 - Continue creating the droplet as you normally would. - If needed, check your e-mail for the droplet root password. -## SSH to your Server +### SSH to your Server Find the IP address of the droplet in the DigitalOcean interface. Use it to connect to the server. @@ -40,7 +33,7 @@ You can now check if Docker is available: $root@server:~# docker ``` -## Set Up Your Laravel Project +### Set Up Your Laravel Project ``` $root@server:~# apt-get install git $root@server:~# git clone https://github.com/laravel/laravel $root@server:~/laravel/ git submodule add https://github.com/Laradock/laradock.git $root@server:~/laravel/ cd laradock ``` -## Install docker-compose command - -``` -$root@server:~/laravel/laradock# curl -L https://github.com/docker/compose/releases/download/1.8.0/run.sh > /usr/local/bin/docker-compose -$root@server:~/chmod +x /usr/local/bin/docker-compose -``` -## Enter the laradock folder and rename env-example to .env. +### Enter the laradock folder and rename env-example to .env. ``` $root@server:~/laravel/laradock# cp env-example .env ``` -## Create Your Laradock Containers +### Create Your Laradock Containers ``` $root@server:~/laravel/laradock# docker-compose up -d nginx mysql ``` Note that more containers are available, find them in the [docs](http://laradock.io/introduction/#supported-software-containers) or the `docker-compose.yml` file. -## Go to Your Workspace +### Go to Your Workspace ``` docker-compose exec workspace bash ``` -## Install and configure Laravel +### Execute commands + +If you only want to execute some command and don't want to enter bash, you can execute `docker-compose run workspace <command>`. + +``` +docker-compose run workspace php artisan migrate +``` + +### Install and configure Laravel Let's install Laravel's dependencies, add the `.env` file, generate the key and give proper permissions to the cache folder. @@ -98,7 +93,7 @@ It should show you the Laravel default welcome page. However, we want it to show up using your custom domain name, as well. -## Using Your Own Domain Name +### Using Your Own Domain Name Login to your DNS provider, such as Godaddy, Namecheap. 
@@ -116,7 +111,7 @@ Visit: https://cloud.digitalocean.com/networking/domains Add your domain name and choose the server IP you'd provision earlier. -## Serving Site With NGINX (HTTP ONLY) +### Serving Site With NGINX (HTTP ONLY) Go back to command line. @@ -140,14 +135,14 @@ And add `server_name` (your custom domain) server_name yourdomain.com; ``` -## Rebuild Your Nginx +### Rebuild Your Nginx ``` $root@server:~/laravel/laradock# docker-compose down $root@server:~/laravel/laradock# docker-compose build nginx ``` -## Re Run Your Containers MYSQL and NGINX +### Re Run Your Containers MYSQL and NGINX ``` $root@server:~/laravel/laradock/nginx# docker-compose up -d nginx mysql @@ -155,7 +150,7 @@ $root@server:~/laravel/laradock/nginx# docker-compose up -d nginx mysql **View Your Site with HTTP ONLY (http://yourdomain.com)** -## Run Site on SSL with Let's Encrypt Certificate +### Run Site on SSL with Let's Encrypt Certificate **Note: You need to Use Caddy here Instead of Nginx** @@ -194,7 +189,7 @@ tls serverbreaker@gmai.com This is needed Prior to Creating Let's Encypt -## Run Your Caddy Container without the -d flag and Generate SSL with Let's Encrypt +### Run Your Caddy Container without the -d flag and Generate SSL with Let's Encrypt ``` $root@server:~/laravel/laradock# docker-compose up caddy @@ -215,7 +210,7 @@ caddy_1 | http://yourdomain.com After it finishes, press `Ctrl` + `C` to exit. -## Stop All Containers and ReRun Caddy and Other Containers on Background +### Stop All Containers and ReRun Caddy and Other Containers on Background ``` $root@server:~/laravel/laradock# docker-compose down @@ -236,326 +231,6 @@ View your Site in the Browser Securely Using HTTPS (https://yourdomain.com) - [https://caddyserver.com/docs/tls](https://caddyserver.com/docs/tls) - [https://caddyserver.com/docs/caddyfile](https://caddyserver.com/docs/caddyfile) - - - - -
-
-
-
-
- - -# PHPStorm XDebug Setup - -- [Intro](#Intro) -- [Installation](#Installation) - - [Customize laradock/docker-compose.yml](#CustomizeDockerCompose) - - [Clean House](#InstallCleanHouse) - - [Laradock Dial Tone](#InstallLaradockDialTone) - - [hosts](#AddToHosts) - - [Firewall](#FireWall) - - [Enable xDebug on php-fpm](#enablePhpXdebug) - - [PHPStorm Settings](#InstallPHPStorm) - - [Configs](#InstallPHPStormConfigs) -- [Usage](#Usage) - - [Laravel](#UsageLaravel) - - [Run ExampleTest](#UsagePHPStormRunExampleTest) - - [Debug ExampleTest](#UsagePHPStormDebugExampleTest) - - [Debug Web Site](#UsagePHPStormDebugSite) -- [SSH into workspace](#SSHintoWorkspace) - - [KiTTY](#InstallKiTTY) - - -## Intro - -Wiring up [Laravel](https://laravel.com/), [Laradock](https://github.com/Laradock/laradock) [Laravel+Docker] and [PHPStorm](https://www.jetbrains.com/phpstorm/) to play nice together complete with remote xdebug'ing as icing on top! Although this guide is based on `PHPStorm Windows`, -you should be able to adjust accordingly. This guide was written based on Docker for Windows Native. - - -## Installation - -- This guide assumes the following: - - you have already installed and are familiar with Laravel, Laradock and PHPStorm. - - you have installed Laravel as a parent of `laradock`. This guide assumes `/c/_dk/laravel`. - - -## hosts -- Add `laravel` to your hosts file located on Windows 10 at `C:\Windows\System32\drivers\etc\hosts`. It should be set to the IP of your running container. Mine is: `10.0.75.2` -On Windows you can find it by opening Windows `Hyper-V Manager`. - - ![Windows Hyper-V Manager](images/photos/PHPStorm/Settings/WindowsHyperVManager.png) - -- [Hosts File Editor](https://github.com/scottlerch/HostsFileEditor) makes it easy to change your hosts file. - - Set `laravel` to your docker host IP. See [Example](images/photos/SimpleHostsEditor/AddHost_laravel.png). - - - -## Firewall -Your PHPStorm will need to be able to receive a connection from PHP xdebug either your running workspace or php-fpm containers on port 9000. This means that your Windows Firewall should either enable connections from the Application PHPStorm OR the port. - -- It is important to note that if the Application PHPStorm is NOT enabled in the firewall, you will not be able to recreate a rule to override that. -- Also be aware that if you are installing/upgrade different versions of PHPStorm, you MAY have orphaned references to PHPStorm in your Firewall! You may decide to remove orphaned references however in either case, make sure that they are set to receive public TCP traffic. - -### Edit laradock/docker-compose.yml -Set the following variables: -``` -### Workspace Utilities Container ############### - - workspace: - build: - context: ./workspace - args: - - INSTALL_XDEBUG=true - - INSTALL_WORKSPACE_SSH=true - ... - - -### PHP-FPM Container ##################### - - php-fpm: - build: - context: ./php-fpm - args: - - INSTALL_XDEBUG=true - ... - -``` - -### Edit xdebug.ini files -- `laradock/workspace/xdebug.ini` -- `laradock/php-fpm/xdebug.ini` - -Set the following variables: - -``` -xdebug.remote_autostart=1 -xdebug.remote_enable=1 -xdebug.remote_connect_back=1 -xdebug.cli_color=1 -``` - - - -### Need to clean house first? - -Make sure you are starting with a clean state. For example, do you have other Laradock containers and images? -Here are a few things I use to clean things up. 
- -- Delete all containers using `grep laradock_` on the names, see: [Remove all containers based on docker image name](https://linuxconfig.org/remove-all-containners-based-on-docker-image-name). - -`docker ps -a | awk '{ print $1,$2 }' | grep laradock_ | awk '{print $1}' | xargs -I {} docker rm {}` - -- Delete all images containing `laradock`. - -`docker images | awk '{print $1,$2,$3}' | grep laradock_ | awk '{print $3}' | xargs -I {} docker rmi {}` -**Note:** This will only delete images that were built with `Laradock`, **NOT** `laradock/*` which are pulled down by `Laradock` such as `laradock/workspace`, etc. -**Note:** Some may fail with: -`Error response from daemon: conflict: unable to delete 3f38eaed93df (cannot be forced) - image has dependent child images` - -- I added this to my `.bashrc` to remove orphaned images. - -``` -dclean() { - processes=`docker ps -q -f status=exited` - if [ -n "$processes" ]; then - docker rm $processes - fi - - images=`docker images -q -f dangling=true` - if [ -n "$images" ]; then - docker rmi $images - fi -} -``` - -- If you frequently switch configurations for Laradock, you may find that adding the following and added to your `.bashrc` or equivalent useful: - -``` -# remove laravel* containers -# remove laravel_* images -dcleanlaradockfunction() -{ - echo 'Removing ALL containers associated with laradock' - docker ps -a | awk '{ print $1,$2 }' | grep laradock | awk '{print $1}' | xargs -I {} docker rm {} - - # remove ALL images associated with laradock_ - # does NOT delete laradock/* which are hub images - echo 'Removing ALL images associated with laradock_' - docker images | awk '{print $1,$2,$3}' | grep laradock_ | awk '{print $3}' | xargs -I {} docker rmi {} - - echo 'Listing all laradock docker hub images...' - docker images | grep laradock - - echo 'dcleanlaradock completed' -} -# associate the above function with an alias -# so can recall/lookup by typing 'alias' -alias dcleanlaradock=dcleanlaradockfunction -``` - - -## Let's get a dial-tone with Laravel - -``` -# barebones at this point -docker-compose up -d nginx mysql - -# run -docker-compose ps - -# Should see: - Name Command State Ports ------------------------------------------------------------------------------------------------------------ -laradock_mysql_1 docker-entrypoint.sh mysqld Up 0.0.0.0:3306->3306/tcp -laradock_nginx_1 nginx Up 0.0.0.0:443->443/tcp, 0.0.0.0:80->80/tcp -laradock_php-fpm_1 php-fpm Up 9000/tcp -laradock_volumes_data_1 true Exit 0 -laradock_volumes_source_1 true Exit 0 -laradock_workspace_1 /sbin/my_init Up 0.0.0.0:2222->22/tcp -``` - - -## Enable xDebug on php-fpm - -In a host terminal sitting in the laradock folder, run: `./php-fpm/xdebug status` -You should see something like the following: - -``` -xDebug status -laradock_php-fpm_1 -PHP 7.0.9 (cli) (built: Aug 10 2016 19:45:48) ( NTS ) -Copyright (c) 1997-2016 The PHP Group -Zend Engine v3.0.0, Copyright (c) 1998-2016 Zend Technologies - with Xdebug v2.4.1, Copyright (c) 2002-2016, by Derick Rethans -``` - -Other commands include `./php-fpm/xdebug start | stop`. - -If you have enabled `xdebug=true` in `docker-compose.yml/php-fpm`, `xdebug` will already be running when -`php-fpm` is started and listening for debug info on port 9000. 
- - - -## PHPStorm Settings - -- Here are some settings that are known to work: - - `Settings/BuildDeploymentConnection` - - ![Settings/BuildDeploymentConnection](/images/photos/PHPStorm/Settings/BuildDeploymentConnection.png) - - - `Settings/BuildDeploymentConnectionMappings` - - ![Settings/BuildDeploymentConnectionMappings](/images/photos/PHPStorm/Settings/BuildDeploymentConnectionMappings.png) - - - `Settings/BuildDeploymentDebugger` - - ![Settings/BuildDeploymentDebugger](/images/photos/PHPStorm/Settings/BuildDeploymentDebugger.png) - - - `Settings/EditRunConfigurationRemoteWebDebug` - - ![Settings/EditRunConfigurationRemoteWebDebug](/images/photos/PHPStorm/Settings/EditRunConfigurationRemoteWebDebug.png) - - - `Settings/EditRunConfigurationRemoteExampleTestDebug` - - ![Settings/EditRunConfigurationRemoteExampleTestDebug](/images/photos/PHPStorm/Settings/EditRunConfigurationRemoteExampleTestDebug.png) - - - `Settings/LangsPHPDebug` - - ![Settings/LangsPHPDebug](/images/photos/PHPStorm/Settings/LangsPHPDebug.png) - - - `Settings/LangsPHPInterpreters` - - ![Settings/LangsPHPInterpreters](/images/photos/PHPStorm/Settings/LangsPHPInterpreters.png) - - - `Settings/LangsPHPPHPUnit` - - ![Settings/LangsPHPPHPUnit](/images/photos/PHPStorm/Settings/LangsPHPPHPUnit.png) - - - `Settings/LangsPHPServers` - - ![Settings/LangsPHPServers](/images/photos/PHPStorm/Settings/LangsPHPServers.png) - - - `RemoteHost` - To switch on this view, go to: `Menu/Tools/Deployment/Browse Remote Host`. - - ![RemoteHost](/images/photos/PHPStorm/RemoteHost.png) - - - `RemoteWebDebug` - - ![DebugRemoteOn](/images/photos/PHPStorm/DebugRemoteOn.png) - - - `EditRunConfigurationRemoteWebDebug` - Go to: `Menu/Run/Edit Configurations`. - - ![EditRunConfigurationRemoteWebDebug](/images/photos/PHPStorm/Settings/EditRunConfigurationRemoteWebDebug.png) - - - `EditRunConfigurationRemoteExampleTestDebug` - Go to: `Menu/Run/Edit Configurations`. - - ![EditRunConfigurationRemoteExampleTestDebug](/images/photos/PHPStorm/Settings/EditRunConfigurationRemoteExampleTestDebug.png) - - - `WindowsFirewallAllowedApps` - Go to: `Control Panel\All Control Panel Items\Windows Firewall\Allowed apps`. - - ![WindowsFirewallAllowedApps.png](/images/photos/PHPStorm/Settings/WindowsFirewallAllowedApps.png) - - - `hosts` - Edit: `C:\Windows\System32\drivers\etc\hosts`. - - ![WindowsFirewallAllowedApps.png](/images/photos/PHPStorm/Settings/hosts.png) - - - [Enable xDebug on php-fpm](#enablePhpXdebug) - - - - -## Usage - - -### Run ExampleTest -- right-click on `tests/ExampleTest.php` - - Select: `Run 'ExampleTest.php'` or `Ctrl+Shift+F10`. - - Should pass!! You just ran a remote test via SSH! - - -### Debug ExampleTest -- Open to edit: `tests/ExampleTest.php` -- Add a BreakPoint on line 16: `$this->visit('/')` -- right-click on `tests/ExampleTest.php` - - Select: `Debug 'ExampleTest.php'`. - - Should have stopped at the BreakPoint!! You are now debugging locally against a remote Laravel project via SSH! - - ![Remote Test Debugging Success](/images/photos/PHPStorm/RemoteTestDebuggingSuccess.png) - - - -### Debug WebSite -- In case xDebug is disabled, from the `laradock` folder run: -`./php-fpm/xdebug start`. - - To switch xdebug off, run: -`./php-fpm/xdebug stop` - -- Start Remote Debugging - - ![DebugRemoteOn](/images/photos/PHPStorm/DebugRemoteOn.png) - -- Open to edit: `bootstrap/app.php` -- Add a BreakPoint on line 14: `$app = new Illuminate\Foundation\Application(` -- Reload [Laravel Site](http://laravel/) - - Should have stopped at the BreakPoint!! 
You are now debugging locally against a remote Laravel project via SSH! - - ![Remote Debugging Success](/images/photos/PHPStorm/RemoteDebuggingSuccess.png) - - - -## Let's shell into workspace -Assuming that you are in laradock folder, type: -`ssh -i workspace/insecure_id_rsa -p2222 root@laravel` -**Cha Ching!!!!** -- `workspace/insecure_id_rsa.ppk` may become corrupted. In which case: - - fire up `puttygen` - - import `workspace/insecure_id_rsa` - - save private key to `workspace/insecure_id_rsa.ppk` - - - -### KiTTY -[Kitty](http://www.9bis.net/kitty/) KiTTY is a fork from version 0.67 of PuTTY. - -- Here are some settings that are working for me: - - ![Session](/images/photos/KiTTY/Session.png) - - ![Terminal](/images/photos/KiTTY/Terminal.png) - - ![Window](/images/photos/KiTTY/Window.png) - - ![WindowAppearance](/images/photos/KiTTY/WindowAppearance.png) - - ![Connection](/images/photos/KiTTY/Connection.png) - - ![ConnectionData](/images/photos/KiTTY/ConnectionData.png) - - ![ConnectionSSH](/images/photos/KiTTY/ConnectionSSH.png) - - ![ConnectionSSHAuth](/images/photos/KiTTY/ConnectionSSHAuth.png) - - ![TerminalShell](/images/photos/KiTTY/TerminalShell.png) -


@@ -563,13 +238,9 @@ Assuming that you are in laradock folder, type:
-# Running Laravel Dusk Tests +## Running Laravel Dusk Tests -- [Option 1: Without Selenium](#option1-dusk) -- [Option 2: With Selenium](#option2-dusk) - - -## Option 1: Without Selenium +### Option 1: Without Selenium - [Intro](#option1-dusk-intro) - [Workspace Setup](#option1-workspace-setup) @@ -577,14 +248,12 @@ Assuming that you are in laradock folder, type: - [Choose Chrome Driver Version (Optional)](#option1-choose-chrome-driver-version) - [Run Dusk Tests](#option1-run-dusk-tests) - -### Intro +#### Intro This is a guide to run Dusk tests in your `workspace` container with headless google-chrome and chromedriver. It has been tested with Laravel 5.4 and 5.5. - -### Workspace Setup +#### Workspace Setup Update your .env with following entries: @@ -604,8 +273,7 @@ Then run below to build your workspace. docker-compose build workspace ``` - -### Application Setup +#### Application Setup Run a `workspace` container and you will be inside the container at `/var/www` directory. @@ -670,8 +338,7 @@ abstract class DuskTestCase extends BaseTestCase } ``` - -### Choose Chrome Driver Version (Optional) +#### Choose Chrome Driver Version (Optional) You could choose to use either: @@ -725,8 +392,7 @@ abstract class DuskTestCase extends BaseTestCase } ``` - -### Run Dusk Tests +#### Run Dusk Tests Run local server in `workspace` container and run Dusk tests. @@ -743,8 +409,7 @@ PHPUnit 6.4.0 by Sebastian Bergmann and contributors. Time: 837 ms, Memory: 6.00MB ``` - -## Option 2: With Selenium +### Option 2: With Selenium - [Intro](#dusk-intro) - [DNS Setup](#dns-setup) @@ -752,8 +417,7 @@ Time: 837 ms, Memory: 6.00MB - [Laravel Dusk Setup](#laravel-dusk-setup) - [Running Laravel Dusk Tests](#running-tests) - -### Intro +#### Intro Setting up Laravel Dusk tests to run with Laradock appears be something that eludes most Laradock users. This guide is designed to show you how to wire them up to work together. This guide is written with macOS and Linux in mind. As such, @@ -763,8 +427,7 @@ for Windows-specific instructions. This guide assumes you know how to use a DNS forwarder such as `dnsmasq` or are comfortable with editing the `/etc/hosts` file for one-off DNS changes. - -### DNS Setup +#### DNS Setup According to RFC-2606, only four TLDs are reserved for local testing[^1]: - `.test` @@ -797,8 +460,7 @@ For example, in your `/etc/hosts` file: This will ensure that when navigating to `myapp.test`, it will route the request to `127.0.0.1` which will be handled by Nginx in Laradock. - -### Docker Compose setup +#### Docker Compose setup In order to make the Selenium container talk to the Nginx container appropriately, the `docker-compose.yml` needs to be edited to accommodate this. Make the following changes: @@ -820,8 +482,7 @@ the Selenium container to make requests to the Nginx container, which is necessary for running Dusk tests. These changes also link the `nginx` environment variable to the domain you wired up in your hosts file. - -### Laravel Dusk Setup +#### Laravel Dusk Setup In order to make Laravel Dusk make the proper request to the Selenium container, you have to edit the `DuskTestCase.php` file that's provided on the initial @@ -831,13 +492,13 @@ Remote Web Driver attempts to use to set up the Selenium session. One recommendation for this is to add a separate config option in your `.env.dusk.local` so it's still possible to run your Dusk tests locally should you want to. -#### .env.dusk.local +##### .env.dusk.local ``` ... 
USE_SELENIUM=true ``` -#### DuskTestCase.php +##### DuskTestCase.php ```php abstract class DuskTestCase extends BaseTestCase { @@ -857,8 +518,7 @@ abstract class DuskTestCase extends BaseTestCase } ``` - -### Running Laravel Dusk Tests +#### Running Laravel Dusk Tests Now that you have everything set up, to run your Dusk tests, you have to SSH into the workspace container as you normally would: @@ -883,3 +543,326 @@ This invokes the Dusk command from inside the workspace container but when the s execution, it returns your session to your project directory. [^1]: [Don't Use .dev for Development](https://iyware.com/dont-use-dev-for-development/) + + +
+
+
+
+
+ + +## PHPStorm XDebug Setup + +- [Intro](#Intro) +- [Installation](#Installation) + - [Customize laradock/docker-compose.yml](#CustomizeDockerCompose) + - [Clean House](#InstallCleanHouse) + - [Laradock Dial Tone](#InstallLaradockDialTone) + - [hosts](#AddToHosts) + - [Firewall](#FireWall) + - [Enable xDebug on php-fpm](#enablePhpXdebug) + - [PHPStorm Settings](#InstallPHPStorm) + - [Configs](#InstallPHPStormConfigs) +- [Usage](#Usage) + - [Laravel](#UsageLaravel) + - [Run ExampleTest](#UsagePHPStormRunExampleTest) + - [Debug ExampleTest](#UsagePHPStormDebugExampleTest) + - [Debug Web Site](#UsagePHPStormDebugSite) +- [SSH into workspace](#SSHintoWorkspace) + - [KiTTY](#InstallKiTTY) + +### Intro + +Wiring up [Laravel](https://laravel.com/), [Laradock](https://github.com/Laradock/laradock) [Laravel+Docker] and [PHPStorm](https://www.jetbrains.com/phpstorm/) to play nice together complete with remote xdebug'ing as icing on top! Although this guide is based on `PHPStorm Windows`, +you should be able to adjust accordingly. This guide was written based on Docker for Windows Native. + +### Installation + +- This guide assumes the following: + - you have already installed and are familiar with Laravel, Laradock and PHPStorm. + - you have installed Laravel as a parent of `laradock`. This guide assumes `/c/_dk/laravel`. + +### hosts +- Add `laravel` to your hosts file located on Windows 10 at `C:\Windows\System32\drivers\etc\hosts`. It should be set to the IP of your running container. Mine is: `10.0.75.2` +On Windows you can find it by opening Windows `Hyper-V Manager`. + - ![Windows Hyper-V Manager](images/photos/PHPStorm/Settings/WindowsHyperVManager.png) + +- [Hosts File Editor](https://github.com/scottlerch/HostsFileEditor) makes it easy to change your hosts file. + - Set `laravel` to your docker host IP. See [Example](images/photos/SimpleHostsEditor/AddHost_laravel.png). + + +### Firewall +Your PHPStorm will need to be able to receive a connection from PHP xdebug either your running workspace or php-fpm containers on port 9000. This means that your Windows Firewall should either enable connections from the Application PHPStorm OR the port. + +- It is important to note that if the Application PHPStorm is NOT enabled in the firewall, you will not be able to recreate a rule to override that. +- Also be aware that if you are installing/upgrade different versions of PHPStorm, you MAY have orphaned references to PHPStorm in your Firewall! You may decide to remove orphaned references however in either case, make sure that they are set to receive public TCP traffic. + +#### Edit laradock/docker-compose.yml +Set the following variables: +``` +### Workspace Utilities Container ############### + + workspace: + build: + context: ./workspace + args: + - INSTALL_XDEBUG=true + - INSTALL_WORKSPACE_SSH=true + ... + + +### PHP-FPM Container ##################### + + php-fpm: + build: + context: ./php-fpm + args: + - INSTALL_XDEBUG=true + ... + +``` + +#### Edit xdebug.ini files +- `laradock/workspace/xdebug.ini` +- `laradock/php-fpm/xdebug.ini` + +Set the following variables: + +``` +xdebug.remote_autostart=1 +xdebug.remote_enable=1 +xdebug.remote_connect_back=1 +xdebug.cli_color=1 +``` + + +#### Need to clean house first? + +Make sure you are starting with a clean state. For example, do you have other Laradock containers and images? +Here are a few things I use to clean things up. 
+
+- Delete all containers using `grep laradock_` on the names, see: [Remove all containers based on docker image name](https://linuxconfig.org/remove-all-containners-based-on-docker-image-name).
+
+`docker ps -a | awk '{ print $1,$2 }' | grep laradock_ | awk '{print $1}' | xargs -I {} docker rm {}`
+
+- Delete all images containing `laradock`.
+
+`docker images | awk '{print $1,$2,$3}' | grep laradock_ | awk '{print $3}' | xargs -I {} docker rmi {}`
+**Note:** This will only delete images that were built with `Laradock`, **NOT** `laradock/*` images which are pulled down by `Laradock` such as `laradock/workspace`, etc.
+**Note:** Some may fail with:
+`Error response from daemon: conflict: unable to delete 3f38eaed93df (cannot be forced) - image has dependent child images`
+
+- I added this to my `.bashrc` to remove orphaned images.
+
+```
+dclean() {
+  processes=`docker ps -q -f status=exited`
+  if [ -n "$processes" ]; then
+    docker rm $processes
+  fi
+
+  images=`docker images -q -f dangling=true`
+  if [ -n "$images" ]; then
+    docker rmi $images
+  fi
+}
+```
+
+- If you frequently switch configurations for Laradock, you may find it useful to add the following to your `.bashrc` or equivalent:
+
+```
+# remove laravel* containers
+# remove laravel_* images
+dcleanlaradockfunction()
+{
+  echo 'Removing ALL containers associated with laradock'
+  docker ps -a | awk '{ print $1,$2 }' | grep laradock | awk '{print $1}' | xargs -I {} docker rm {}
+
+  # remove ALL images associated with laradock_
+  # does NOT delete laradock/* which are hub images
+  echo 'Removing ALL images associated with laradock_'
+  docker images | awk '{print $1,$2,$3}' | grep laradock_ | awk '{print $3}' | xargs -I {} docker rmi {}
+
+  echo 'Listing all laradock docker hub images...'
+  docker images | grep laradock
+
+  echo 'dcleanlaradock completed'
+}
+# associate the above function with an alias
+# so it can be recalled/looked up by typing 'alias'
+alias dcleanlaradock=dcleanlaradockfunction
+```
+
+
+### Let's get a dial-tone with Laravel
+
+```
+# barebones at this point
+docker-compose up -d nginx mysql
+
+# run
+docker-compose ps
+
+# Should see:
+          Name                        Command              State                    Ports
+-----------------------------------------------------------------------------------------------------------
+laradock_mysql_1            docker-entrypoint.sh mysqld    Up      0.0.0.0:3306->3306/tcp
+laradock_nginx_1            nginx                          Up      0.0.0.0:443->443/tcp, 0.0.0.0:80->80/tcp
+laradock_php-fpm_1          php-fpm                        Up      9000/tcp
+laradock_volumes_data_1     true                           Exit 0
+laradock_volumes_source_1   true                           Exit 0
+laradock_workspace_1        /sbin/my_init                  Up      0.0.0.0:2222->22/tcp
+```
+
+### Enable xDebug on php-fpm
+
+In a host terminal sitting in the laradock folder, run: `./php-fpm/xdebug status`
+You should see something like the following:
+
+```
+xDebug status
+laradock_php-fpm_1
+PHP 7.0.9 (cli) (built: Aug 10 2016 19:45:48) ( NTS )
+Copyright (c) 1997-2016 The PHP Group
+Zend Engine v3.0.0, Copyright (c) 1998-2016 Zend Technologies
+    with Xdebug v2.4.1, Copyright (c) 2002-2016, by Derick Rethans
+```
+
+Other commands include `./php-fpm/xdebug start | stop`.
+
+If you have enabled `xdebug=true` in `docker-compose.yml/php-fpm`, `xdebug` will already be running when
+`php-fpm` is started and listening for debug info on port 9000.
<br>
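+
+As an extra sanity check, you can confirm the extension is actually loaded inside the container (a quick sketch, assuming the default laradock service names):
+
+```
+docker-compose exec php-fpm php -m | grep -i xdebug
+```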
+ + +### PHPStorm Settings + +- Here are some settings that are known to work: + - `Settings/BuildDeploymentConnection` + - ![Settings/BuildDeploymentConnection](/images/photos/PHPStorm/Settings/BuildDeploymentConnection.png) + + - `Settings/BuildDeploymentConnectionMappings` + - ![Settings/BuildDeploymentConnectionMappings](/images/photos/PHPStorm/Settings/BuildDeploymentConnectionMappings.png) + + - `Settings/BuildDeploymentDebugger` + - ![Settings/BuildDeploymentDebugger](/images/photos/PHPStorm/Settings/BuildDeploymentDebugger.png) + + - `Settings/EditRunConfigurationRemoteWebDebug` + - ![Settings/EditRunConfigurationRemoteWebDebug](/images/photos/PHPStorm/Settings/EditRunConfigurationRemoteWebDebug.png) + + - `Settings/EditRunConfigurationRemoteExampleTestDebug` + - ![Settings/EditRunConfigurationRemoteExampleTestDebug](/images/photos/PHPStorm/Settings/EditRunConfigurationRemoteExampleTestDebug.png) + + - `Settings/LangsPHPDebug` + - ![Settings/LangsPHPDebug](/images/photos/PHPStorm/Settings/LangsPHPDebug.png) + + - `Settings/LangsPHPInterpreters` + - ![Settings/LangsPHPInterpreters](/images/photos/PHPStorm/Settings/LangsPHPInterpreters.png) + + - `Settings/LangsPHPPHPUnit` + - ![Settings/LangsPHPPHPUnit](/images/photos/PHPStorm/Settings/LangsPHPPHPUnit.png) + + - `Settings/LangsPHPServers` + - ![Settings/LangsPHPServers](/images/photos/PHPStorm/Settings/LangsPHPServers.png) + + - `RemoteHost` + To switch on this view, go to: `Menu/Tools/Deployment/Browse Remote Host`. + - ![RemoteHost](/images/photos/PHPStorm/RemoteHost.png) + + - `RemoteWebDebug` + - ![DebugRemoteOn](/images/photos/PHPStorm/DebugRemoteOn.png) + + - `EditRunConfigurationRemoteWebDebug` + Go to: `Menu/Run/Edit Configurations`. + - ![EditRunConfigurationRemoteWebDebug](/images/photos/PHPStorm/Settings/EditRunConfigurationRemoteWebDebug.png) + + - `EditRunConfigurationRemoteExampleTestDebug` + Go to: `Menu/Run/Edit Configurations`. + - ![EditRunConfigurationRemoteExampleTestDebug](/images/photos/PHPStorm/Settings/EditRunConfigurationRemoteExampleTestDebug.png) + + - `WindowsFirewallAllowedApps` + Go to: `Control Panel\All Control Panel Items\Windows Firewall\Allowed apps`. + - ![WindowsFirewallAllowedApps.png](/images/photos/PHPStorm/Settings/WindowsFirewallAllowedApps.png) + + - `hosts` + Edit: `C:\Windows\System32\drivers\etc\hosts`. + - ![WindowsFirewallAllowedApps.png](/images/photos/PHPStorm/Settings/hosts.png) + + - [Enable xDebug on php-fpm](#enablePhpXdebug) + + + +### Usage + +#### Run ExampleTest +- right-click on `tests/ExampleTest.php` + - Select: `Run 'ExampleTest.php'` or `Ctrl+Shift+F10`. + - Should pass!! You just ran a remote test via SSH! + +#### Debug ExampleTest +- Open to edit: `tests/ExampleTest.php` +- Add a BreakPoint on line 16: `$this->visit('/')` +- right-click on `tests/ExampleTest.php` + - Select: `Debug 'ExampleTest.php'`. + - Should have stopped at the BreakPoint!! You are now debugging locally against a remote Laravel project via SSH! + - ![Remote Test Debugging Success](/images/photos/PHPStorm/RemoteTestDebuggingSuccess.png) + +#### Debug WebSite +- In case xDebug is disabled, from the `laradock` folder run: +`./php-fpm/xdebug start`. + - To switch xdebug off, run: +`./php-fpm/xdebug stop` + +- Start Remote Debugging + - ![DebugRemoteOn](/images/photos/PHPStorm/DebugRemoteOn.png) + +- Open to edit: `bootstrap/app.php` +- Add a BreakPoint on line 14: `$app = new Illuminate\Foundation\Application(` +- Reload [Laravel Site](http://laravel/) + - Should have stopped at the BreakPoint!! 
You are now debugging locally against a remote Laravel project via SSH!
+  - ![Remote Debugging Success](/images/photos/PHPStorm/RemoteDebuggingSuccess.png)
+
+
+### Let's shell into workspace
+Assuming that you are in the laradock folder, type:
+`ssh -i workspace/insecure_id_rsa -p2222 root@laravel`
+**Cha Ching!!!!**
+- `workspace/insecure_id_rsa.ppk` may become corrupted. In which case:
+  - fire up `puttygen`
+  - import `workspace/insecure_id_rsa`
+  - save the private key to `workspace/insecure_id_rsa.ppk`
+
+#### KiTTY
+[KiTTY](http://www.9bis.net/kitty/) is a fork from version 0.67 of PuTTY.
+
+- Here are some settings that are working for me:
+  - ![Session](/images/photos/KiTTY/Session.png)
+  - ![Terminal](/images/photos/KiTTY/Terminal.png)
+  - ![Window](/images/photos/KiTTY/Window.png)
+  - ![WindowAppearance](/images/photos/KiTTY/WindowAppearance.png)
+  - ![Connection](/images/photos/KiTTY/Connection.png)
+  - ![ConnectionData](/images/photos/KiTTY/ConnectionData.png)
+  - ![ConnectionSSH](/images/photos/KiTTY/ConnectionSSH.png)
+  - ![ConnectionSSHAuth](/images/photos/KiTTY/ConnectionSSHAuth.png)
+  - ![TerminalShell](/images/photos/KiTTY/TerminalShell.png)
+
+<br>
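+
+If you shell into the workspace often, an entry in `~/.ssh/config` saves retyping the flags (a sketch; the `IdentityFile` path assumes the `/c/_dk/laravel` layout used in this guide):
+
+```
+Host laradock
+    HostName laravel
+    Port 2222
+    User root
+    IdentityFile /c/_dk/laravel/laradock/workspace/insecure_id_rsa
+```
+
+After that, `ssh laradock` is all you need.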
+
+
+
+
+
+
+## Setup remote debugging for PhpStorm on Linux
+
+ - Make sure you have followed the steps above in the [Install Xdebug section](#install-xdebug).
+
+ - Make sure Xdebug accepts connections and listens on port 9000. (This should be the default configuration.)
+
+![Debug Configuration](/images/photos/PHPStorm/linux/configuration/debugConfiguration.png "Debug Configuration")
+
+ - Create a server with the name `laradock` (matching the **PHP_IDE_CONFIG** key in the environment file) and make sure to map the project root path to the server correctly.
+
+![Server Configuration](/images/photos/PHPStorm/linux/configuration/serverConfiguration.png "Server Configuration")
+
+ - Start listening for debug connections, place a breakpoint and you are good to go!
diff --git a/DOCUMENTATION/content/help/index.md b/DOCUMENTATION/content/help/index.md
index 3f2342de..1d971c51 100644
--- a/DOCUMENTATION/content/help/index.md
+++ b/DOCUMENTATION/content/help/index.md
@@ -1,5 +1,5 @@
 ---
-title: Help & Questions
+title: 5. Help & Questions
 type: index
 weight: 5
 ---
@@ -7,3 +7,121 @@ weight: 5
 Join the chat room on [Gitter](https://gitter.im/Laradock/laradock) and get help and support from the community.
 
 You can also open an [issue](https://github.com/laradock/laradock/issues) on Github (it will be labeled as Question) and discuss it with people on [Gitter](https://gitter.im/Laradock/laradock).
+
+
+<br>
+ +# Common Problems + +*Here's a list of the common problems you might face, and the possible solutions.* + + +
+## I see a blank (white) page instead of the Laravel 'Welcome' page! + +Run the following command from the Laravel root directory: + +```bash +sudo chmod -R 777 storage bootstrap/cache +``` + + + + + + +
+## I see "Welcome to nginx" instead of the Laravel App! + +Use `http://127.0.0.1` instead of `http://localhost` in your browser. + + + + + + +
+## I see an error message containing `address already in use` or `port is already allocated`
+
+Make sure the ports for the services that you are trying to run (22, 80, 443, 3306, etc.) are not already being used by other programs on the host, such as a built-in `apache`/`httpd` service or other development tools you have installed (see the quick check below).
+
+
+
+
+
+<br>
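+
+Still on the `address already in use` error: to find out which process is holding a port, you can check from the host first (a quick sketch; `lsof` on Linux/macOS, `netstat` on Windows; port 80 is just an example):
+
+```bash
+# Linux / macOS
+sudo lsof -i :80
+
+# Windows (cmd)
+netstat -ano | findstr :80
+```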
+## I get NGINX error 404 Not Found on Windows
+
+1. Go to Docker Settings on your Windows machine.
+2. Click on the `Shared Drives` tab and check the drive that contains your project files.
+3. Enter your Windows username and password.
+4. Go to the `Reset` tab and click Restart Docker.
+
+
+
+
+
+<br>
+## The time in my services does not match the current time
+
+1. Make sure you've [changed the timezone](#Change-the-timezone).
+2. Stop and rebuild the containers (`docker-compose up -d --build`), as sketched below.
+
+
+
+
+
+<br>
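+
+A minimal sketch of that flow, assuming the stock `.env` keys (pick your own timezone; rebuild whichever containers you actually use):
+
+```bash
+# in laradock/.env, for example:
+# WORKSPACE_TIMEZONE=Europe/Berlin
+
+# then rebuild and restart
+docker-compose build workspace
+docker-compose up -d
+```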
+## I get MySQL connection refused
+
+This error sometimes happens because your Laravel application isn't running on the container localhost IP (which is 127.0.0.1). Steps to fix it:
+
+* Option A
+  1. Check your running Laravel application's IP by dumping `Request::ip()` using `dd(Request::ip())` anywhere in your application. The result is the IP of your Laravel container.
+  2. Change the `DB_HOST` variable in your app's `.env` to the IP you received in the previous step.
+* Option B
+  1. Change the `DB_HOST` value to the name of the MySQL docker container (the Laradock docker-compose file currently names it `mysql`), i.e. `DB_HOST=mysql` in your app's `.env`.
+
+## I get stuck when building nginx on `fetch http://mirrors.aliyun.com/alpine/v3.5/main/x86_64/APKINDEX.tar.gz`
+
+As stated in [#749](https://github.com/laradock/laradock/issues/749#issuecomment-419652646), this is already fixed; just set `CHANGE_SOURCE` to false.
+
+## Custom composer repo packagist URL and npm registry URL
+
+In China, the default sources for composer and npm are very slow. You can add the `WORKSPACE_NPM_REGISTRY` and `WORKSPACE_COMPOSER_REPO_PACKAGIST` config in `.env` to use your custom source.
+
+Example:
+```bash
+WORKSPACE_NPM_REGISTRY=https://registry.npm.taobao.org
+WORKSPACE_COMPOSER_REPO_PACKAGIST=https://packagist.phpcomposer.com
+```
+<br>
+
+## I get `Module build failed: Error: write EPIPE` while compiling a react application
+
+When you run `npm build` or `yarn dev` to build a react application using webpack with elixir, you may receive an `Error: write EPIPE` while processing .jpg images.
+
+This is caused by an outdated library for processing **.jpg files** in Ubuntu 16.04.
+
+To fix the problem you can follow these steps:
+
+1 - Open the `.env`.
+
+2 - Search for `WORKSPACE_INSTALL_LIBPNG` or add the key if missing.
+
+3 - Set the value to true:
+
+```dotenv
+WORKSPACE_INSTALL_LIBPNG=true
+```
+
+4 - Finally, rebuild the workspace image:
+
+```bash
+docker-compose build workspace
+```
+
diff --git a/DOCUMENTATION/content/introduction/index.md b/DOCUMENTATION/content/introduction/index.md
index 65442707..143faa20 100644
--- a/DOCUMENTATION/content/introduction/index.md
+++ b/DOCUMENTATION/content/introduction/index.md
@@ -1,24 +1,41 @@
 ---
-title: Introduction
+title: 1. Introduction
 type: index
 weight: 1
 ---
+## Use Docker First - Then Learn About It Later
+
+Laradock is a PHP development environment that runs on Docker.
+
+It supports a variety of useful Docker Images, pre-configured to provide a wonderful PHP development environment.
 
-A full PHP development environment for Docker.
-
-Includes pre-packaged Docker Images, all pre-configured to provide a wonderful PHP development environment.
-
-Laradock is well known in the Laravel community, as the project started with single focus on running Laravel projects on Docker. Later and due to the large adoption from the PHP community, it started supporting other PHP projects like Symfony, CodeIgniter, WordPress, Drupal...
+![](https://raw.githubusercontent.com/laradock/laradock/master/.github/home-page-images/laradock-logo.jpg)
-![](https://s19.postimg.org/jblfytw9f/laradock-logo.jpg)
+
+
+
+## Sponsors
+
+
+
+
+
+
+
+
+
+
+For basic sponsorships go to [Open Collective](https://opencollective.com/laradock#sponsor), for golden sponsorships contact support@laradock.io.
+<br>
+*Your logo will show up on the [github repository](https://github.com/laradock/laradock/) index page and the [documentation](http://laradock.io/) main page, with a link to your website.*

 ## Quick Overview

-Let's see how easy it is to install `NGINX`, `PHP`, `Composer`, `MySQL`, `Redis` and `Beanstalkd`:
+Let's see how easy it is to set up our demo stack `PHP`, `NGINX`, `MySQL`, `Redis` and `Composer`:

 1 - Clone Laradock inside your PHP project:

@@ -58,10 +75,10 @@ That's it! enjoy :)

 ## Features

-- Easy switch between PHP versions: 7.2, 7.1, 5.6...
+- Easy switch between PHP versions: 7.3, 7.2, 7.1, 5.6...
 - Choose your favorite database engine: MySQL, Postgres, MariaDB...
-- Run your own combination of software: Memcached, HHVM, Beanstalkd...
-- Every software runs on a separate container: PHP-FPM, NGINX, PHP-CLI...
+- Run your own stack: Memcached, HHVM, RabbitMQ...
+- Each software runs on its own container: PHP-FPM, NGINX, PHP-CLI...
 - Easy to customize any container, with a simple edit to the `Dockerfile`.
 - All Images extend from an official base Image. (Trusted base Images).
 - Pre-configured NGINX to host any code at your root directory.
@@ -71,39 +88,131 @@ That's it! enjoy :)
 - Latest version of the Docker Compose file (`docker-compose`).
 - Everything is visible and editable.
 - Fast Images Builds.
-- More to come every week..
-
-## Supported Software (Images)
+## Supported Software (Docker Images)

-In adhering to the separation of concerns principle as promoted by Docker, Laradock runs each software on its own Container.
-You can turn On/Off as many instances of as any container without worrying about the configurations, everything works like a charm.
+> Laradock adheres to the 'separation of concerns' principle, thus it runs each software on its own Docker Container.
+> You can turn On/Off as many instances as you want without worrying about the configurations.
+
+> To run a chosen container from the list below, run `docker-compose up -d {container-name}`.
+> The container name `{container-name}` is the same as its folder name. For example, to run the "PHP FPM" container, use the name "php-fpm".
+
+- **Web Servers:**
+  - NGINX
+  - Apache2
+  - Caddy
+
+- **Load Balancers:**
+  - HAProxy
+  - Traefik

-- **Database Engines:**
-MySQL - MariaDB - Percona - MongoDB - Neo4j - RethinkDB - MSSQL - PostgreSQL - Postgres-PostGIS.
-- **Database Management:**
-PhpMyAdmin - Adminer - PgAdmin
-- **Cache Engines:**
-Redis - Memcached - Aerospike
-- **PHP Servers:**
-NGINX - Apache2 - Caddy
 - **PHP Compilers:**
-PHP FPM - HHVM
-- **Message Queueing:**
-Beanstalkd - RabbitMQ - PHP Worker
-- **Queueing Management:**
-Beanstalkd Console - RabbitMQ Console
-- **Random Tools:**
-Mailu - HAProxy - Certbot - Blackfire - Selenium - Jenkins - ElasticSearch - Kibana - Grafana - Gitlab - Mailhog - MailDev - Minio - Varnish - Swoole - NetData - Portainer - Laravel Echo - Phalcon...
+  - PHP FPM
+  - HHVM

-Laradock introduces the **Workspace** Image, as a development environment.
-It contains a rich set of helpful tools, all pre-configured to work and integrate with almost any combination of Containers and tools you may choose.
+- **Database Management Systems:**
+  - MySQL
+  - PostgreSQL
+  - PostGIS
+  - MariaDB
+  - Percona
+  - MSSQL
+  - MongoDB
+  - MongoDB Web UI
+  - Neo4j
+  - CouchDB
+  - RethinkDB
+  - Cassandra

-**Workspace Image Tools**
-PHP CLI - Composer - Git - Linuxbrew - Node - V8JS - Gulp - SQLite - xDebug - Envoy - Deployer - Vim - Yarn - SOAP - Drush...
<br>
+ +- **Database Management Apps:** + - PhpMyAdmin + - Adminer + - PgAdmin + +- **Cache Engines:** + - Redis + - Redis Web UI + - Redis Cluster + - Memcached + - Aerospike + - Varnish + +- **Message Brokers:** + - RabbitMQ + - RabbitMQ Admin Console + - Beanstalkd + - Beanstalkd Admin Console + - Eclipse Mosquitto + - PHP Worker + - Laravel Horizon + +- **Mail Servers:** + - Mailu + - Mailhog + - MailDev + +- **Log Management:** + - GrayLog + +- **Testing:** + - Selenium + +- **Monitoring:** + - Grafana + - NetData + +- **Search Engines:** + - ElasticSearch + - Apache Solr + - Manticore Search + +- **IDE's** + - ICE Coder + - Theia + - Web IDE + +- **Miscellaneous:** + - Workspace *(Laradock container that includes a rich set of pre-configured useful tools)* + - `PHP CLI` + - `Composer` + - `Git` + - `Vim` + - `xDebug` + - `Linuxbrew` + - `Node` + - `V8JS` + - `Gulp` + - `SQLite` + - `Laravel Envoy` + - `Deployer` + - `Yarn` + - `SOAP` + - `Drush` + - `Wordpress CLI` + - Apache ZooKeeper *(Centralized service for distributed systems to a hierarchical key-value store)* + - Kibana *(Visualize your Elasticsearch data and navigate the Elastic Stack)* + - LogStash *(Server-side data processing pipeline that ingests data from a multitude of sources simultaneously)* + - Jenkins *(automation server, that provides plugins to support building, deploying and automating any project)* + - Certbot *(Automatically enable HTTPS on your website)* + - Swoole *(Production-Grade Async programming Framework for PHP)* + - SonarQube *(continuous inspection of code quality to perform automatic reviews with static analysis of code to detect bugs and more)* + - Gitlab *(A single application for the entire software development lifecycle)* + - PostGIS *(Database extender for PostgreSQL. It adds support for geographic objects allowing location queries to be run in SQL)* + - Blackfire *(Empowers all PHP developers and IT/Ops to continuously verify and improve their app's performance)* + - Laravel Echo *(Bring the power of WebSockets to your Laravel applications)* + - Phalcon *(A PHP web framework based on the model–view–controller pattern)* + - Minio *(Cloud storage server released under Apache License v2, compatible with Amazon S3)* + - AWS EB CLI *(CLI that helps you deploy and manage your AWS Elastic Beanstalk applications and environments)* + - Thumbor *(Photo thumbnail service)* + - IPython *(Provides a rich architecture for interactive computing)* + - Jupyter Hub *(Jupyter notebook for multiple users)* + - Portainer *(Build and manage your Docker environments with ease)* + - Docker Registry *(The Docker Registry implementation for storing and distributing Docker images)* + - Docker Web UI *(A browser-based solution for browsing and modifying a private Docker registry)* You can choose, which tools to install in your workspace container and other containers, from the `.env` file. @@ -112,30 +221,7 @@ You can choose, which tools to install in your workspace container and other con -If you can't find your Software in the list, build it yourself and submit it. Contributions are welcomed :) - - - -## Sponsors - - - - - -Support this project by becoming a sponsor. - -Your logo will show up on the [github repository](https://github.com/laradock/laradock/) index page and the [documentation](http://laradock.io/) main page, with a link to your website. [[Become a sponsor](https://opencollective.com/laradock#sponsor)] - - - - - - - - - - - +*If you can't find your Software in the list, build it yourself and submit it. 
Contributions are welcomed :)* @@ -172,7 +258,6 @@ Most importantly Docker can run on Development and on Production (same environme What's better than a **Demo Video**: -- Laradock v5.* (should be next!) - Laradock [v4.*](https://www.youtube.com/watch?v=TQii1jDa96Y) - Laradock [v2.*](https://www.youtube.com/watch?v=-DamFMczwDA) - Laradock [v0.3](https://www.youtube.com/watch?v=jGkyO6Is_aI) @@ -201,14 +286,14 @@ You are welcome to join our chat room on Gitter. > Help keeping the project development going, by [contributing](http://laradock.io/contributing) or donating a little. > Thanks in advance. -Donate directly via [Paypal](https://www.paypal.me/mzalt) +Donate directly via [Paypal](https://paypal.me/mzmmzz) -[![Donate](https://img.shields.io/badge/Donate-PayPal-green.svg)](https://www.paypal.me/mzalt) - -or become a backer on [Open Collective](https://opencollective.com/laradock#backer) - - +[![Donate](https://img.shields.io/badge/Donate-PayPal-green.svg)](https://paypal.me/mzmmzz) or show your support via [Beerpay](https://beerpay.io/laradock/laradock) [![Beerpay](https://beerpay.io/laradock/laradock/badge.svg?style=flat)](https://beerpay.io/laradock/laradock) + +or become a backer on [Open Collective](https://opencollective.com/laradock#backer) + + diff --git a/DOCUMENTATION/content/license/index.md b/DOCUMENTATION/content/license/index.md index 795d4c89..312b13d1 100644 --- a/DOCUMENTATION/content/license/index.md +++ b/DOCUMENTATION/content/license/index.md @@ -1,5 +1,5 @@ --- -title: License +title: 8. License type: index weight: 8 --- diff --git a/DOCUMENTATION/content/related-projects/index.md b/DOCUMENTATION/content/related-projects/index.md index bc37d9b5..a55343fb 100644 --- a/DOCUMENTATION/content/related-projects/index.md +++ b/DOCUMENTATION/content/related-projects/index.md @@ -1,5 +1,5 @@ --- -title: Related Projects +title: 6. Related Projects type: index weight: 6 --- diff --git a/DOCUMENTATION/static/ads.txt b/DOCUMENTATION/static/ads.txt new file mode 100644 index 00000000..22301965 --- /dev/null +++ b/DOCUMENTATION/static/ads.txt @@ -0,0 +1 @@ +google.com, pub-9826129398689742, DIRECT, f08c47fec0942fa0 diff --git a/adminer/Dockerfile b/adminer/Dockerfile index fb66a3e4..cb399771 100644 --- a/adminer/Dockerfile +++ b/adminer/Dockerfile @@ -1,7 +1,4 @@ -FROM adminer:4.3.0 - -# Version 4.3.1 contains PostgreSQL login errors. See docs. 
-# See https://sourceforge.net/p/adminer/bugs-and-features/548/ +FROM adminer:4 LABEL maintainer="Patrick Artounian " @@ -16,11 +13,15 @@ ARG INSTALL_MSSQL=false ENV INSTALL_MSSQL ${INSTALL_MSSQL} RUN if [ ${INSTALL_MSSQL} = true ]; then \ set -xe \ - && apk --update add --no-cache --virtual .phpize-deps $PHPIZE_DEPS unixodbc unixodbc-dev \ - && pecl channel-update pecl.php.net \ - && pecl install pdo_sqlsrv-4.1.8preview sqlsrv-4.1.8preview \ - && echo "extension=sqlsrv.so" > /usr/local/etc/php/conf.d/20-sqlsrv.ini \ - && echo "extension=pdo_sqlsrv.so" > /usr/local/etc/php/conf.d/20-pdo_sqlsrv.ini \ + # && apk --update add --no-cache --virtual .phpize-deps $PHPIZE_DEPS unixodbc unixodbc-dev \ + # && pecl channel-update pecl.php.net \ + # && pecl install pdo_sqlsrv-4.1.8preview sqlsrv-4.1.8preview \ + # && echo "extension=sqlsrv.so" > /usr/local/etc/php/conf.d/20-sqlsrv.ini \ + # && echo "extension=pdo_sqlsrv.so" > /usr/local/etc/php/conf.d/20-pdo_sqlsrv.ini \ + && apk --update add --no-cache freetds unixodbc \ + && apk --update add --no-cache --virtual .build-deps $PHPIZE_DEPS freetds-dev unixodbc-dev \ + && docker-php-ext-install pdo_dblib \ + && apk del .build-deps \ ;fi USER adminer diff --git a/aerospike/Dockerfile b/aerospike/Dockerfile index a85bc20c..abf0e371 100644 --- a/aerospike/Dockerfile +++ b/aerospike/Dockerfile @@ -1,7 +1,3 @@ FROM aerospike:latest LABEL maintainer="Luciano Jr " - -RUN rm /etc/aerospike/aerospike.conf - -COPY aerospike.conf /etc/aerospike/aerospike.conf diff --git a/aerospike/aerospike.conf b/aerospike/aerospike.conf deleted file mode 100644 index 5e577759..00000000 --- a/aerospike/aerospike.conf +++ /dev/null @@ -1,77 +0,0 @@ -# Aerospike database configuration file. - -# This stanza must come first. -service { - user root - group root - paxos-single-replica-limit 1 # Number of nodes where the replica count is automatically reduced to 1. - pidfile /var/run/aerospike/asd.pid - service-threads 4 - transaction-queues 4 - transaction-threads-per-queue 4 - proto-fd-max 15000 -} - -logging { - - # Log file must be an absolute path. - file /var/log/aerospike/aerospike.log { - context any info - } - - # Send log messages to stdout - console { - context any critical - } -} - -network { - service { - address any - port 3000 - - # Uncomment the following to set the `access-address` parameter to the - # IP address of the Docker host. This will the allow the server to correctly - # publish the address which applications and other nodes in the cluster to - # use when addressing this node. - # access-address - } - - heartbeat { - - # mesh is used for environments that do not support multicast - mode mesh - port 3002 - - # use asinfo -v 'tip:host=;port=3002' to inform cluster of - # other mesh nodes - mesh-port 3002 - - interval 150 - timeout 10 - } - - fabric { - port 3001 - } - - info { - port 3003 - } -} - -namespace test { - replication-factor 2 - memory-size 1G - default-ttl 5d # 5 days, use 0 to never expire/evict. - - # storage-engine memory - - # To use file storage backing, comment out the line above and use the - # following lines instead. - storage-engine device { - file /opt/aerospike/data/test.dat - filesize 4G - data-in-memory true # Store data in memory in addition to file. 
- } -} diff --git a/aws/.gitignore b/aws-eb-cli/.gitignore similarity index 100% rename from aws/.gitignore rename to aws-eb-cli/.gitignore diff --git a/aws/Dockerfile b/aws-eb-cli/Dockerfile similarity index 100% rename from aws/Dockerfile rename to aws-eb-cli/Dockerfile diff --git a/beanstalkd/Dockerfile b/beanstalkd/Dockerfile index b95a3519..967fac77 100644 --- a/beanstalkd/Dockerfile +++ b/beanstalkd/Dockerfile @@ -1,16 +1,7 @@ -FROM phusion/baseimage:latest - +FROM alpine LABEL maintainer="Mahmoud Zalt " -ENV DEBIAN_FRONTEND noninteractive -ENV PATH /usr/local/rvm/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin - -RUN apt-get update -RUN apt-get install -y beanstalkd -RUN apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* - -VOLUME /var/lib/beanstalkd/data +RUN apk add --no-cache beanstalkd EXPOSE 11300 - -CMD ["/usr/bin/beanstalkd"] +ENTRYPOINT ["/usr/bin/beanstalkd"] diff --git a/caddy/Dockerfile b/caddy/Dockerfile index c987b7e0..c9b74b70 100644 --- a/caddy/Dockerfile +++ b/caddy/Dockerfile @@ -1,27 +1,5 @@ -FROM golang:alpine +FROM abiosoft/caddy:no-stats -LABEL maintainer="Huadong Zuo " +CMD ["--conf", "/etc/caddy/Caddyfile", "--log", "stdout", "--agree=true"] -RUN apk add --no-cache \ - openssh \ - git \ - build-base && \ - go get github.com/abiosoft/caddyplug/caddyplug \ - && caddyplug install-caddy \ - apk del build-base - -ARG plugins="cors" - -## ARG plugins="cors cgi cloudflare azure linode" - -RUN caddyplug install ${plugins} - -RUN apk add --no-cache inotify-tools \ - && echo -e "#!/bin/sh\nwhile inotifywait -e modify /etc/caddy; do\n\tpkill caddy\ndone " >> /start.sh \ - && chmod +x /start.sh - -EXPOSE 80 443 - -WORKDIR /var/www/public - -CMD ["sh","-c","/start.sh & /usr/bin/caddy -conf /etc/caddy/Caddyfile -agree"] +EXPOSE 80 443 2015 diff --git a/cassandra/Dockerfile b/cassandra/Dockerfile new file mode 100644 index 00000000..cdf280aa --- /dev/null +++ b/cassandra/Dockerfile @@ -0,0 +1,5 @@ +ARG CASSANDRA_VERSION=latest +FROM bitnami/cassandra:${CASSANDRA_VERSION} + +LABEL maintainer="Stefan Neuhaus " + diff --git a/couchdb/Dockerfile b/couchdb/Dockerfile new file mode 100644 index 00000000..b1154bc1 --- /dev/null +++ b/couchdb/Dockerfile @@ -0,0 +1,3 @@ +FROM couchdb + +EXPOSE 5984 diff --git a/docker-compose.yml b/docker-compose.yml index 2965a678..462b085d 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -39,6 +39,14 @@ volumes: driver: ${VOLUMES_DRIVER} elasticsearch: driver: ${VOLUMES_DRIVER} + mosquitto: + driver: ${VOLUMES_DRIVER} + confluence: + driver: ${VOLUMES_DRIVER} + sonarqube: + driver: ${VOLUMES_DRIVER} + cassandra: + driver: ${VOLUMES_DRIVER} services: @@ -56,10 +64,12 @@ services: - INSTALL_SSH2=${WORKSPACE_INSTALL_SSH2} - INSTALL_GMP=${WORKSPACE_INSTALL_GMP} - INSTALL_SOAP=${WORKSPACE_INSTALL_SOAP} + - INSTALL_XSL=${WORKSPACE_INSTALL_XSL} - INSTALL_LDAP=${WORKSPACE_INSTALL_LDAP} - INSTALL_IMAP=${WORKSPACE_INSTALL_IMAP} - INSTALL_MONGO=${WORKSPACE_INSTALL_MONGO} - INSTALL_AMQP=${WORKSPACE_INSTALL_AMQP} + - INSTALL_CASSANDRA=${WORKSPACE_INSTALL_CASSANDRA} - INSTALL_PHPREDIS=${WORKSPACE_INSTALL_PHPREDIS} - INSTALL_MSSQL=${WORKSPACE_INSTALL_MSSQL} - INSTALL_NODE=${WORKSPACE_INSTALL_NODE} @@ -68,12 +78,14 @@ services: - INSTALL_NPM_GULP=${WORKSPACE_INSTALL_NPM_GULP} - INSTALL_NPM_BOWER=${WORKSPACE_INSTALL_NPM_BOWER} - INSTALL_NPM_VUE_CLI=${WORKSPACE_INSTALL_NPM_VUE_CLI} + - INSTALL_NPM_ANGULAR_CLI=${WORKSPACE_INSTALL_NPM_ANGULAR_CLI} - INSTALL_DRUSH=${WORKSPACE_INSTALL_DRUSH} + - 
INSTALL_WP_CLI=${WORKSPACE_INSTALL_WP_CLI} - INSTALL_DRUPAL_CONSOLE=${WORKSPACE_INSTALL_DRUPAL_CONSOLE} - INSTALL_AEROSPIKE=${WORKSPACE_INSTALL_AEROSPIKE} - - AEROSPIKE_PHP_REPOSITORY=${AEROSPIKE_PHP_REPOSITORY} - INSTALL_V8JS=${WORKSPACE_INSTALL_V8JS} - COMPOSER_GLOBAL_INSTALL=${WORKSPACE_COMPOSER_GLOBAL_INSTALL} + - COMPOSER_AUTH=${WORKSPACE_COMPOSER_AUTH} - COMPOSER_REPO_PACKAGIST=${WORKSPACE_COMPOSER_REPO_PACKAGIST} - INSTALL_WORKSPACE_SSH=${WORKSPACE_INSTALL_WORKSPACE_SSH} - INSTALL_LARAVEL_ENVOY=${WORKSPACE_INSTALL_LARAVEL_ENVOY} @@ -91,9 +103,12 @@ services: - INSTALL_PG_CLIENT=${WORKSPACE_INSTALL_PG_CLIENT} - INSTALL_PHALCON=${WORKSPACE_INSTALL_PHALCON} - INSTALL_SWOOLE=${WORKSPACE_INSTALL_SWOOLE} + - INSTALL_TAINT=${WORKSPACE_INSTALL_TAINT} - INSTALL_LIBPNG=${WORKSPACE_INSTALL_LIBPNG} - INSTALL_IONCUBE=${WORKSPACE_INSTALL_IONCUBE} - INSTALL_MYSQL_CLIENT=${WORKSPACE_INSTALL_MYSQL_CLIENT} + - INSTALL_PING=${WORKSPACE_INSTALL_PING} + - INSTALL_SSHPASS=${WORKSPACE_INSTALL_SSHPASS} - PUID=${WORKSPACE_PUID} - PGID=${WORKSPACE_PGID} - CHROME_DRIVER_VERSION=${WORKSPACE_CHROME_DRIVER_VERSION} @@ -103,8 +118,14 @@ services: - TZ=${WORKSPACE_TIMEZONE} - BLACKFIRE_CLIENT_ID=${BLACKFIRE_CLIENT_ID} - BLACKFIRE_CLIENT_TOKEN=${BLACKFIRE_CLIENT_TOKEN} + - INSTALL_POWERLINE=${WORKSPACE_INSTALL_POWERLINE} + - INSTALL_FFMPEG=${WORKSPACE_INSTALL_FFMPEG} + - INSTALL_GNU_PARALLEL=${WORKSPACE_INSTALL_GNU_PARALLEL} + - http_proxy + - https_proxy + - no_proxy volumes: - - ${APP_CODE_PATH_HOST}:${APP_CODE_PATH_CONTAINER} + - ${APP_CODE_PATH_HOST}:${APP_CODE_PATH_CONTAINER}${APP_CODE_CONTAINER_FLAG} extra_hosts: - "dockerhost:${DOCKER_HOST_IP}" ports: @@ -131,11 +152,12 @@ services: - INSTALL_BLACKFIRE=${INSTALL_BLACKFIRE} - INSTALL_SSH2=${PHP_FPM_INSTALL_SSH2} - INSTALL_SOAP=${PHP_FPM_INSTALL_SOAP} + - INSTALL_XSL=${PHP_FPM_INSTALL_XSL} - INSTALL_IMAP=${PHP_FPM_INSTALL_IMAP} - INSTALL_MONGO=${PHP_FPM_INSTALL_MONGO} - INSTALL_AMQP=${PHP_FPM_INSTALL_AMQP} + - INSTALL_CASSANDRA=${PHP_FPM_INSTALL_CASSANDRA} - INSTALL_MSSQL=${PHP_FPM_INSTALL_MSSQL} - - INSTALL_ZIP_ARCHIVE=${PHP_FPM_INSTALL_ZIP_ARCHIVE} - INSTALL_BCMATH=${PHP_FPM_INSTALL_BCMATH} - INSTALL_GMP=${PHP_FPM_INSTALL_GMP} - INSTALL_PHPREDIS=${PHP_FPM_INSTALL_PHPREDIS} @@ -143,24 +165,37 @@ services: - INSTALL_OPCACHE=${PHP_FPM_INSTALL_OPCACHE} - INSTALL_EXIF=${PHP_FPM_INSTALL_EXIF} - INSTALL_AEROSPIKE=${PHP_FPM_INSTALL_AEROSPIKE} - - AEROSPIKE_PHP_REPOSITORY=${AEROSPIKE_PHP_REPOSITORY} - INSTALL_MYSQLI=${PHP_FPM_INSTALL_MYSQLI} - INSTALL_PGSQL=${PHP_FPM_INSTALL_PGSQL} - INSTALL_PG_CLIENT=${PHP_FPM_INSTALL_PG_CLIENT} + - INSTALL_POSTGIS=${PHP_FPM_INSTALL_POSTGIS} - INSTALL_INTL=${PHP_FPM_INSTALL_INTL} - INSTALL_GHOSTSCRIPT=${PHP_FPM_INSTALL_GHOSTSCRIPT} - INSTALL_LDAP=${PHP_FPM_INSTALL_LDAP} - INSTALL_PHALCON=${PHP_FPM_INSTALL_PHALCON} - INSTALL_SWOOLE=${PHP_FPM_INSTALL_SWOOLE} + - INSTALL_TAINT=${PHP_FPM_INSTALL_TAINT} - INSTALL_IMAGE_OPTIMIZERS=${PHP_FPM_INSTALL_IMAGE_OPTIMIZERS} - INSTALL_IMAGEMAGICK=${PHP_FPM_INSTALL_IMAGEMAGICK} - INSTALL_CALENDAR=${PHP_FPM_INSTALL_CALENDAR} - INSTALL_FAKETIME=${PHP_FPM_INSTALL_FAKETIME} - INSTALL_IONCUBE=${PHP_FPM_INSTALL_IONCUBE} + - INSTALL_APCU=${PHP_FPM_INSTALL_APCU} - INSTALL_YAML=${PHP_FPM_INSTALL_YAML} + - INSTALL_RDKAFKA=${PHP_FPM_INSTALL_RDKAFKA} + - INSTALL_ADDITIONAL_LOCALES=${PHP_FPM_INSTALL_ADDITIONAL_LOCALES} + - INSTALL_MYSQL_CLIENT=${PHP_FPM_INSTALL_MYSQL_CLIENT} + - INSTALL_PING=${PHP_FPM_INSTALL_PING} + - INSTALL_SSHPASS=${PHP_FPM_INSTALL_SSHPASS} + - 
ADDITIONAL_LOCALES=${PHP_FPM_ADDITIONAL_LOCALES} + - INSTALL_FFMPEG=${PHP_FPM_FFMPEG} + - INSTALL_XHPROF=${PHP_FPM_INSTALL_XHPROF} + - http_proxy + - https_proxy + - no_proxy volumes: - ./php-fpm/php${PHP_VERSION}.ini:/usr/local/etc/php/php.ini - - ${APP_CODE_PATH_HOST}:${APP_CODE_PATH_CONTAINER} + - ${APP_CODE_PATH_HOST}:${APP_CODE_PATH_CONTAINER}${APP_CODE_CONTAINER_FLAG} expose: - "9000" extra_hosts: @@ -182,12 +217,24 @@ services: context: ./php-worker args: - PHP_VERSION=${PHP_VERSION} + - PHALCON_VERSION=${PHALCON_VERSION} - INSTALL_PGSQL=${PHP_WORKER_INSTALL_PGSQL} - INSTALL_BCMATH=${PHP_WORKER_INSTALL_BCMATH} + - INSTALL_PHALCON=${PHP_WORKER_INSTALL_PHALCON} - INSTALL_SOAP=${PHP_WORKER_INSTALL_SOAP} - INSTALL_ZIP_ARCHIVE=${PHP_WORKER_INSTALL_ZIP_ARCHIVE} + - INSTALL_MYSQL_CLIENT=${PHP_WORKER_INSTALL_MYSQL_CLIENT} + - INSTALL_AMQP=${PHP_WORKER_INSTALL_AMQP} + - INSTALL_CASSANDRA=${PHP_WORKER_INSTALL_CASSANDRA} + - INSTALL_GHOSTSCRIPT=${PHP_WORKER_INSTALL_GHOSTSCRIPT} + - INSTALL_SWOOLE=${PHP_WORKER_INSTALL_SWOOLE} + - INSTALL_TAINT=${PHP_WORKER_INSTALL_TAINT} + - INSTALL_FFMPEG=${PHP_WORKER_INSTALL_FFMPEG} + - INSTALL_GMP=${PHP_WORKER_INSTALL_GMP} + - PUID=${PHP_WORKER_PUID} + - PGID=${PHP_WORKER_PGID} volumes: - - ${APP_CODE_PATH_HOST}:${APP_CODE_PATH_CONTAINER} + - ${APP_CODE_PATH_HOST}:${APP_CODE_PATH_CONTAINER}${APP_CODE_CONTAINER_FLAG} - ./php-worker/supervisord.d:/etc/supervisord.d depends_on: - workspace @@ -204,6 +251,8 @@ services: - INSTALL_PGSQL=${PHP_FPM_INSTALL_PGSQL} - INSTALL_BCMATH=${PHP_FPM_INSTALL_BCMATH} - INSTALL_MEMCACHED=${PHP_FPM_INSTALL_MEMCACHED} + - INSTALL_SOCKETS=${LARAVEL_HORIZON_INSTALL_SOCKETS} + - INSTALL_CASSANDRA=${PHP_FPM_INSTALL_CASSANDRA} volumes: - ${APP_CODE_PATH_HOST}:${APP_CODE_PATH_CONTAINER} - ./laravel-horizon/supervisord.d:/etc/supervisord.d @@ -222,8 +271,11 @@ services: - PHP_UPSTREAM_CONTAINER=${NGINX_PHP_UPSTREAM_CONTAINER} - PHP_UPSTREAM_PORT=${NGINX_PHP_UPSTREAM_PORT} - CHANGE_SOURCE=${CHANGE_SOURCE} + - http_proxy + - https_proxy + - no_proxy volumes: - - ${APP_CODE_PATH_HOST}:${APP_CODE_PATH_CONTAINER} + - ${APP_CODE_PATH_HOST}:${APP_CODE_PATH_CONTAINER}${APP_CODE_CONTAINER_FLAG} - ${NGINX_HOST_LOG_PATH}:/var/log/nginx - ${NGINX_SITES_PATH}:/etc/nginx/sites-available - ${NGINX_SSL_PATH}:/etc/nginx/ssl @@ -257,7 +309,7 @@ services: - PHP_UPSTREAM_TIMEOUT=${APACHE_PHP_UPSTREAM_TIMEOUT} - DOCUMENT_ROOT=${APACHE_DOCUMENT_ROOT} volumes: - - ${APP_CODE_PATH_HOST}:${APP_CODE_PATH_CONTAINER} + - ${APP_CODE_PATH_HOST}:${APP_CODE_PATH_CONTAINER}${APP_CODE_CONTAINER_FLAG} - ${APACHE_HOST_LOG_PATH}:/var/log/apache2 - ${APACHE_SITES_PATH}:/etc/apache2/sites-available ports: @@ -273,7 +325,7 @@ services: hhvm: build: ./hhvm volumes: - - ${APP_CODE_PATH_HOST}:${APP_CODE_PATH_CONTAINER} + - ${APP_CODE_PATH_HOST}:${APP_CODE_PATH_CONTAINER}${APP_CODE_CONTAINER_FLAG} expose: - "9000" depends_on: @@ -352,13 +404,20 @@ services: ### MariaDB ############################################## mariadb: - build: ./mariadb + build: + context: ./mariadb + args: + - http_proxy + - https_proxy + - no_proxy + - MARIADB_VERSION=${MARIADB_VERSION} volumes: - ${DATA_PATH_HOST}/mariadb:/var/lib/mysql - ${MARIADB_ENTRYPOINT_INITDB}:/docker-entrypoint-initdb.d ports: - "${MARIADB_PORT}:3306" environment: + - TZ=${WORKSPACE_TIMEZONE} - MYSQL_DATABASE=${MARIADB_DATABASE} - MYSQL_USER=${MARIADB_USER} - MYSQL_PASSWORD=${MARIADB_PASSWORD} @@ -378,6 +437,22 @@ services: - POSTGRES_DB=${POSTGRES_DB} - POSTGRES_USER=${POSTGRES_USER} - POSTGRES_PASSWORD=${POSTGRES_PASSWORD} + - 
GITLAB_POSTGRES_INIT=${GITLAB_POSTGRES_INIT} + - GITLAB_POSTGRES_USER=${GITLAB_POSTGRES_USER} + - GITLAB_POSTGRES_PASSWORD=${GITLAB_POSTGRES_PASSWORD} + - GITLAB_POSTGRES_DB=${GITLAB_POSTGRES_DB} + - JUPYTERHUB_POSTGRES_INIT=${JUPYTERHUB_POSTGRES_INIT} + - JUPYTERHUB_POSTGRES_USER=${JUPYTERHUB_POSTGRES_USER} + - JUPYTERHUB_POSTGRES_PASSWORD=${JUPYTERHUB_POSTGRES_PASSWORD} + - JUPYTERHUB_POSTGRES_DB=${JUPYTERHUB_POSTGRES_DB} + - SONARQUBE_POSTGRES_INIT=${SONARQUBE_POSTGRES_INIT} + - SONARQUBE_POSTGRES_DB=${SONARQUBE_POSTGRES_DB} + - SONARQUBE_POSTGRES_USER=${SONARQUBE_POSTGRES_USER} + - SONARQUBE_POSTGRES_PASSWORD=${SONARQUBE_POSTGRES_PASSWORD} + - POSTGRES_CONFLUENCE_INIT=${CONFLUENCE_POSTGRES_INIT} + - POSTGRES_CONFLUENCE_DB=${CONFLUENCE_POSTGRES_DB} + - POSTGRES_CONFLUENCE_USER=${CONFLUENCE_POSTGRES_USER} + - POSTGRES_CONFLUENCE_PASSWORD=${CONFLUENCE_POSTGRES_PASSWORD} networks: - backend @@ -438,6 +513,25 @@ services: networks: - backend +### Redis Cluster ########################################## + redis-cluster: + build: ./redis-cluster + ports: + - "${REDIS_CLUSTER_PORT_RANGE}:7000-7005" + networks: + - backend + +### ZooKeeper ######################################### + zookeeper: + build: ./zookeeper + volumes: + - ${DATA_PATH_HOST}/zookeeper/data:/data + - ${DATA_PATH_HOST}/zookeeper/datalog:/datalog + ports: + - "${ZOOKEEPER_PORT}:2181" + networks: + - backend + ### Aerospike ########################################## aerospike: build: ./aerospike @@ -449,6 +543,10 @@ services: - "${AEROSPIKE_FABRIC_PORT}:3001" - "${AEROSPIKE_HEARTBEAT_PORT}:3002" - "${AEROSPIKE_INFO_PORT}:3003" + environment: + - STORAGE_GB=${AEROSPIKE_STORAGE_GB} + - MEM_GB=${AEROSPIKE_MEM_GB} + - NAMESPACE=${AEROSPIKE_NAMESPACE} networks: - backend @@ -486,6 +584,41 @@ services: environment: - RABBITMQ_DEFAULT_USER=${RABBITMQ_DEFAULT_USER} - RABBITMQ_DEFAULT_PASS=${RABBITMQ_DEFAULT_PASS} + hostname: laradock-rabbitmq + volumes: + - ${DATA_PATH_HOST}/rabbitmq:/var/lib/rabbitmq + depends_on: + - php-fpm + networks: + - backend + +### Cassandra ############################################ + cassandra: + build: ./cassandra + ports: + - "${CASSANDRA_TRANSPORT_PORT_NUMBER}:7000" + - "${CASSANDRA_JMX_PORT_NUMBER}:7199" + - "${CASSANDRA_CQL_PORT_NUMBER}:9042" + privileged: true + environment: + - CASSANDRA_VERSION=${CASSANDRA_VERSION} + - CASSANDRA_TRANSPORT_PORT_NUMBER=${CASSANDRA_TRANSPORT_PORT_NUMBER} + - CASSANDRA_JMX_PORT_NUMBER=${CASSANDRA_JMX_PORT_NUMBER} + - CASSANDRA_CQL_PORT_NUMBER=${CASSANDRA_CQL_PORT_NUMBER} + - CASSANDRA_USER=${CASSANDRA_USER} + - CASSANDRA_PASSWORD_SEEDER=${CASSANDRA_PASSWORD_SEEDER} + - CASSANDRA_PASSWORD=${CASSANDRA_PASSWORD} + - CASSANDRA_NUM_TOKENS=${CASSANDRA_NUM_TOKENS} + - CASSANDRA_HOST=${CASSANDRA_HOST} + - CASSANDRA_CLUSTER_NAME=${CASSANDRA_CLUSTER_NAME} + - CASSANDRA_SEEDS=${CASSANDRA_SEEDS} + - CASSANDRA_ENDPOINT_SNITCH=${CASSANDRA_ENDPOINT_SNITCH} + - CASSANDRA_ENABLE_RPC=${CASSANDRA_ENABLE_RPC} + - CASSANDRA_DATACENTER=${CASSANDRA_DATACENTER} + - CASSANDRA_RACK=${CASSANDRA_RACK} + hostname: laradock-cassandra + volumes: + - ${DATA_PATH_HOST}/cassandra:/var/lib/cassandra depends_on: - php-fpm networks: @@ -505,7 +638,7 @@ services: caddy: build: ./caddy volumes: - - ${APP_CODE_PATH_HOST}:${APP_CODE_PATH_CONTAINER} + - ${APP_CODE_PATH_HOST}:${APP_CODE_PATH_CONTAINER}${APP_CODE_CONTAINER_FLAG} - ${CADDY_CONFIG_PATH}:/etc/caddy - ${CADDY_HOST_LOG_PATH}:/var/log/caddy - ${DATA_PATH_HOST}:/root/.caddy @@ -550,11 +683,14 @@ services: ### pgAdmin 
############################################## pgadmin: - build: ./pgadmin + image: dpage/pgadmin4:latest + environment: + - "PGADMIN_DEFAULT_EMAIL=${PGADMIN_DEFAULT_EMAIL}" + - "PGADMIN_DEFAULT_PASSWORD=${PGADMIN_DEFAULT_PASSWORD}" ports: - - "5050:5050" + - "${PGADMIN_PORT}:80" volumes: - - ${DATA_PATH_HOST}/pgadmin-backup:/var/lib/pgadmin/storage/pgadmin4 + - ${DATA_PATH_HOST}/pgadmin:/var/lib/pgadmin depends_on: - postgres networks: @@ -568,8 +704,10 @@ services: - elasticsearch:/usr/share/elasticsearch/data environment: - cluster.name=laradock-cluster + - node.name=laradock-node - bootstrap.memory_lock=true - "ES_JAVA_OPTS=-Xms512m -Xmx512m" + - cluster.initial_master_nodes=laradock-node ulimits: memlock: soft: -1 @@ -583,6 +721,24 @@ services: - frontend - backend +### Logstash ############################################## + logstash: + build: ./logstash + volumes: + - './logstash/config/logstash.yml:/usr/share/logstash/config/logstash.yml' + - './logstash/pipeline:/usr/share/logstash/pipeline' + ports: + - '5001:5001' + environment: + LS_JAVA_OPTS: '-Xmx1g -Xms1g' + env_file: + - .env + networks: + - frontend + - backend + depends_on: + - elasticsearch + ### Kibana ############################################## kibana: build: ./kibana @@ -710,6 +866,36 @@ services: networks: - backend +### Graylog ####################################### + graylog: + build: ./graylog + environment: + - GRAYLOG_PASSWORD_SECRET=${GRAYLOG_PASSWORD} + - GRAYLOG_ROOT_PASSWORD_SHA2=${GRAYLOG_SHA256_PASSWORD} + - GRAYLOG_HTTP_EXTERNAL_URI=http://127.0.0.1:${GRAYLOG_PORT}/ + links: + - mongo + - elasticsearch + depends_on: + - mongo + - elasticsearch + ports: + # Graylog web interface and REST API + - ${GRAYLOG_PORT}:9000 + # Syslog TCP + - ${GRAYLOG_SYSLOG_TCP_PORT}:514 + # Syslog UDP + - ${GRAYLOG_SYSLOG_UDP_PORT}:514/udp + # GELF TCP + - ${GRAYLOG_GELF_TCP_PORT}:12201 + # GELF UDP + - ${GRAYLOG_GELF_UDP_PORT}:12201/udp + user: root + volumes: + - ./graylog/config:/usr/share/graylog/data/config + networks: + - backend + ### Laravel Echo Server ####################################### laravel-echo-server: build: @@ -723,7 +909,7 @@ services: networks: - frontend - backend - + ### Solr ################################################ solr: build: @@ -858,9 +1044,9 @@ services: ### AWS EB-CLI ################################################ aws: build: - context: ./aws + context: ./aws-eb-cli volumes: - - ${APP_CODE_PATH_HOST}:${APP_CODE_PATH_CONTAINER} + - ${APP_CODE_PATH_HOST}:${APP_CODE_PATH_CONTAINER}${APP_CODE_CONTAINER_FLAG} depends_on: - workspace tty: true @@ -885,18 +1071,19 @@ services: context: ./gitlab environment: GITLAB_OMNIBUS_CONFIG: | - external_url '${GITLAB_DOMAIN_NAME}' - redis['enable'] = false - nginx['listen_https'] = false - nginx['listen_port'] = 80 - postgresql['enable'] = false - gitlab_rails['trusted_proxies'] = ['caddy','nginx','apache2'] - gitlab_rails['redis_host'] = 'redis' + external_url '${GITLAB_DOMAIN_NAME}' + redis['enable'] = false + nginx['listen_https'] = false + nginx['listen_port'] = 80 + nginx['custom_gitlab_server_config'] = "set_real_ip_from 172.0.0.0/8;\nreal_ip_header X-Real-IP;\nreal_ip_recursive on;" + postgresql['enable'] = false + gitlab_rails['trusted_proxies'] = ['caddy','nginx','apache2'] + gitlab_rails['redis_host'] = 'redis' gitlab_rails['redis_database'] = 8 - gitlab_rails['db_host'] = 'postgres' - gitlab_rails['db_username'] = 'laradock_gitlab' - gitlab_rails['db_password'] = 'laradock_gitlab' - gitlab_rails['db_database'] = 'laradock_gitlab' + 
gitlab_rails['db_host'] = '${GITLAB_POSTGRES_HOST}' + gitlab_rails['db_username'] = '${GITLAB_POSTGRES_USER}' + gitlab_rails['db_password'] = '${GITLAB_POSTGRES_PASSWORD}' + gitlab_rails['db_database'] = '${GITLAB_POSTGRES_DB}' gitlab_rails['initial_root_password'] = '${GITLAB_ROOT_PASSWORD}' gitlab_rails['gitlab_shell_ssh_port'] = ${GITLAB_HOST_SSH_PORT} volumes: @@ -915,12 +1102,15 @@ services: gitlab-runner: image: gitlab/gitlab-runner:latest environment: - - CI_SERVER_URL=${GITLAB_DOMAIN_NAME} + - CI_SERVER_URL=${GITLAB_CI_SERVER_URL} + - REGISTRATION_TOKEN=${GITLAB_RUNNER_REGISTRATION_TOKEN} + - RUNNER_NAME=${COMPOSE_PROJECT_NAME}-runner + - REGISTER_NON_INTERACTIVE=${GITLAB_REGISTER_NON_INTERACTIVE} + - RUNNER_EXECUTOR=shell volumes: - ${DATA_PATH_HOST}/gitlab/runner:/etc/gitlab-runner - /var/run/docker.sock:/var/run/docker.sock:rw - restart: always - + ### JupyterHub ######################################### jupyterhub: build: @@ -928,7 +1118,6 @@ services: depends_on: - postgres - jupyterhub-user - restart: always volumes: - /var/run/docker.sock:/var/run/docker.sock:rw - ${DATA_PATH_HOST}/jupyterhub/:/data @@ -949,7 +1138,8 @@ services: - JUPYTERHUB_OAUTH_CALLBACK_URL=${JUPYTERHUB_OAUTH_CALLBACK_URL} - JUPYTERHUB_OAUTH_CLIENT_ID=${JUPYTERHUB_OAUTH_CLIENT_ID} - JUPYTERHUB_OAUTH_CLIENT_SECRET=${JUPYTERHUB_OAUTH_CLIENT_SECRET} - - JUPYTERHUB_LOCAL_NOTEBOOK_IMAGE=${JUPYTERHUB_LOCAL_NOTEBOOK_IMAGE} + - JUPYTERHUB_LOCAL_NOTEBOOK_IMAGE=${COMPOSE_PROJECT_NAME}_jupyterhub-user + - JUPYTERHUB_ENABLE_NVIDIA=${JUPYTERHUB_ENABLE_NVIDIA} jupyterhub-user: build: context: ./jupyterhub @@ -999,9 +1189,10 @@ services: networks: - backend -### PHPRedisAdmin ################################################ - phpredisadmin: - image: erikdubbelboer/phpredisadmin:latest +### REDISWEBUI ################################################ + redis-webui: + build: + context: ./redis-webui environment: - ADMIN_USER=${REDIS_WEBUI_USERNAME} - ADMIN_PASS=${REDIS_WEBUI_PASSWORD} @@ -1018,7 +1209,6 @@ services: mongo-webui: build: context: ./mongo-webui - restart: always environment: - ROOT_URL=${MONGO_WEBUI_ROOT_URL} - MONGO_URL=${MONGO_WEBUI_MONGO_URL} @@ -1031,7 +1221,7 @@ services: - backend depends_on: - mongo - + ### Metabase ################################################# metabase: image: metabase/metabase:latest @@ -1293,3 +1483,96 @@ services: backend: aliases: - fetchmail + +### TRAEFIK ######################################### + traefik: + build: + context: ./traefik + command: --docker + volumes: + - /var/run/docker.sock:/var/run/docker.sock + ports: + - "${TRAEFIK_HOST_HTTP_PORT}:80" + - "${TRAEFIK_HOST_HTTPS_PORT}:443" + networks: + - frontend + - backend + labels: + - traefik.backend=traefik + - traefik.frontend.rule=Host:monitor.localhost + - traefik.port=8080 + +### MOSQUITTO Broker ######################################### + mosquitto: + build: + context: ./mosquitto + volumes: + - ${DATA_PATH_HOST}/mosquitto/data:/mosquitto/data + ports: + - "${MOSQUITTO_PORT}:9001" + networks: + - frontend + - backend + +### COUCHDB ################################################### + couchdb: + build: + context: ./couchdb + volumes: + - ${DATA_PATH_HOST}/couchdb/data:/opt/couchdb/data + ports: + - "${COUCHDB_PORT}:5984" + networks: + - backend + +### Manticore Search ########################################### + manticore: + build: + context: ./manticore + volumes: + - ${MANTICORE_CONFIG_PATH}:/etc/sphinxsearch + - ${DATA_PATH_HOST}/manticore/data:/var/lib/manticore/data + - 
${DATA_PATH_HOST}/manticore/log:/var/lib/manticore/log + ports: + - "${MANTICORE_API_PORT}:9312" + - "${MANTICORE_SPHINXQL_PORT}:9306" + - "${MANTICORE_HTTP_PORT}:9308" + networks: + - backend + +### SONARQUBE ################################################ + sonarqube: + build: + context: ./sonarqube + hostname: "${SONARQUBE_HOSTNAME}" + volumes: + - ${DATA_PATH_HOST}/sonarqube/conf:/opt/sonarqube/conf + - ${DATA_PATH_HOST}/sonarqube/data:/opt/sonarqube/data + - ${DATA_PATH_HOST}/sonarqube/logs:/opt/sonarqube/logs + - ${DATA_PATH_HOST}/sonarqube/extensions:/opt/sonarqube/extensions + - ${DATA_PATH_HOST}/sonarqube/plugins:/opt/sonarqube/lib/bundled-plugins + ports: + - ${SONARQUBE_PORT}:9000 + depends_on: + - postgres + environment: + - sonar.jdbc.username=${SONARQUBE_POSTGRES_USER} + - sonar.jdbc.password=${SONARQUBE_POSTGRES_PASSWORD} + - sonar.jdbc.url=jdbc:postgresql://${SONARQUBE_POSTGRES_HOST}:5432/${SONARQUBE_POSTGRES_DB} + networks: + - backend + - frontend +### CONFLUENCE ################################################ + confluence: + container_name: Confluence + image: atlassian/confluence-server:${CONFLUENCE_VERSION} + restart: always + ports: + - "${CONFLUENCE_HOST_HTTP_PORT}:8090" + networks: + - frontend + - backend + depends_on: + - postgres + volumes: + - ${DATA_PATH_HOST}/Confluence:/var/atlassian/application-data diff --git a/elasticsearch/Dockerfile b/elasticsearch/Dockerfile index c82bd0c5..dcc28741 100644 --- a/elasticsearch/Dockerfile +++ b/elasticsearch/Dockerfile @@ -1,3 +1,3 @@ -FROM docker.elastic.co/elasticsearch/elasticsearch:6.2.3 +FROM docker.elastic.co/elasticsearch/elasticsearch:7.1.1 EXPOSE 9200 9300 diff --git a/env-example b/env-example index b9866d63..0e80c21b 100644 --- a/env-example +++ b/env-example @@ -7,8 +7,11 @@ # Point to the path of your applications code on your host APP_CODE_PATH_HOST=../ -# Point to where the `APP_CODE_PATH_HOST` should be in the container. You may add flags to the path `:cached`, `:delegated`. When using Docker Sync add `:nocopy` -APP_CODE_PATH_CONTAINER=/var/www:cached +# Point to where the `APP_CODE_PATH_HOST` should be in the container +APP_CODE_PATH_CONTAINER=/var/www + +# You may add flags to the path `:cached`, `:delegated`. When using Docker Sync add `:nocopy` +APP_CODE_CONTAINER_FLAG=:cached # Choose storage path on your machine. For all storage systems DATA_PATH_HOST=~/.laradock/data @@ -34,7 +37,7 @@ COMPOSE_PROJECT_NAME=laradock ### PHP Version ########################################### -# Select a PHP version of the Workspace and PHP-FPM containers (Does not apply to HHVM). Accepted values: 7.2 - 7.1 - 7.0 - 5.6 +# Select a PHP version of the Workspace and PHP-FPM containers (Does not apply to HHVM). 
Accepted values: 7.3 - 7.2 - 7.1 - 7.0 - 5.6 PHP_VERSION=7.2 ### Phalcon Version ########################################### @@ -79,6 +82,7 @@ DOCKER_SYNC_STRATEGY=native_osx ### WORKSPACE ############################################# WORKSPACE_COMPOSER_GLOBAL_INSTALL=true +WORKSPACE_COMPOSER_AUTH=false WORKSPACE_COMPOSER_REPO_PACKAGIST= WORKSPACE_INSTALL_NODE=true WORKSPACE_NODE_VERSION=node @@ -88,6 +92,7 @@ WORKSPACE_YARN_VERSION=latest WORKSPACE_INSTALL_NPM_GULP=true WORKSPACE_INSTALL_NPM_BOWER=false WORKSPACE_INSTALL_NPM_VUE_CLI=true +WORKSPACE_INSTALL_NPM_ANGULAR_CLI=false WORKSPACE_INSTALL_PHPREDIS=true WORKSPACE_INSTALL_WORKSPACE_SSH=false WORKSPACE_INSTALL_SUBVERSION=false @@ -97,13 +102,16 @@ WORKSPACE_INSTALL_SSH2=false WORKSPACE_INSTALL_LDAP=false WORKSPACE_INSTALL_GMP=false WORKSPACE_INSTALL_SOAP=false +WORKSPACE_INSTALL_XSL=false WORKSPACE_INSTALL_IMAP=false WORKSPACE_INSTALL_MONGO=false WORKSPACE_INSTALL_AMQP=false +WORKSPACE_INSTALL_CASSANDRA=false WORKSPACE_INSTALL_MSSQL=false WORKSPACE_INSTALL_DRUSH=false WORKSPACE_DRUSH_VERSION=8.1.17 WORKSPACE_INSTALL_DRUPAL_CONSOLE=false +WORKSPACE_INSTALL_WP_CLI=false WORKSPACE_INSTALL_AEROSPIKE=false WORKSPACE_INSTALL_V8JS=false WORKSPACE_INSTALL_LARAVEL_ENVOY=false @@ -114,6 +122,7 @@ WORKSPACE_INSTALL_LINUXBREW=false WORKSPACE_INSTALL_MC=false WORKSPACE_INSTALL_SYMFONY=false WORKSPACE_INSTALL_PYTHON=false +WORKSPACE_INSTALL_POWERLINE=false WORKSPACE_INSTALL_IMAGE_OPTIMIZERS=false WORKSPACE_INSTALL_IMAGEMAGICK=false WORKSPACE_INSTALL_TERRAFORM=false @@ -121,18 +130,24 @@ WORKSPACE_INSTALL_DUSK_DEPS=false WORKSPACE_INSTALL_PG_CLIENT=false WORKSPACE_INSTALL_PHALCON=false WORKSPACE_INSTALL_SWOOLE=false +WORKSPACE_INSTALL_TAINT=false WORKSPACE_INSTALL_LIBPNG=false WORKSPACE_INSTALL_IONCUBE=false WORKSPACE_INSTALL_MYSQL_CLIENT=false +WORKSPACE_INSTALL_PING=false +WORKSPACE_INSTALL_SSHPASS=false +WORKSPACE_INSTALL_INOTIFY=false +WORKSPACE_INSTALL_FSWATCH=false WORKSPACE_PUID=1000 WORKSPACE_PGID=1000 WORKSPACE_CHROME_DRIVER_VERSION=2.42 WORKSPACE_TIMEZONE=UTC WORKSPACE_SSH_PORT=2222 +WORKSPACE_INSTALL_FFMPEG=false +WORKSPACE_INSTALL_GNU_PARALLEL=false ### PHP_FPM ############################################### -PHP_FPM_INSTALL_ZIP_ARCHIVE=true PHP_FPM_INSTALL_BCMATH=true PHP_FPM_INSTALL_MYSQLI=true PHP_FPM_INSTALL_INTL=true @@ -142,13 +157,16 @@ PHP_FPM_INSTALL_IMAGE_OPTIMIZERS=true PHP_FPM_INSTALL_PHPREDIS=true PHP_FPM_INSTALL_MEMCACHED=false PHP_FPM_INSTALL_XDEBUG=false +PHP_FPM_INSTALL_XHPROF=false PHP_FPM_INSTALL_PHPDBG=false PHP_FPM_INSTALL_IMAP=false PHP_FPM_INSTALL_MONGO=false PHP_FPM_INSTALL_AMQP=false +PHP_FPM_INSTALL_CASSANDRA=false PHP_FPM_INSTALL_MSSQL=false PHP_FPM_INSTALL_SSH2=false PHP_FPM_INSTALL_SOAP=false +PHP_FPM_INSTALL_XSL=false PHP_FPM_INSTALL_GMP=false PHP_FPM_INSTALL_EXIF=false PHP_FPM_INSTALL_AEROSPIKE=false @@ -157,20 +175,42 @@ PHP_FPM_INSTALL_GHOSTSCRIPT=false PHP_FPM_INSTALL_LDAP=false PHP_FPM_INSTALL_PHALCON=false PHP_FPM_INSTALL_SWOOLE=false +PHP_FPM_INSTALL_TAINT=false PHP_FPM_INSTALL_PG_CLIENT=false +PHP_FPM_INSTALL_POSTGIS=false PHP_FPM_INSTALL_PCNTL=false PHP_FPM_INSTALL_CALENDAR=false PHP_FPM_INSTALL_FAKETIME=false PHP_FPM_INSTALL_IONCUBE=false +PHP_FPM_INSTALL_RDKAFKA=false PHP_FPM_FAKETIME=-0 +PHP_FPM_INSTALL_APCU=false PHP_FPM_INSTALL_YAML=false +PHP_FPM_INSTALL_ADDITIONAL_LOCALES=false +PHP_FPM_INSTALL_MYSQL_CLIENT=false +PHP_FPM_INSTALL_PING=false +PHP_FPM_INSTALL_SSHPASS=false +PHP_FPM_FFMPEG=false +PHP_FPM_ADDITIONAL_LOCALES="es_ES.UTF-8 fr_FR.UTF-8" ### PHP_WORKER 
############################################ PHP_WORKER_INSTALL_PGSQL=false PHP_WORKER_INSTALL_BCMATH=false +PHP_WORKER_INSTALL_PHALCON=false PHP_WORKER_INSTALL_SOAP=false PHP_WORKER_INSTALL_ZIP_ARCHIVE=false +PHP_WORKER_INSTALL_MYSQL_CLIENT=false +PHP_WORKER_INSTALL_AMQP=false +PHP_WORKER_INSTALL_GHOSTSCRIPT=false +PHP_WORKER_INSTALL_SWOOLE=false +PHP_WORKER_INSTALL_TAINT=false +PHP_WORKER_INSTALL_FFMPEG=false +PHP_WORKER_INSTALL_GMP=false +PHP_WORKER_INSTALL_CASSANDRA=false + +PHP_WORKER_PUID=1000 +PHP_WORKER_PGID=1000 ### NGINX ################################################# @@ -182,6 +222,10 @@ NGINX_PHP_UPSTREAM_CONTAINER=php-fpm NGINX_PHP_UPSTREAM_PORT=9000 NGINX_SSL_PATH=./nginx/ssl/ +### LARAVEL_HORIZON ################################################ + +LARAVEL_HORIZON_INSTALL_SOCKETS=false + ### APACHE ################################################ APACHE_HOST_HTTP_PORT=80 @@ -207,6 +251,14 @@ MYSQL_ENTRYPOINT_INITDB=./mysql/docker-entrypoint-initdb.d REDIS_PORT=6379 +### REDIS CLUSTER ######################################### + +REDIS_CLUSTER_PORT_RANGE=7000-7005 + +### ZooKeeper ############################################# + +ZOOKEEPER_PORT=2181 + ### Percona ############################################### PERCONA_DATABASE=homestead @@ -224,6 +276,7 @@ MSSQL_PORT=1433 ### MARIADB ############################################### +MARIADB_VERSION=latest MARIADB_DATABASE=default MARIADB_USER=default MARIADB_PASSWORD=secret @@ -330,10 +383,30 @@ JENKINS_HOST_HTTP_PORT=8090 JENKINS_HOST_SLAVE_AGENT_PORT=50000 JENKINS_HOME=./jenkins/jenkins_home +### CONFLUENCE ############################################### +CONFLUENCE_POSTGRES_INIT=true +CONFLUENCE_VERSION=6.13-ubuntu-18.04-adoptopenjdk8 +CONFLUENCE_POSTGRES_DB=laradock_confluence +CONFLUENCE_POSTGRES_USER=laradock_confluence +CONFLUENCE_POSTGRES_PASSWORD=laradock_confluence +CONFLUENCE_HOST_HTTP_PORT=8090 + ### GRAFANA ############################################### GRAFANA_PORT=3000 +### GRAYLOG ############################################### + +# password must be 16 characters long +GRAYLOG_PASSWORD=somesupersecretpassword +# sha256 representation of the password +GRAYLOG_SHA256_PASSWORD=b1cb6e31e172577918c9e7806c572b5ed8477d3f57aa737bee4b5b1db3696f09 +GRAYLOG_PORT=9000 +GRAYLOG_SYSLOG_TCP_PORT=514 +GRAYLOG_SYSLOG_UDP_PORT=514 +GRAYLOG_GELF_TCP_PORT=12201 +GRAYLOG_GELF_UDP_PORT=12201 + ### BLACKFIRE ############################################# # Create an account on blackfire.io. Don't enable blackfire and xDebug at the same time. # visit https://blackfire.io/docs/24-days/06-installation#install-probe-debian for more info. 
@@ -349,13 +422,9 @@ AEROSPIKE_SERVICE_PORT=3000 AEROSPIKE_FABRIC_PORT=3001 AEROSPIKE_HEARTBEAT_PORT=3002 AEROSPIKE_INFO_PORT=3003 - -## Temp solution, this should be in the dockerfile -# for all versions "https://github.com/aerospike/aerospike-client-php/archive/master.tar.gz" -# for php 7.2 (using this branch until the support for 7.2 on master) "https://github.com/aerospike/aerospike-client-php/archive/7.2.0-release-candidate.tar.gz" -AEROSPIKE_PHP_REPOSITORY=https://github.com/aerospike/aerospike-client-php/archive/7.2.0-release-candidate.tar.gz -# for php 5.6 -# AEROSPIKE_PHP_REPOSITORY=https://github.com/aerospike/aerospike-client-php5/archive/3.4.15.tar.gz +AEROSPIKE_STORAGE_GB=1 +AEROSPIKE_MEM_GB=1 +AEROSPIKE_NAMESPACE=test ### RETHINKDB ############################################# @@ -492,14 +561,25 @@ SOLR_DATAIMPORTHANDLER_MYSQL=false SOLR_DATAIMPORTHANDLER_MSSQL=false ### GITLAB ############################################### +GITLAB_POSTGRES_INIT=true GITLAB_HOST_HTTP_PORT=8989 GITLAB_HOST_HTTPS_PORT=9898 GITLAB_HOST_SSH_PORT=2289 GITLAB_DOMAIN_NAME=http://localhost GITLAB_ROOT_PASSWORD=laradock GITLAB_HOST_LOG_PATH=./logs/gitlab +GITLAB_POSTGRES_HOST=postgres +GITLAB_POSTGRES_USER=laradock_gitlab +GITLAB_POSTGRES_PASSWORD=laradock_gitlab +GITLAB_POSTGRES_DB=laradock_gitlab + +### GITLAB-RUNNER ############################################### +GITLAB_CI_SERVER_URL=http://localhost:8989 +GITLAB_RUNNER_REGISTRATION_TOKEN= +GITLAB_REGISTER_NON_INTERACTIVE=true ### JUPYTERHUB ############################################### +JUPYTERHUB_POSTGRES_INIT=true JUPYTERHUB_POSTGRES_HOST=postgres JUPYTERHUB_POSTGRES_USER=laradock_jupyterhub JUPYTERHUB_POSTGRES_PASSWORD=laradock_jupyterhub @@ -508,10 +588,10 @@ JUPYTERHUB_PORT=9991 JUPYTERHUB_OAUTH_CALLBACK_URL=http://laradock:9991/hub/oauth_callback JUPYTERHUB_OAUTH_CLIENT_ID={GITHUB_CLIENT_ID} JUPYTERHUB_OAUTH_CLIENT_SECRET={GITHUB_CLIENT_SECRET} -JUPYTERHUB_LOCAL_NOTEBOOK_IMAGE=laradock_jupyterhub-user JUPYTERHUB_CUSTOM_CONFIG=./jupyterhub/jupyterhub_config.py JUPYTERHUB_USER_DATA=/jupyterhub JUPYTERHUB_USER_LIST=./jupyterhub/userlist +JUPYTERHUB_ENABLE_NVIDIA=false ### IPYTHON ################################################## LARADOCK_IPYTHON_CONTROLLER_IP=127.0.0.1 @@ -519,7 +599,7 @@ LARADOCK_IPYTHON_CONTROLLER_IP=127.0.0.1 ### NETDATA ############################################### NETDATA_PORT=19999 -### PHPREDISADMIN ######################################### +### REDISWEBUI ######################################### REDIS_WEBUI_USERNAME=laradock REDIS_WEBUI_PASSWORD=laradock REDIS_WEBUI_CONNECT_HOST=redis @@ -614,3 +694,80 @@ MAILU_ADMIN=true MAILU_WEBMAIL=rainloop # Dav server implementation (value: radicale, none) MAILU_WEBDAV=radicale + + +### TRAEFIK ################################################# + +TRAEFIK_HOST_HTTP_PORT=80 +TRAEFIK_HOST_HTTPS_PORT=443 + + +### MOSQUITTO ################################################# + +MOSQUITTO_PORT=9001 + +### COUCHDB ################################################### + +COUCHDB_PORT=5984 + +### Manticore Search ########################################## + +MANTICORE_CONFIG_PATH=./manticore/config +MANTICORE_API_PORT=9312 +MANTICORE_SPHINXQL_PORT=9306 +MANTICORE_HTTP_PORT=9308 + +### pgadmin ################################################## +# use this address http://ip6-localhost:5050 +PGADMIN_PORT=5050 +PGADMIN_DEFAULT_EMAIL=pgadmin4@pgadmin.org +PGADMIN_DEFAULT_PASSWORD=admin + +### SONARQUBE ################################################ +## docker-compose up -d sonarqube +## 
(If you encounter a database error) +## docker-compose exec --user=root postgres +## source docker-entrypoint-initdb.d/init_sonarqube_db.sh +## (If you encounter logs error) +## docker-compose run --user=root --rm sonarqube chown sonarqube:sonarqube /opt/sonarqube/logs + +SONARQUBE_HOSTNAME=sonar.example.com +SONARQUBE_PORT=9000 +SONARQUBE_POSTGRES_INIT=true +SONARQUBE_POSTGRES_HOST=postgres +SONARQUBE_POSTGRES_DB=sonar +SONARQUBE_POSTGRES_USER=sonar +SONARQUBE_POSTGRES_PASSWORD=sonarPass + +### CASSANDRA ################################################ + +# Cassandra Version, supported tags can be found at https://hub.docker.com/r/bitnami/cassandra/ +CASSANDRA_VERSION=latest +# Inter-node cluster communication port. Default: 7000 +CASSANDRA_TRANSPORT_PORT_NUMBER=7000 +# JMX connections port. Default: 7199 +CASSANDRA_JMX_PORT_NUMBER=7199 +# Client port. Default: 9042. +CASSANDRA_CQL_PORT_NUMBER=9042 +# Cassandra user name. Default: cassandra +CASSANDRA_USER=cassandra +# Password seeder will change the Cassandra default credentials at initialization. In clusters, only one node should be marked as password seeder. Default: no +CASSANDRA_PASSWORD_SEEDER=no +# Cassandra user password. Default: cassandra +CASSANDRA_PASSWORD=cassandra +# Number of tokens for the node. Default: 256. +CASSANDRA_NUM_TOKENS=256 +# Hostname used to configure Cassandra. It can be either an IP or a domain. If left empty, it will be resolved to the machine IP. +CASSANDRA_HOST= +# Cluster name to configure Cassandra. Default: My Cluster +CASSANDRA_CLUSTER_NAME="My Cluster" +# Hosts that will act as Cassandra seeds. No defaults. +CASSANDRA_SEEDS= +# Snitch name (which determines which data centers and racks nodes belong to). Default: SimpleSnitch +CASSANDRA_ENDPOINT_SNITCH=SimpleSnitch +# Enable the thrift RPC endpoint. Default: true +CASSANDRA_ENABLE_RPC=true +# Datacenter name for the cluster. Ignored in SimpleSnitch endpoint snitch. Default: dc1. +CASSANDRA_DATACENTER=dc1 +# Rack name for the cluster. Ignored in SimpleSnitch endpoint snitch. Default: rack1. +CASSANDRA_RACK=rack1 \ No newline at end of file diff --git a/graylog/Dockerfile b/graylog/Dockerfile new file mode 100644 index 00000000..c9b22094 --- /dev/null +++ b/graylog/Dockerfile @@ -0,0 +1,3 @@ +FROM graylog/graylog:3.0 + +EXPOSE 9000 diff --git a/graylog/config/graylog.conf b/graylog/config/graylog.conf new file mode 100644 index 00000000..ff8200bb --- /dev/null +++ b/graylog/config/graylog.conf @@ -0,0 +1,481 @@ +############################ +# GRAYLOG CONFIGURATION FILE +############################ +# +# This is the Graylog configuration file. The file has to use ISO 8859-1/Latin-1 character encoding. +# Characters that cannot be directly represented in this encoding can be written using Unicode escapes +# as defined in https://docs.oracle.com/javase/specs/jls/se8/html/jls-3.html#jls-3.3, using the \u prefix. +# For example, \u002c. +# +# * Entries are generally expected to be a single line in one of the following forms: +# +# propertyName=propertyValue +# propertyName:propertyValue +# +# * White space that appears between the property name and property value is ignored, +# so the following are equivalent: +# +# name=Stephen +# name = Stephen +# +# * White space at the beginning of the line is also ignored. +# +# * Lines that start with the comment characters ! or # are ignored. Blank lines are also ignored. +# +# * The property value is generally terminated by the end of the line.
White space following the +# property value is not ignored, and is treated as part of the property value. +# +# * A property value can span several lines if each line is terminated by a backslash (‘\’) character. +# For example: +# +# targetCities=\ +# Detroit,\ +# Chicago,\ +# Los Angeles +# +# This is equivalent to targetCities=Detroit,Chicago,Los Angeles (white space at the beginning of lines is ignored). +# +# * The characters newline, carriage return, and tab can be inserted with characters \n, \r, and \t, respectively. +# +# * The backslash character must be escaped as a double backslash. For example: +# +# path=c:\\docs\\doc1 +# + +# If you are running more than one instance of Graylog server you have to select one of these +# instances as master. The master will perform some periodical tasks that non-masters won't perform. +is_master = true + +# The auto-generated node ID will be stored in this file and read after restarts. It is a good idea +# to use an absolute file path here if you are starting Graylog server from init scripts or similar. +node_id_file = /usr/share/graylog/data/config/node-id + +# You MUST set a secret to secure/pepper the stored user passwords here. Use at least 64 characters. +# Generate one by using for example: pwgen -N 1 -s 96 +password_secret = replacethiswithyourownsecret! + +# The default root user is named 'admin' +#root_username = admin + +# You MUST specify a hash password for the root user (which you only need to initially set up the +# system and in case you lose connectivity to your authentication backend) +# This password cannot be changed using the API or via the web interface. If you need to change it, +# modify it in this file. +# Create one by using for example: echo -n yourpassword | shasum -a 256 +# and put the resulting hash value into the following line + +# Default password: admin +# CHANGE THIS! +root_password_sha2 = 8c6976e5b5410415bde908bd4dee15dfb167a9c873fc4bb8a81f6f2ab448a918 + +# The email address of the root user. +# Default is empty +#root_email = "" + +# The time zone setting of the root user. See http://www.joda.org/joda-time/timezones.html for a list of valid time zones. +# Default is UTC +#root_timezone = UTC + +# Set plugin directory here (relative or absolute) +plugin_dir = /usr/share/graylog/plugin + +############### +# HTTP settings +############### + +#### HTTP bind address +# +# The network interface used by the Graylog HTTP interface. +# +# This network interface must be accessible by all Graylog nodes in the cluster and by all clients +# using the Graylog web interface. +# +# If the port is omitted, Graylog will use port 9000 by default. +# +# Default: 127.0.0.1:9000 +#http_bind_address = 127.0.0.1:9000 +#http_bind_address = [2001:db8::1]:9000 +http_bind_address = 0.0.0.0:9000 + +#### HTTP publish URI +# +# The HTTP URI of this Graylog node which is used to communicate with the other Graylog nodes in the cluster and by all +# clients using the Graylog web interface. +# +# The URI will be published in the cluster discovery APIs, so that other Graylog nodes will be able to find and connect to this Graylog node. +# +# This configuration setting has to be used if this Graylog node is available on another network interface than $http_bind_address, +# for example if the machine has multiple network interfaces or is behind a NAT gateway. +# +# If $http_bind_address contains a wildcard IPv4 address (0.0.0.0), the first non-loopback IPv4 address of this machine will be used.
+# This configuration setting *must not* contain a wildcard address! +# +# Default: http://$http_bind_address/ +#http_publish_uri = http://192.168.1.1:9000/ + +#### External Graylog URI +# +# The public URI of Graylog which will be used by the Graylog web interface to communicate with the Graylog REST API. +# +# The external Graylog URI usually has to be specified, if Graylog is running behind a reverse proxy or load-balancer +# and it will be used to generate URLs addressing entities in the Graylog REST API (see $http_bind_address). +# +# When using Graylog Collector, this URI will be used to receive heartbeat messages and must be accessible for all collectors. +# +# This setting can be overridden on a per-request basis with the "X-Graylog-Server-URL" HTTP request header. +# +# Default: $http_publish_uri +#http_external_uri = + +#### Enable CORS headers for HTTP interface +# +# This is necessary for JS-clients accessing the server directly. +# If these are disabled, modern browsers will not be able to retrieve resources from the server. +# This is enabled by default. Uncomment the next line to disable it. +#http_enable_cors = false + +#### Enable GZIP support for HTTP interface +# +# This compresses API responses and therefore helps to reduce +# overall round trip times. This is enabled by default. Uncomment the next line to disable it. +#http_enable_gzip = false + +# The maximum size of the HTTP request headers in bytes. +#http_max_header_size = 8192 + +# The size of the thread pool used exclusively for serving the HTTP interface. +#http_thread_pool_size = 16 + +################ +# HTTPS settings +################ + +#### Enable HTTPS support for the HTTP interface +# +# This secures the communication with the HTTP interface with TLS to prevent request forgery and eavesdropping. +# +# Default: false +#http_enable_tls = true + +# The X.509 certificate chain file in PEM format to use for securing the HTTP interface. +#http_tls_cert_file = /path/to/graylog.crt + +# The PKCS#8 private key file in PEM format to use for securing the HTTP interface. +#http_tls_key_file = /path/to/graylog.key + +# The password to unlock the private key used for securing the HTTP interface. +#http_tls_key_password = secret + + +# Comma separated list of trusted proxies that are allowed to set the client address with X-Forwarded-For +# header. May be subnets, or hosts. +#trusted_proxies = 127.0.0.1/32, 0:0:0:0:0:0:0:1/128 + +# List of Elasticsearch hosts Graylog should connect to. +# Need to be specified as a comma-separated list of valid URIs for the http ports of your elasticsearch nodes. +# If one or more of your elasticsearch hosts require authentication, include the credentials in each node URI that +# requires authentication. +# +# Default: http://127.0.0.1:9200 +elasticsearch_hosts = http://elasticsearch:9200 + +# Maximum amount of time to wait for a successful connection to the Elasticsearch HTTP port. +# +# Default: 10 Seconds +#elasticsearch_connect_timeout = 10s + +# Maximum amount of time to wait for reading back a response from an Elasticsearch server. +# +# Default: 60 seconds +#elasticsearch_socket_timeout = 60s + +# Maximum idle time for an Elasticsearch connection. If this is exceeded, this connection will +# be torn down. +# +# Default: inf +#elasticsearch_idle_timeout = -1s + +# Maximum number of total connections to Elasticsearch.
+# +# Default: 20 +#elasticsearch_max_total_connections = 20 + +# Maximum number of total connections per Elasticsearch route (normally this means per +# elasticsearch server). +# +# Default: 2 +#elasticsearch_max_total_connections_per_route = 2 + +# Maximum number of times Graylog will retry failed requests to Elasticsearch. +# +# Default: 2 +#elasticsearch_max_retries = 2 + +# Enable automatic Elasticsearch node discovery through Nodes Info, +# see https://www.elastic.co/guide/en/elasticsearch/reference/5.4/cluster-nodes-info.html +# +# WARNING: Automatic node discovery does not work if Elasticsearch requires authentication, e. g. with Shield. +# +# Default: false +#elasticsearch_discovery_enabled = true + +# Filter for including/excluding Elasticsearch nodes in discovery according to their custom attributes, +# see https://www.elastic.co/guide/en/elasticsearch/reference/5.4/cluster.html#cluster-nodes +# +# Default: empty +#elasticsearch_discovery_filter = rack:42 + +# Frequency of the Elasticsearch node discovery. +# +# Default: 30s +# elasticsearch_discovery_frequency = 30s + +# Enable payload compression for Elasticsearch requests. +# +# Default: false +#elasticsearch_compression_enabled = true + +# Disable checking the version of Elasticsearch for being compatible with this Graylog release. +# WARNING: Using Graylog with unsupported and untested versions of Elasticsearch may lead to data loss! +#elasticsearch_disable_version_check = true + +# Disable message retention on this node, i. e. disable Elasticsearch index rotation. +#no_retention = false + +# Do you want to allow searches with leading wildcards? This can be extremely resource hungry and should only +# be enabled with care. See also: http://docs.graylog.org/en/2.1/pages/queries.html +allow_leading_wildcard_searches = false + +# Do you want to allow searches to be highlighted? Depending on the size of your messages this can be memory hungry and +# should only be enabled after making sure your Elasticsearch cluster has enough memory. +allow_highlighting = false + +# Global request timeout for Elasticsearch requests (e. g. during search, index creation, or index time-range +# calculations) based on a best-effort to restrict the runtime of Elasticsearch operations. +# Default: 1m +#elasticsearch_request_timeout = 1m + +# Global timeout for index optimization (force merge) requests. +# Default: 1h +#elasticsearch_index_optimization_timeout = 1h + +# Maximum number of concurrently running index optimization (force merge) jobs. +# If you are using lots of different index sets, you might want to increase that number. +# Default: 20 +#elasticsearch_index_optimization_jobs = 20 + +# Time interval for index range information cleanups. This setting defines how often stale index range information +# is being purged from the database. +# Default: 1h +#index_ranges_cleanup_interval = 1h + +# Batch size for the Elasticsearch output. This is the maximum (!) number of messages the Elasticsearch output +# module will get at once and write to Elasticsearch in a batch call. If the configured batch size has not been +# reached within output_flush_interval seconds, everything that is available will be flushed at once. Remember +# that every outputbuffer processor manages its own batch and performs its own batch write calls. +# ("outputbuffer_processors" variable) +output_batch_size = 500 + +# Flush interval (in seconds) for the Elasticsearch output. This is the maximum amount of time between two +# batches of messages written to Elasticsearch. 
It is only effective at all if your minimum number of messages +# for this time period is less than output_batch_size * outputbuffer_processors. +output_flush_interval = 1 + +# As stream outputs are loaded only on demand, an output which is failing to initialize will be tried over and +# over again. To prevent this, the following configuration options define after how many faults an output will +# not be tried again for an also configurable amount of seconds. +output_fault_count_threshold = 5 +output_fault_penalty_seconds = 30 + +# The number of parallel running processors. +# Raise this number if your buffers are filling up. +processbuffer_processors = 5 +outputbuffer_processors = 3 + +# The following settings (outputbuffer_processor_*) configure the thread pools backing each output buffer processor. +# See https://docs.oracle.com/javase/8/docs/api/java/util/concurrent/ThreadPoolExecutor.html for technical details + +# When the number of threads is greater than the core (see outputbuffer_processor_threads_core_pool_size), +# this is the maximum time in milliseconds that excess idle threads will wait for new tasks before terminating. +# Default: 5000 +#outputbuffer_processor_keep_alive_time = 5000 + +# The number of threads to keep in the pool, even if they are idle, unless allowCoreThreadTimeOut is set +# Default: 3 +#outputbuffer_processor_threads_core_pool_size = 3 + +# The maximum number of threads to allow in the pool +# Default: 30 +#outputbuffer_processor_threads_max_pool_size = 30 + +# UDP receive buffer size for all message inputs (e. g. SyslogUDPInput). +#udp_recvbuffer_sizes = 1048576 + +# Wait strategy describing how buffer processors wait on a cursor sequence. (default: sleeping) +# Possible types: +# - yielding +# Compromise between performance and CPU usage. +# - sleeping +# Compromise between performance and CPU usage. Latency spikes can occur after quiet periods. +# - blocking +# High throughput, low latency, higher CPU usage. +# - busy_spinning +# Avoids syscalls which could introduce latency jitter. Best when threads can be bound to specific CPU cores. +processor_wait_strategy = blocking + +# Size of internal ring buffers. Raise this if raising outputbuffer_processors does not help anymore. +# For optimum performance your LogMessage objects in the ring buffer should fit in your CPU L3 cache. +# Must be a power of 2. (512, 1024, 2048, ...) +ring_size = 65536 + +inputbuffer_ring_size = 65536 +inputbuffer_processors = 2 +inputbuffer_wait_strategy = blocking + +# Enable the disk based message journal. +message_journal_enabled = true + +# The directory which will be used to store the message journal. The directory must be exclusively used by Graylog and +# must not contain any other files than the ones created by Graylog itself. +# +# ATTENTION: +# If you create a separate partition for the journal files and use a file system creating directories like 'lost+found' +# in the root directory, you need to create a sub directory for your journal. +# Otherwise Graylog will log an error message that the journal is corrupt and Graylog will not start. +message_journal_dir = /usr/share/graylog/data/journal + +# The journal holds messages before they can be written to Elasticsearch. +# For a maximum of 12 hours or 5 GB, whichever happens first. +# During normal operation the journal will be smaller.
+#message_journal_max_age = 12h +#message_journal_max_size = 5gb + +#message_journal_flush_age = 1m +#message_journal_flush_interval = 1000000 +#message_journal_segment_age = 1h +#message_journal_segment_size = 100mb + +# Number of threads used exclusively for dispatching internal events. Default is 2. +#async_eventbus_processors = 2 + +# How many seconds to wait between marking node as DEAD for possible load balancers and starting the actual +# shutdown process. Set to 0 if you have no status checking load balancers in front. +lb_recognition_period_seconds = 3 + +# Journal usage percentage that triggers requesting throttling for this server node from load balancers. The feature is +# disabled if not set. +#lb_throttle_threshold_percentage = 95 + +# Every message is matched against the configured streams and it can happen that a stream contains rules which +# take an unusual amount of time to run, for example if it is using regular expressions that perform excessive backtracking. +# This will impact the processing of the entire server. To keep such misbehaving stream rules from impacting other +# streams, Graylog limits the execution time for each stream. +# The default values are noted below, the timeout is in milliseconds. +# If the stream matching for one stream took longer than the timeout value, and this happened more than "max_faults" times +# that stream is disabled and a notification is shown in the web interface. +#stream_processing_timeout = 2000 +#stream_processing_max_faults = 3 + +# Length of the interval in seconds in which the alert conditions for all streams should be checked +# and alarms are being sent. +#alert_check_interval = 60 + +# Since 0.21 the Graylog server supports pluggable output modules. This means a single message can be written to multiple +# outputs. The next setting defines the timeout for a single output module, including the default output module where all +# messages end up. +# +# Time in milliseconds to wait for all message outputs to finish writing a single message. +#output_module_timeout = 10000 + +# Time in milliseconds after which a detected stale master node is being rechecked on startup. +#stale_master_timeout = 2000 + +# Time in milliseconds which Graylog is waiting for all threads to stop on shutdown. +#shutdown_timeout = 30000 + +# MongoDB connection string +# See https://docs.mongodb.com/manual/reference/connection-string/ for details +mongodb_uri = mongodb://mongo/graylog + +# Authenticate against the MongoDB server +#mongodb_uri = mongodb://grayloguser:secret@mongo:27017/graylog + +# Use a replica set instead of a single host +#mongodb_uri = mongodb://grayloguser:secret@mongo:27017,mongo:27018,mongo:27019/graylog + +# Increase this value according to the maximum connections your MongoDB server can handle from a single client +# if you encounter MongoDB connection problems. +mongodb_max_connections = 100 + +# Number of threads allowed to be blocked by MongoDB connections multiplier. Default: 5 +# If mongodb_max_connections is 100, and mongodb_threads_allowed_to_block_multiplier is 5, +# then 500 threads can block. More than that and an exception will be thrown.
+# http://api.mongodb.com/java/current/com/mongodb/MongoOptions.html#threadsAllowedToBlockForConnectionMultiplier +mongodb_threads_allowed_to_block_multiplier = 5 + +# Drools Rule File (Used to rewrite incoming log messages) +# See: http://docs.graylog.org/en/2.1/pages/drools.html +#rules_file = /etc/graylog/server/rules.drl + +# Email transport +#transport_email_enabled = false +#transport_email_hostname = mail.example.com +#transport_email_port = 587 +#transport_email_use_auth = true +#transport_email_use_tls = true +#transport_email_use_ssl = true +#transport_email_auth_username = you@example.com +#transport_email_auth_password = secret +#transport_email_subject_prefix = [graylog] +#transport_email_from_email = graylog@example.com + +# Specify and uncomment this if you want to include links to the stream in your stream alert mails. +# This should define the fully qualified base url to your web interface exactly the same way as it is accessed by your users. +#transport_email_web_interface_url = https://graylog.example.com + +# The default connect timeout for outgoing HTTP connections. +# Values must be a positive duration (and between 1 and 2147483647 when converted to milliseconds). +# Default: 5s +#http_connect_timeout = 5s + +# The default read timeout for outgoing HTTP connections. +# Values must be a positive duration (and between 1 and 2147483647 when converted to milliseconds). +# Default: 10s +#http_read_timeout = 10s + +# The default write timeout for outgoing HTTP connections. +# Values must be a positive duration (and between 1 and 2147483647 when converted to milliseconds). +# Default: 10s +#http_write_timeout = 10s + +# HTTP proxy for outgoing HTTP connections +#http_proxy_uri = + +# The threshold of the garbage collection runs. If GC runs take longer than this threshold, a system notification +# will be generated to warn the administrator about possible problems with the system. Default is 1 second. +#gc_warning_threshold = 1s + +# Connection timeout for a configured LDAP server (e. g. ActiveDirectory) in milliseconds. +#ldap_connection_timeout = 2000 + +# Disable the use of SIGAR for collecting system stats +#disable_sigar = false + +# The default cache time for dashboard widgets. (Default: 10 seconds, minimum: 1 second) +#dashboard_widget_default_cache_time = 10s + +# Automatically load content packs in "content_packs_dir" on the first start of Graylog. +content_packs_loader_enabled = true + +# The directory which contains content packs which should be loaded on the first start of Graylog. +content_packs_dir = /usr/share/graylog/data/contentpacks + +# A comma-separated list of content packs (files in "content_packs_dir") which should be applied on +# the first start of Graylog. +# Default: empty +content_packs_auto_load = grok-patterns.json + +# For some cluster-related REST requests, the node must query all other nodes in the cluster. This is the maximum number +# of threads available for this. Increase it, if '/cluster/*' requests take long to complete. +# Should be http_thread_pool_size * average_cluster_size if you have a high number of concurrent users.
+proxied_requests_thread_pool_size = 32 diff --git a/graylog/config/log4j2.xml b/graylog/config/log4j2.xml new file mode 100644 index 00000000..03d1d12d --- /dev/null +++ b/graylog/config/log4j2.xml @@ -0,0 +1,35 @@ +<!-- Graylog log4j2.xml logging configuration (XML elements not preserved in this view) --> diff --git a/ide-theia/Dockerfile b/ide-theia/Dockerfile index 39f2c1b6..9824b6a9 100644 --- a/ide-theia/Dockerfile +++ b/ide-theia/Dockerfile @@ -2,4 +2,8 @@ FROM theiaide/theia LABEL maintainer="ahkui " -RUN echo 'fs.inotify.max_user_watches=524288' >> /etc/sysctl.conf \ No newline at end of file +USER root + +RUN echo 'fs.inotify.max_user_watches=524288' >> /etc/sysctl.conf + +USER theia diff --git a/ipython/Dockerfile.controller b/ipython/Dockerfile.controller index 3acb62d9..d325c6f8 100644 --- a/ipython/Dockerfile.controller +++ b/ipython/Dockerfile.controller @@ -4,7 +4,7 @@ LABEL maintainer="ahkui " USER root -RUN apk add --no-cache build-base +RUN apk add --no-cache build-base zeromq-dev RUN python -m pip --quiet --no-cache-dir install \ ipyparallel @@ -14,4 +14,4 @@ RUN ipython profile create --parallel --profile=default COPY ipcontroller-client.json /root/.ipython/profile_default/security/ipcontroller-client.json COPY ipcontroller-engine.json /root/.ipython/profile_default/security/ipcontroller-engine.json -CMD ["sh","-c","ipcontroller --ip=* --reuse"] \ No newline at end of file +CMD ["sh","-c","ipcontroller --ip=* --reuse"] diff --git a/ipython/Dockerfile.engine b/ipython/Dockerfile.engine index 5f709b61..b0ff3fca 100644 --- a/ipython/Dockerfile.engine +++ b/ipython/Dockerfile.engine @@ -4,7 +4,7 @@ LABEL maintainer="ahkui " USER root -RUN apk add --no-cache build-base +RUN apk add --no-cache build-base zeromq-dev RUN python -m pip --quiet --no-cache-dir install \ ipyparallel \ diff --git a/jenkins/Dockerfile b/jenkins/Dockerfile index cb12f4b9..df66c423 100644 --- a/jenkins/Dockerfile +++ b/jenkins/Dockerfile @@ -108,3 +108,7 @@ COPY install-plugins.sh /usr/local/bin/install-plugins.sh #RUN chmod 644 /var/jenkins_home/.ssh/id_rsa.pub ## ssh-keyscan -H github.com >> ~/.ssh/known_hosts ## ssh-keyscan -H bitbucket.org >> ~/.ssh/known_hosts + +# Fix docker permission denied error +USER root +RUN usermod -aG docker jenkins diff --git a/jupyterhub/Dockerfile b/jupyterhub/Dockerfile index 2016f773..ddea0be0 100644 --- a/jupyterhub/Dockerfile +++ b/jupyterhub/Dockerfile @@ -10,6 +10,7 @@ ENV JUPYTERHUB_OAUTH_CALLBACK_URL ${JUPYTERHUB_OAUTH_CALLBACK_URL} ENV JUPYTERHUB_OAUTH_CLIENT_ID ${JUPYTERHUB_OAUTH_CLIENT_ID} ENV JUPYTERHUB_OAUTH_CLIENT_SECRET ${JUPYTERHUB_OAUTH_CLIENT_SECRET} ENV JUPYTERHUB_LOCAL_NOTEBOOK_IMAGE ${JUPYTERHUB_LOCAL_NOTEBOOK_IMAGE} +ENV JUPYTERHUB_ENABLE_NVIDIA ${JUPYTERHUB_ENABLE_NVIDIA} RUN curl -sL https://deb.nodesource.com/setup_10.x | bash - diff --git a/jupyterhub/jupyterhub_config.py b/jupyterhub/jupyterhub_config.py index 612296c6..e8da1b82 100644 --- a/jupyterhub/jupyterhub_config.py +++ b/jupyterhub/jupyterhub_config.py @@ -6,6 +6,9 @@ import os c = get_config() +# create system users that don't exist yet +c.LocalAuthenticator.create_system_users = True + def create_dir_hook(spawner): username = spawner.user.name # get the username volume_path = os.path.join('/user-data', username) @@ -45,8 +48,12 @@ network_name = os.environ.get('JUPYTERHUB_NETWORK_NAME','laradock_backend') c.DockerSpawner.use_internal_ip = True c.DockerSpawner.network_name = network_name +enable_nvidia = os.environ.get('JUPYTERHUB_ENABLE_NVIDIA','false') # Pass the network name as argument to spawned
containers -c.DockerSpawner.extra_host_config = { 'network_mode': network_name, 'runtime': 'nvidia' } +c.DockerSpawner.extra_host_config = { 'network_mode': network_name } +if 'true' == enable_nvidia: + c.DockerSpawner.extra_host_config = { 'network_mode': network_name, 'runtime': 'nvidia' } + pass # c.DockerSpawner.extra_host_config = { 'network_mode': network_name, "devices":["/dev/nvidiactl","/dev/nvidia-uvm","/dev/nvidia0"] } # Explicitly set notebook directory because we'll be mounting a host volume to # it. Most jupyter/docker-stacks *-notebook images run the Notebook server as diff --git a/kibana/Dockerfile b/kibana/Dockerfile index b97bc196..badfd80a 100644 --- a/kibana/Dockerfile +++ b/kibana/Dockerfile @@ -1,3 +1,3 @@ -FROM docker.elastic.co/kibana/kibana:6.2.3 +FROM docker.elastic.co/kibana/kibana:6.6.0 EXPOSE 5601 diff --git a/laravel-echo-server/Dockerfile b/laravel-echo-server/Dockerfile index 6a338f45..da6b2561 100644 --- a/laravel-echo-server/Dockerfile +++ b/laravel-echo-server/Dockerfile @@ -19,4 +19,4 @@ RUN npm install COPY laravel-echo-server.json /usr/src/app/laravel-echo-server.json EXPOSE 3000 -CMD [ "npm", "start" ] +CMD [ "npm", "start", "--force" ] diff --git a/laravel-echo-server/package.json b/laravel-echo-server/package.json index 2784a039..4e8d6c1f 100644 --- a/laravel-echo-server/package.json +++ b/laravel-echo-server/package.json @@ -4,9 +4,9 @@ "version": "0.0.1", "license": "MIT", "dependencies": { - "laravel-echo-server": "^1.2.8" + "laravel-echo-server": "^1.5.0" }, "scripts": { "start": "laravel-echo-server start" } -} \ No newline at end of file +} diff --git a/laravel-horizon/Dockerfile b/laravel-horizon/Dockerfile index 35a69554..ee5b9ffd 100644 --- a/laravel-horizon/Dockerfile +++ b/laravel-horizon/Dockerfile @@ -20,10 +20,11 @@ RUN apk --update add wget \ autoconf \ cyrus-sasl-dev \ libgsasl-dev \ - supervisor + supervisor \ + procps RUN docker-php-ext-install mysqli mbstring pdo pdo_mysql tokenizer xml pcntl -RUN pecl channel-update pecl.php.net && pecl install memcached mcrypt-1.0.1 && docker-php-ext-enable memcached +RUN pecl channel-update pecl.php.net && pecl install memcached mcrypt-1.0.1 mongodb && docker-php-ext-enable memcached mongodb #Install BCMath package: ARG INSTALL_BCMATH=false @@ -31,6 +32,12 @@ RUN if [ ${INSTALL_BCMATH} = true ]; then \ docker-php-ext-install bcmath \ ;fi +#Install Sockets package: +ARG INSTALL_SOCKETS=false +RUN if [ ${INSTALL_SOCKETS} = true ]; then \ + docker-php-ext-install sockets \ + ;fi + # Install PostgreSQL drivers: ARG INSTALL_PGSQL=false RUN if [ ${INSTALL_PGSQL} = true ]; then \ @@ -38,6 +45,28 @@ RUN if [ ${INSTALL_PGSQL} = true ]; then \ && docker-php-ext-install pdo_pgsql \ ;fi +# Install Cassandra drivers: +ARG INSTALL_CASSANDRA=false +RUN if [ ${INSTALL_CASSANDRA} = true ]; then \ + apk --update add cassandra-cpp-driver \ + ;fi + +WORKDIR /usr/src +RUN if [ ${INSTALL_CASSANDRA} = true ]; then \ + git clone https://github.com/datastax/php-driver.git \ + && cd php-driver/ext \ + && phpize \ + && mkdir -p /usr/src/php-driver/build \ + && cd /usr/src/php-driver/build \ + && ../ext/configure > /dev/null \ + && make clean >/dev/null \ + && make >/dev/null 2>&1 \ + && make install \ + && docker-php-ext-enable cassandra \ +;fi + + + ########################################################################### # PHP Memcached: ########################################################################### diff --git a/logstash/Dockerfile b/logstash/Dockerfile new file mode 100644 index 00000000..a8c54527 --- 
/dev/null +++ b/logstash/Dockerfile @@ -0,0 +1,10 @@ +FROM docker.elastic.co/logstash/logstash:6.4.2 + +USER root +RUN rm -f /usr/share/logstash/pipeline/logstash.conf +RUN curl -L -o /usr/share/logstash/lib/mysql-connector-java-5.1.47.jar https://repo1.maven.org/maven2/mysql/mysql-connector-java/5.1.47/mysql-connector-java-5.1.47.jar +ADD ./pipeline/ /usr/share/logstash/pipeline/ +ADD ./config/ /usr/share/logstash/config/ + +RUN logstash-plugin install logstash-input-jdbc + diff --git a/logstash/config/logstash.yml b/logstash/config/logstash.yml new file mode 100644 index 00000000..c3447173 --- /dev/null +++ b/logstash/config/logstash.yml @@ -0,0 +1,5 @@ +http.host: "0.0.0.0" + +xpack.monitoring.enabled: false +config.reload.automatic: true +path.config: "/usr/share/logstash/pipeline" diff --git a/nginx/ssl/.gitkeep b/logstash/pipeline/.gitkeep similarity index 100% rename from nginx/ssl/.gitkeep rename to logstash/pipeline/.gitkeep diff --git a/manticore/Dockerfile b/manticore/Dockerfile new file mode 100644 index 00000000..2b78830a --- /dev/null +++ b/manticore/Dockerfile @@ -0,0 +1,5 @@ +FROM manticoresearch/manticore + +EXPOSE 9306 +EXPOSE 9308 +EXPOSE 9312 diff --git a/manticore/config/sphinx.conf b/manticore/config/sphinx.conf new file mode 100644 index 00000000..0a992b97 --- /dev/null +++ b/manticore/config/sphinx.conf @@ -0,0 +1,25 @@ +index testrt { + type = rt + rt_mem_limit = 128M + path = /var/lib/manticore/data/testrt + rt_field = title + rt_field = content + rt_attr_uint = gid +} + +searchd { + listen = 9312 + listen = 9308:http + listen = 9306:mysql41 + log = /var/lib/manticore/log/searchd.log + # you can also send query_log to /dev/stdout to be shown in docker logs + query_log = /var/lib/manticore/log/query.log + read_timeout = 5 + max_children = 30 + pid_file = /var/run/searchd.pid + seamless_rotate = 1 + preopen_indexes = 1 + unlink_old = 1 + binlog_path = /var/lib/manticore/data +} + diff --git a/mariadb/Dockerfile b/mariadb/Dockerfile index 0dcb9481..1d048265 100644 --- a/mariadb/Dockerfile +++ b/mariadb/Dockerfile @@ -1,7 +1,15 @@ -FROM mariadb:latest +ARG MARIADB_VERSION=latest +FROM mariadb:${MARIADB_VERSION} LABEL maintainer="Mahmoud Zalt " +##################################### +# Set Timezone +##################################### + +ARG TZ=UTC +ENV TZ ${TZ} +RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone && chown -R mysql:root /var/lib/mysql/ COPY my.cnf /etc/mysql/conf.d/my.cnf CMD ["mysqld"] diff --git a/mosquitto/Dockerfile b/mosquitto/Dockerfile new file mode 100644 index 00000000..7fb0e733 --- /dev/null +++ b/mosquitto/Dockerfile @@ -0,0 +1,5 @@ +FROM eclipse-mosquitto:latest + +LABEL maintainer="Luis Coutinho " + +COPY mosquitto.conf /mosquitto/config/ diff --git a/mosquitto/mosquitto.conf b/mosquitto/mosquitto.conf new file mode 100644 index 00000000..03e80f2d --- /dev/null +++ b/mosquitto/mosquitto.conf @@ -0,0 +1,838 @@ +# Config file for mosquitto +# +# See mosquitto.conf(5) for more information. +# +# Default values are shown, uncomment to change. +# +# Use the # character to indicate a comment, but only if it is the +# very first character on the line. + +# ================================================================= +# General configuration +# ================================================================= + +# Time in seconds to wait before resending an outgoing QoS=1 or +# QoS=2 message. +#retry_interval 20 + +# Time in seconds between updates of the $SYS tree. 
+# Set to 0 to disable the publishing of the $SYS tree. +#sys_interval 10 + +# Time in seconds between cleaning the internal message store of +# unreferenced messages. Lower values will result in lower memory +# usage but more processor time, higher values will have the +# opposite effect. +# Setting a value of 0 means the unreferenced messages will be +# disposed of as quickly as possible. +#store_clean_interval 10 + +# Write process id to a file. Default is a blank string which means +# a pid file shouldn't be written. +# This should be set to /var/run/mosquitto.pid if mosquitto is +# being run automatically on boot with an init script and +# start-stop-daemon or similar. +#pid_file + +# When run as root, drop privileges to this user and its primary +# group. +# Leave blank to stay as root, but this is not recommended. +# If run as a non-root user, this setting has no effect. +# Note that on Windows this has no effect and so mosquitto should +# be started by the user you wish it to run as. +#user mosquitto + +# The maximum number of QoS 1 and 2 messages currently inflight per +# client. +# This includes messages that are partway through handshakes and +# those that are being retried. Defaults to 20. Set to 0 for no +# maximum. Setting to 1 will guarantee in-order delivery of QoS 1 +# and 2 messages. +#max_inflight_messages 20 + +# The maximum number of QoS 1 and 2 messages to hold in a queue +# above those that are currently in-flight. Defaults to 100. Set +# to 0 for no maximum (not recommended). +# See also queue_qos0_messages. +#max_queued_messages 100 + +# Set to true to queue messages with QoS 0 when a persistent client is +# disconnected. These messages are included in the limit imposed by +# max_queued_messages. +# Defaults to false. +# This is a non-standard option for the MQTT v3.1 spec but is allowed in +# v3.1.1. +#queue_qos0_messages false + +# This option sets the maximum publish payload size that the broker will allow. +# Received messages that exceed this size will not be accepted by the broker. +# The default value is 0, which means that all valid MQTT messages are +# accepted. MQTT imposes a maximum payload size of 268435455 bytes. +#message_size_limit 0 + +# This option controls whether a client is allowed to connect with a zero +# length client id or not. This option only affects clients using MQTT v3.1.1 +# and later. If set to false, clients connecting with a zero length client id +# are disconnected. If set to true, clients will be allocated a client id by +# the broker. This means it is only useful for clients with clean session set +# to true. +#allow_zero_length_clientid true + +# If allow_zero_length_clientid is true, this option allows you to set a prefix +# to automatically generated client ids to aid visibility in logs. +#auto_id_prefix + +# This option allows persistent clients (those with clean session set to false) +# to be removed if they do not reconnect within a certain time frame. +# +# This is a non-standard option in MQTT V3.1 but allowed in MQTT v3.1.1. +# +# Badly designed clients may set clean session to false whilst using a randomly +# generated client id. This leads to persistent clients that will never +# reconnect. This option allows these clients to be removed. +# +# The expiration period should be an integer followed by one of h d w m y for +# hour, day, week, month and year respectively. 
For example +# +# persistent_client_expiration 2m +# persistent_client_expiration 14d +# persistent_client_expiration 1y +# +# The default if not set is to never expire persistent clients. +#persistent_client_expiration + +# If a client is subscribed to multiple subscriptions that overlap, e.g. foo/# +# and foo/+/baz , then MQTT expects that when the broker receives a message on +# a topic that matches both subscriptions, such as foo/bar/baz, then the client +# should only receive the message once. +# Mosquitto keeps track of which clients a message has been sent to in order to +# meet this requirement. The allow_duplicate_messages option allows this +# behaviour to be disabled, which may be useful if you have a large number of +# clients subscribed to the same set of topics and are very concerned about +# minimising memory usage. +# It can be safely set to true if you know in advance that your clients will +# never have overlapping subscriptions, otherwise your clients must be able to +# correctly deal with duplicate messages even when they have QoS=2. +#allow_duplicate_messages false + +# The MQTT specification requires that the QoS of a message delivered to a +# subscriber is never upgraded to match the QoS of the subscription. Enabling +# this option changes this behaviour. If upgrade_outgoing_qos is set true, +# messages sent to a subscriber will always match the QoS of its subscription. +# This is a non-standard option explicitly disallowed by the spec. +#upgrade_outgoing_qos false + +# ================================================================= +# Default listener +# ================================================================= + +# IP address/hostname to bind the default listener to. If not +# given, the default listener will not be bound to a specific +# address and so will be accessible to all network interfaces. +# bind_address ip-address/host name +#bind_address + +# Port to use for the default listener. +port 9001 + +# The maximum number of client connections to allow. This is +# a per listener setting. +# Default is -1, which means unlimited connections. +# Note that other process limits mean that unlimited connections +# are not really possible. Typically the default maximum number of +# connections possible is around 1024. +#max_connections -1 + +# Choose the protocol to use when listening. +# This can be either mqtt or websockets. +# Websockets support is currently disabled by default at compile time. +# Certificate based TLS may be used with websockets, except that +# only the cafile, certfile, keyfile and ciphers options are supported. +protocol websockets + +# When a listener is using the websockets protocol, it is possible to serve +# http data as well. Set http_dir to a directory which contains the files you +# wish to serve. If this option is not specified, then no normal http +# connections will be possible. +#http_dir + +# Set use_username_as_clientid to true to replace the clientid that a client +# connected with, with its username. This allows authentication to be tied to +# the clientid, which means that it is possible to prevent one client +# disconnecting another by using the same clientid. +# If a client connects with no username it will be disconnected as not +# authorised when this option is set to true. +# Do not use in conjunction with clientid_prefixes. +# See also use_identity_as_username.
+#use_username_as_clientid + +# ----------------------------------------------------------------- +# Certificate based SSL/TLS support +# ----------------------------------------------------------------- +# The following options can be used to enable SSL/TLS support for +# this listener. Note that the recommended port for MQTT over TLS +# is 8883, but this must be set manually. +# +# See also the mosquitto-tls man page. + +# At least one of cafile or capath must be defined. They both +# define methods of accessing the PEM encoded Certificate +# Authority certificates that have signed your server certificate +# and that you wish to trust. +# cafile defines the path to a file containing the CA certificates. +# capath defines a directory that will be searched for files +# containing the CA certificates. For capath to work correctly, the +# certificate files must have ".crt" as the file ending and you must run +# "c_rehash <path to capath>" each time you add/remove a certificate. +#cafile +#capath + +# Path to the PEM encoded server certificate. +#certfile + +# Path to the PEM encoded keyfile. +#keyfile + +# This option defines the version of the TLS protocol to use for this listener. +# The default value allows v1.2, v1.1 and v1.0, if they are all supported by +# the version of openssl that the broker was compiled against. For openssl >= +# 1.0.1 the valid values are tlsv1.2 tlsv1.1 and tlsv1. For openssl < 1.0.1 the +# valid values are tlsv1. +#tls_version + +# By default a TLS enabled listener will operate in a similar fashion to a +# https enabled web server, in that the server has a certificate signed by a CA +# and the client will verify that it is a trusted certificate. The overall aim +# is encryption of the network traffic. By setting require_certificate to true, +# the client must provide a valid certificate in order for the network +# connection to proceed. This allows access to the broker to be controlled +# outside of the mechanisms provided by MQTT. +#require_certificate false + +# If require_certificate is true, you may set use_identity_as_username to true +# to use the CN value from the client certificate as a username. If this is +# true, the password_file option will not be used for this listener. +#use_identity_as_username false + +# If you have require_certificate set to true, you can create a certificate +# revocation list file to revoke access to particular client certificates. If +# you have done this, use crlfile to point to the PEM encoded revocation file. +#crlfile + +# If you wish to control which encryption ciphers are used, use the ciphers +# option. The list of available ciphers can be obtained using the "openssl +# ciphers" command and should be provided in the same format as the output of +# that command. +# If unset defaults to DEFAULT:!aNULL:!eNULL:!LOW:!EXPORT:!SSLv2:@STRENGTH +#ciphers DEFAULT:!aNULL:!eNULL:!LOW:!EXPORT:!SSLv2:@STRENGTH + +# ----------------------------------------------------------------- +# Pre-shared-key based SSL/TLS support +# ----------------------------------------------------------------- +# The following options can be used to enable PSK based SSL/TLS support for +# this listener. Note that the recommended port for MQTT over TLS is 8883, but +# this must be set manually. +# +# See also the mosquitto-tls man page and the "Certificate based SSL/TLS +# support" section. Only one of certificate or PSK encryption support can be +# enabled for any listener.
+ +# The psk_hint option enables pre-shared-key support for this listener and also +# acts as an identifier for this listener. The hint is sent to clients and may +# be used locally to aid authentication. The hint is a free form string that +# doesn't have much meaning in itself, so feel free to be creative. +# If this option is provided, see psk_file to define the pre-shared keys to be +# used or create a security plugin to handle them. +#psk_hint + +# Set use_identity_as_username to have the psk identity sent by the client used +# as its username. Authentication will be carried out using the PSK rather than +# the MQTT username/password and so password_file will not be used for this +# listener. +#use_identity_as_username false + +# When using PSK, the encryption ciphers used will be chosen from the list of +# available PSK ciphers. If you want to control which ciphers are available, +# use the "ciphers" option. The list of available ciphers can be obtained +# using the "openssl ciphers" command and should be provided in the same format +# as the output of that command. +#ciphers + +# ================================================================= +# Extra listeners +# ================================================================= + +# Listen on a port/ip address combination. By using this variable +# multiple times, mosquitto can listen on more than one port. If +# this variable is used and neither bind_address nor port given, +# then the default listener will not be started. +# The port number to listen on must be given. Optionally, an ip +# address or host name may be supplied as a second argument. In +# this case, mosquitto will attempt to bind the listener to that +# address and so restrict access to the associated network and +# interface. By default, mosquitto will listen on all interfaces. +# Note that for a websockets listener it is not possible to bind to a host +# name. +# listener port-number [ip address/host name] +#listener + +# The maximum number of client connections to allow. This is +# a per listener setting. +# Default is -1, which means unlimited connections. +# Note that other process limits mean that unlimited connections +# are not really possible. Typically the default maximum number of +# connections possible is around 1024. +#max_connections -1 + +# The listener can be restricted to operating within a topic hierarchy using +# the mount_point option. This is achieved by prefixing the mount_point string +# to all topics for any clients connected to this listener. This prefixing only +# happens internally to the broker; the client will not see the prefix. +#mount_point + +# Choose the protocol to use when listening. +# This can be either mqtt or websockets. +# Certificate based TLS may be used with websockets, except that only the +# cafile, certfile, keyfile and ciphers options are supported. +#protocol mqtt + +# When a listener is using the websockets protocol, it is possible to serve +# http data as well. Set http_dir to a directory which contains the files you +# wish to serve. If this option is not specified, then no normal http +# connections will be possible. +#http_dir + +# Set use_username_as_clientid to true to replace the clientid that a client +# connected with, with its username. This allows authentication to be tied to +# the clientid, which means that it is possible to prevent one client +# disconnecting another by using the same clientid.
+# If a client connects with no username it will be disconnected as not +# authorised when this option is set to true. +# Do not use in conjunction with clientid_prefixes. +# See also use_identity_as_username. +#use_username_as_clientid + +# ----------------------------------------------------------------- +# Certificate based SSL/TLS support +# ----------------------------------------------------------------- +# The following options can be used to enable certificate based SSL/TLS support +# for this listener. Note that the recommended port for MQTT over TLS is 8883, +# but this must be set manually. +# +# See also the mosquitto-tls man page and the "Pre-shared-key based SSL/TLS +# support" section. Only one of certificate or PSK encryption support can be +# enabled for any listener. + +# At least one of cafile or capath must be defined to enable certificate based +# TLS encryption. They both define methods of accessing the PEM encoded +# Certificate Authority certificates that have signed your server certificate +# and that you wish to trust. +# cafile defines the path to a file containing the CA certificates. +# capath defines a directory that will be searched for files +# containing the CA certificates. For capath to work correctly, the +# certificate files must have ".crt" as the file ending and you must run +# "c_rehash <path to capath>" each time you add/remove a certificate. +#cafile +#capath + +# Path to the PEM encoded server certificate. +#certfile + +# Path to the PEM encoded keyfile. +#keyfile + +# By default a TLS enabled listener will operate in a similar fashion to a +# https enabled web server, in that the server has a certificate signed by a CA +# and the client will verify that it is a trusted certificate. The overall aim +# is encryption of the network traffic. By setting require_certificate to true, +# the client must provide a valid certificate in order for the network +# connection to proceed. This allows access to the broker to be controlled +# outside of the mechanisms provided by MQTT. +#require_certificate false + +# If require_certificate is true, you may set use_identity_as_username to true +# to use the CN value from the client certificate as a username. If this is +# true, the password_file option will not be used for this listener. +#use_identity_as_username false + +# If you have require_certificate set to true, you can create a certificate +# revocation list file to revoke access to particular client certificates. If +# you have done this, use crlfile to point to the PEM encoded revocation file. +#crlfile + +# If you wish to control which encryption ciphers are used, use the ciphers +# option. The list of available ciphers can be obtained using the "openssl +# ciphers" command and should be provided in the same format as the output of +# that command. +#ciphers + +# ----------------------------------------------------------------- +# Pre-shared-key based SSL/TLS support +# ----------------------------------------------------------------- +# The following options can be used to enable PSK based SSL/TLS support for +# this listener. Note that the recommended port for MQTT over TLS is 8883, but +# this must be set manually. +# +# See also the mosquitto-tls man page and the "Certificate based SSL/TLS +# support" section. Only one of certificate or PSK encryption support can be +# enabled for any listener. + +# The psk_hint option enables pre-shared-key support for this listener and also +# acts as an identifier for this listener.
The hint is sent to clients and may +# be used locally to aid authentication. The hint is a free form string that +# doesn't have much meaning in itself, so feel free to be creative. +# If this option is provided, see psk_file to define the pre-shared keys to be +# used or create a security plugin to handle them. +#psk_hint + +# Set use_identity_as_username to have the psk identity sent by the client used +# as its username. Authentication will be carried out using the PSK rather than +# the MQTT username/password and so password_file will not be used for this +# listener. +#use_identity_as_username false + +# When using PSK, the encryption ciphers used will be chosen from the list of +# available PSK ciphers. If you want to control which ciphers are available, +# use the "ciphers" option. The list of available ciphers can be obtained +# using the "openssl ciphers" command and should be provided in the same format +# as the output of that command. +#ciphers + +# ================================================================= +# Persistence +# ================================================================= + +# If persistence is enabled, save the in-memory database to disk +# every autosave_interval seconds. If set to 0, the persistence +# database will only be written when mosquitto exits. See also +# autosave_on_changes. +# Note that writing of the persistence database can be forced by +# sending mosquitto a SIGUSR1 signal. +#autosave_interval 1800 + +# If true, mosquitto will count the number of subscription changes, retained +# messages received and queued messages and if the total exceeds +# autosave_interval then the in-memory database will be saved to disk. +# If false, mosquitto will save the in-memory database to disk by treating +# autosave_interval as a time in seconds. +#autosave_on_changes false + +# Save persistent message data to disk (true/false). +# This saves information about all messages, including +# subscriptions, currently in-flight messages and retained +# messages. +# retained_persistence is a synonym for this option. +persistence true + +# The filename to use for the persistent database, not including +# the path. +#persistence_file mosquitto.db + +# Location for persistent database. Must include trailing / +# Default is an empty string (current directory). +# Set to e.g. /var/lib/mosquitto/ if running as a proper service on Linux or +# similar. +persistence_location /mosquitto/data/ + +# ================================================================= +# Logging +# ================================================================= + +# Places to log to. Use multiple log_dest lines for multiple +# logging destinations. +# Possible destinations are: stdout stderr syslog topic file +# +# stdout and stderr log to the console on the named output. +# +# syslog uses the userspace syslog facility which usually ends up +# in /var/log/messages or similar. +# +# topic logs to the broker topic '$SYS/broker/log/<severity>', +# where severity is one of D, E, W, N, I, M which are debug, error, +# warning, notice, information and message. Message type severity is used by +# the subscribe/unsubscribe log_types and publishes log messages to +# $SYS/broker/log/M/subscribe or $SYS/broker/log/M/unsubscribe. +# +# The file destination requires an additional parameter which is the file to be +# logged to, e.g. "log_dest file /var/log/mosquitto.log". The file will be +# closed and reopened when the broker receives a HUP signal. Only a single file +# destination may be configured.
+# +# Note that if the broker is running as a Windows service it will default to +# "log_dest none" and neither stdout nor stderr logging is available. +# Use "log_dest none" if you wish to disable logging. +log_dest file /mosquitto/log/mosquitto.log + +# If using syslog logging (not on Windows), messages will be logged to the +# "daemon" facility by default. Use the log_facility option to choose which of +# local0 to local7 to log to instead. The option value should be an integer +# value, e.g. "log_facility 5" to use local5. +#log_facility + +# Types of messages to log. Use multiple log_type lines for logging +# multiple types of messages. +# Possible types are: debug, error, warning, notice, information, +# none, subscribe, unsubscribe, websockets, all. +# Note that debug type messages are for decoding the incoming/outgoing +# network packets. They are not logged in "topics". +log_type error +log_type warning +log_type notice +log_type information +log_type all + +# Change the websockets logging level. This is a global option, it is not +# possible to set per listener. This is an integer that is interpreted by +# libwebsockets as a bit mask for its lws_log_levels enum. See the +# libwebsockets documentation for more details. "log_type websockets" must also +# be enabled. +#websockets_log_level 0 + +# If set to true, client connection and disconnection messages will be included +# in the log. +#connection_messages true + +# If set to true, add a timestamp value to each log message. +#log_timestamp true + +# ================================================================= +# Security +# ================================================================= + +# If set, only clients that have a matching prefix on their +# clientid will be allowed to connect to the broker. By default, +# all clients may connect. +# For example, setting "secure-" here would mean a client "secure- +# client" could connect but another with clientid "mqtt" couldn't. +#clientid_prefixes + +# Boolean value that determines whether clients that connect +# without providing a username are allowed to connect. If set to +# false then a password file should be created (see the +# password_file option) to control authenticated client access. +# Defaults to true. +#allow_anonymous true + +# In addition to the clientid_prefixes, allow_anonymous and TLS +# authentication options, username based authentication is also +# possible. The default support is described in "Default +# authentication and topic access control" below. The auth_plugin +# allows another authentication method to be used. +# Specify the path to the loadable plugin and see the +# "Authentication and topic access plugin options" section below. +#auth_plugin + +# If auth_plugin_deny_special_chars is true, the default, then before an ACL +# check is made, the username/client id of the client needing the check is +# searched for the presence of either a '+' or '#' character. If either of +# these characters is found in either the username or client id, then the ACL +# check is denied before it is sent to the plugin. +# +# This check prevents the case where a malicious user could circumvent an ACL +# check by using one of these characters as their username or client id. This +# is the same issue as was reported with mosquitto itself as CVE-2017-7650. +# +# If you are entirely sure that the plugin you are using is not vulnerable to +# this attack (i.e.
if you never use usernames or client ids in topics) then
+# you can disable this extra check and have all ACL checks delivered to your
+# plugin by setting auth_plugin_deny_special_chars to false.
+#auth_plugin_deny_special_chars true
+
+# -----------------------------------------------------------------
+# Default authentication and topic access control
+# -----------------------------------------------------------------
+
+# Control access to the broker using a password file. This file can be
+# generated using the mosquitto_passwd utility. If TLS support is not compiled
+# into mosquitto (it is recommended that TLS support should be included) then
+# plain text passwords are used, in which case the file should be a text file
+# with lines in the format:
+# username:password
+# The password (and colon) may be omitted if desired, although this
+# offers very little in the way of security.
+#
+# See the TLS client require_certificate and use_identity_as_username options
+# for alternative authentication options.
+#password_file
+
+# Access may also be controlled using a pre-shared-key file. This requires
+# TLS-PSK support and a listener configured to use it. The file should be text
+# lines in the format:
+# identity:key
+# The key should be in hexadecimal format without a leading "0x".
+#psk_file
+
+# Control access to topics on the broker using an access control list
+# file. If this parameter is defined then only the topics listed will
+# have access.
+# If the first character of a line of the ACL file is a # it is treated as a
+# comment.
+# Topic access is added with lines of the format:
+#
+# topic [read|write|readwrite] <topic>
+#
+# The access type is controlled using "read", "write" or "readwrite". This
+# parameter is optional (unless <topic> contains a space character) - if not
+# given then the access is read/write. <topic> can contain the + or #
+# wildcards as in subscriptions.
+#
+# The first set of topics are applied to anonymous clients, assuming
+# allow_anonymous is true. User specific topic ACLs are added after a
+# user line as follows:
+#
+# user <username>
+#
+# The username referred to here is the same as in password_file. It is
+# not the clientid.
+#
+#
+# It is also possible to define ACLs based on pattern substitution within the
+# topic. The patterns available for substitution are:
+#
+# %c to match the client id of the client
+# %u to match the username of the client
+#
+# The substitution pattern must be the only text for that level of hierarchy.
+#
+# The form is the same as for the topic keyword, but using pattern as the
+# keyword.
+# Pattern ACLs apply to all users even if the "user" keyword has previously
+# been given.
+#
+# If using bridges with usernames and ACLs, connection messages can be allowed
+# with the following pattern:
+# pattern write $SYS/broker/connection/%c/state
+#
+# pattern [read|write|readwrite] <topic>
+#
+# Example:
+#
+# pattern write sensor/%u/data
+#
+#acl_file
+
+# -----------------------------------------------------------------
+# Authentication and topic access plugin options
+# -----------------------------------------------------------------
+
+# If the auth_plugin option above is used, define options to pass to the
+# plugin here as described by the plugin instructions.
All options named
+# using the format auth_opt_* will be passed to the plugin, for example:
+#
+# auth_opt_db_host
+# auth_opt_db_port
+# auth_opt_db_username
+# auth_opt_db_password
+
+
+# =================================================================
+# Bridges
+# =================================================================
+
+# A bridge is a way of connecting multiple MQTT brokers together.
+# Create a new bridge using the "connection" option as described below. Set
+# options for the bridges using the remaining parameters. You must specify the
+# address and at least one topic to subscribe to.
+# Each connection must have a unique name.
+# The address line may have multiple host address and ports specified. See
+# below in the round_robin description for more details on bridge behaviour if
+# multiple addresses are used.
+# The direction that the topic will be shared can be chosen by
+# specifying out, in or both, where the default value is out.
+# The QoS level of the bridged communication can be specified with the next
+# topic option. The default QoS level is 0, to change the QoS the topic
+# direction must also be given.
+# The local and remote prefix options allow a topic to be remapped when it is
+# bridged to/from the remote broker. This provides the ability to place a topic
+# tree in an appropriate location.
+# For more details see the mosquitto.conf man page.
+# Multiple topics can be specified per connection, but be careful
+# not to create any loops.
+# If you are using bridges with cleansession set to false (the default), then
+# you may get unexpected behaviour from incoming topics if you change what
+# topics you are subscribing to. This is because the remote broker keeps the
+# subscription for the old topic. If you have this problem, connect your bridge
+# with cleansession set to true, then reconnect with cleansession set to false
+# as normal.
+#connection <name>
+#address <host>[:<port>] [<host>[:<port>]]
+#topic <topic>[[[out | in | both] qos-level] local-prefix remote-prefix]
+
+# Set the version of the MQTT protocol to use for this bridge. Can be one
+# of mqttv31 or mqttv311. Defaults to mqttv31.
+#bridge_protocol_version mqttv31
+
+# If a bridge has topics that have "out" direction, the default behaviour is to
+# send an unsubscribe request to the remote broker on that topic. This means
+# that changing a topic direction from "in" to "out" will not keep receiving
+# incoming messages. Sending these unsubscribe requests is not always
+# desirable, setting bridge_attempt_unsubscribe to false will disable sending
+# the unsubscribe request.
+#bridge_attempt_unsubscribe true
+
+# If the bridge has more than one address given in the address/addresses
+# configuration, the round_robin option defines the behaviour of the bridge on
+# a failure of the bridge connection. If round_robin is false, the default
+# value, then the first address is treated as the main bridge connection. If
+# the connection fails, the other secondary addresses will be attempted in
+# turn. Whilst connected to a secondary bridge, the bridge will periodically
+# attempt to reconnect to the main bridge until successful.
+# If round_robin is true, then all addresses are treated as equals. If a
+# connection fails, the next address will be tried and if successful will
+# remain connected until it fails
+#round_robin false
+
+# Set the client id to use on the remote end of this bridge connection.
If not
+# defined, this defaults to 'name.hostname' where name is the connection name
+# and hostname is the hostname of this computer.
+# This replaces the old "clientid" option to avoid confusion. "clientid"
+# remains valid for the time being.
+#remote_clientid
+
+# Set the clientid to use on the local broker. If not defined, this defaults to
+# 'local.<clientid>'. If you are bridging a broker to itself, it is important
+# that local_clientid and clientid do not match.
+#local_clientid
+
+# Set the clean session variable for this bridge.
+# When set to true, when the bridge disconnects for any reason, all
+# messages and subscriptions will be cleaned up on the remote
+# broker. Note that with cleansession set to true, there may be a
+# significant amount of retained messages sent when the bridge
+# reconnects after losing its connection.
+# When set to false, the subscriptions and messages are kept on the
+# remote broker, and delivered when the bridge reconnects.
+#cleansession false
+
+# If set to true, publish notification messages to the local and remote brokers
+# giving information about the state of the bridge connection. Retained
+# messages are published to the topic $SYS/broker/connection/<name>/state
+# unless the notification_topic option is used.
+# If the message is 1 then the connection is active, or 0 if the connection has
+# failed.
+#notifications true
+
+# Choose the topic on which notification messages for this bridge are
+# published. If not set, messages are published on the topic
+# $SYS/broker/connection/<name>/state
+#notification_topic
+
+# Set the keepalive interval for this bridge connection, in
+# seconds.
+#keepalive_interval 60
+
+# Set the start type of the bridge. This controls how the bridge starts and
+# can be one of three types: automatic, lazy and once. Note that RSMB provides
+# a fourth start type "manual" which isn't currently supported by mosquitto.
+#
+# "automatic" is the default start type and means that the bridge connection
+# will be started automatically when the broker starts and also restarted
+# after a short delay (30 seconds) if the connection fails.
+#
+# Bridges using the "lazy" start type will be started automatically when the
+# number of queued messages exceeds the number set with the "threshold"
+# parameter. It will be stopped automatically after the time set by the
+# "idle_timeout" parameter. Use this start type if you wish the connection to
+# only be active when it is needed.
+#
+# A bridge using the "once" start type will be started automatically when the
+# broker starts but will not be restarted if the connection fails.
+#start_type automatic
+
+# Set the amount of time a bridge using the automatic start type will wait
+# until attempting to reconnect. Defaults to 30 seconds.
+#restart_timeout 30
+
+# Set the amount of time a bridge using the lazy start type must be idle before
+# it will be stopped. Defaults to 60 seconds.
+#idle_timeout 60
+
+# Set the number of messages that need to be queued for a bridge with lazy
+# start type to be restarted. Defaults to 10 messages.
+# Must be less than max_queued_messages.
+#threshold 10
+
+# If try_private is set to true, the bridge will attempt to indicate to the
+# remote broker that it is a bridge not an ordinary client. If successful, this
+# means that loop detection will be more effective and that retained messages
+# will be propagated correctly. Not all brokers support this feature so it may
+# be necessary to set try_private to false if your bridge does not connect
+# properly.
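+#
+# A minimal bridge sketch (illustrative only; the connection name, address and
+# topic below are placeholders, not part of the stock configuration):
+#
+#   connection bridge-01
+#   address remote-broker.example.com:1883
+#   topic sensors/# out 1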
+#try_private true
+
+# Set the username to use when connecting to a broker that requires
+# authentication.
+# This replaces the old "username" option to avoid confusion. "username"
+# remains valid for the time being.
+#remote_username
+
+# Set the password to use when connecting to a broker that requires
+# authentication. This option is only used if remote_username is also set.
+# This replaces the old "password" option to avoid confusion. "password"
+# remains valid for the time being.
+#remote_password
+
+# -----------------------------------------------------------------
+# Certificate based SSL/TLS support
+# -----------------------------------------------------------------
+# Either bridge_cafile or bridge_capath must be defined to enable TLS support
+# for this bridge.
+# bridge_cafile defines the path to a file containing the
+# Certificate Authority certificates that have signed the remote broker
+# certificate.
+# bridge_capath defines a directory that will be searched for files containing
+# the CA certificates. For bridge_capath to work correctly, the certificate
+# files must have ".crt" as the file ending and you must run "c_rehash
+# <capath>" each time you add/remove a certificate.
+#bridge_cafile
+#bridge_capath
+
+# Path to the PEM encoded client certificate, if required by the remote broker.
+#bridge_certfile
+
+# Path to the PEM encoded client private key, if required by the remote broker.
+#bridge_keyfile
+
+# When using certificate based encryption, bridge_insecure disables
+# verification of the server hostname in the server certificate. This can be
+# useful when testing initial server configurations, but makes it possible for
+# a malicious third party to impersonate your server through DNS spoofing, for
+# example. Use this option in testing only. If you need to resort to using this
+# option in a production environment, your setup is at fault and there is no
+# point using encryption.
+#bridge_insecure false
+
+# -----------------------------------------------------------------
+# PSK based SSL/TLS support
+# -----------------------------------------------------------------
+# Pre-shared-key encryption provides an alternative to certificate based
+# encryption. A bridge can be configured to use PSK with the bridge_identity
+# and bridge_psk options. These are the client PSK identity, and pre-shared-key
+# in hexadecimal format with no "0x". Only one of certificate and PSK based
+# encryption can be used on one bridge at once.
+#bridge_identity
+#bridge_psk
+
+
+# =================================================================
+# External config files
+# =================================================================
+
+# External configuration files may be included by using the
+# include_dir option. This defines a directory that will be searched
+# for config files. All files that end in '.conf' will be loaded as
+# a configuration file. It is best to have this as the last option
+# in the main file. This option will only be processed from the main
+# configuration file. The directory specified must not contain the
+# main configuration file.
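+#
+# For example (illustrative only; the path is a placeholder, not part of the
+# stock configuration):
+#
+#   include_dir /mosquitto/conf.d
+#
+# would additionally load every /mosquitto/conf.d/*.conf file at startup.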
+#include_dir
+
+# =================================================================
+# rsmb options - unlikely to ever be supported
+# =================================================================
+
+#ffdc_output
+#max_log_entries
+#trace_level
+#trace_output
diff --git a/nginx/Dockerfile b/nginx/Dockerfile
index c6481303..eb1015bd 100644
--- a/nginx/Dockerfile
+++ b/nginx/Dockerfile
@@ -14,18 +14,29 @@ RUN if [ ${CHANGE_SOURCE} = true ]; then \
 RUN apk update \
   && apk upgrade \
+  && apk --update add logrotate \
   && apk add --no-cache openssl \
-  && apk add --no-cache bash \
-  && adduser -D -H -u 1000 -s /bin/bash www-data
+  && apk add --no-cache bash
+
+RUN set -x ; \
+  addgroup -g 82 -S www-data ; \
+  adduser -u 82 -D -S -G www-data www-data && exit 0 ; exit 1
 
 ARG PHP_UPSTREAM_CONTAINER=php-fpm
 ARG PHP_UPSTREAM_PORT=9000
 
+# Create 'messages' file used by 'logrotate'
+RUN touch /var/log/messages
+
+# Copy 'logrotate' config file
+COPY logrotate/nginx /etc/logrotate.d/
+
 # Set upstream conf and remove the default conf
 RUN echo "upstream php-upstream { server ${PHP_UPSTREAM_CONTAINER}:${PHP_UPSTREAM_PORT}; }" > /etc/nginx/conf.d/upstream.conf \
   && rm /etc/nginx/conf.d/default.conf
 ADD ./startup.sh /opt/startup.sh
+RUN sed -i 's/\r//g' /opt/startup.sh
 CMD ["/bin/bash", "/opt/startup.sh"]
 EXPOSE 80 443
diff --git a/nginx/logrotate/nginx b/nginx/logrotate/nginx
new file mode 100644
index 00000000..8c89a83a
--- /dev/null
+++ b/nginx/logrotate/nginx
@@ -0,0 +1,14 @@
+/var/log/nginx/*.log {
+  daily
+  missingok
+  rotate 32
+  compress
+  delaycompress
+  nodateext
+  notifempty
+  create 644 www-data root
+  sharedscripts
+  postrotate
+    [ -f /var/run/nginx.pid ] && kill -USR1 `cat /var/run/nginx.pid`
+  endscript
+}
diff --git a/nginx/ssl/.gitignore b/nginx/ssl/.gitignore
new file mode 100644
index 00000000..003cd8e5
--- /dev/null
+++ b/nginx/ssl/.gitignore
@@ -0,0 +1,4 @@
+*.crt
+*.csr
+*.key
+*.pem
\ No newline at end of file
diff --git a/nginx/startup.sh b/nginx/startup.sh
index 069d1418..f8e7b229 100644
--- a/nginx/startup.sh
+++ b/nginx/startup.sh
@@ -6,4 +6,8 @@ if [ ! -f /etc/nginx/ssl/default.crt ]; then
     openssl x509 -req -days 365 -in "/etc/nginx/ssl/default.csr" -signkey "/etc/nginx/ssl/default.key" -out "/etc/nginx/ssl/default.crt"
 fi
 
+# Start crond in background
+crond -l 2 -b
+
+# Start nginx in foreground
 nginx
diff --git a/pgadmin/Dockerfile b/pgadmin/Dockerfile
deleted file mode 100644
index c507b589..00000000
--- a/pgadmin/Dockerfile
+++ /dev/null
@@ -1,10 +0,0 @@
-FROM fenglc/pgadmin4
-
-LABEL maintainer="Huadong Zuo "
-
-# user: pgadmin4@pgadmin.org
-# password: admin
-# pg_dump & postgresql all in "/usr/bin"
-# backup in "/var/lib/pgadmin/storage/pgadmin4"
-
-EXPOSE 5050
diff --git a/php-fpm/Dockerfile b/php-fpm/Dockerfile
index 3bd250d4..68a1933e 100644
--- a/php-fpm/Dockerfile
+++ b/php-fpm/Dockerfile
@@ -14,7 +14,8 @@
 
 ARG LARADOCK_PHP_VERSION
 
-FROM laradock/php-fpm:2.2-${LARADOCK_PHP_VERSION}
+# FROM laradock/php-fpm:2.2-${LARADOCK_PHP_VERSION}
+FROM letsdockerize/laradock-php-fpm:2.4-${LARADOCK_PHP_VERSION}
 
 LABEL maintainer="Mahmoud Zalt <mahmoud@zalt.me>"
 
@@ -24,20 +25,27 @@ ARG LARADOCK_PHP_VERSION
 
 ENV DEBIAN_FRONTEND noninteractive
 
 # Always run apt-get update when building and after adding a new source list, then clean up at the end.
-RUN apt-get update -yqq && \
-    apt-get install -y apt-utils && \
-    pecl channel-update pecl.php.net
-
-#
-#--------------------------------------------------------------------------
-# Mandatory Software's Installation
-#--------------------------------------------------------------------------
-#
-# Mandatory Software's such as ("mcrypt", "pdo_mysql", "libssl-dev", ....)
-# are installed on the base image 'laradock/php-fpm' image. If you want
-# to add more Software's or remove existing one, you need to edit the
-# base image (https://github.com/Laradock/php-fpm).
-#
+RUN set -xe; \
+    apt-get update -yqq && \
+    pecl channel-update pecl.php.net && \
+    apt-get install -yqq \
+      apt-utils \
+      #
+      #--------------------------------------------------------------------------
+      # Mandatory Software's Installation
+      #--------------------------------------------------------------------------
+      #
+      # Mandatory Software's such as ("mcrypt", "pdo_mysql", "libssl-dev", ....)
+      # are installed on the base image 'laradock/php-fpm' image. If you want
+      # to add more Software's or remove existing one, you need to edit the
+      # base image (https://github.com/Laradock/php-fpm).
+      #
+      # The next lines are here because there is no auto build on Docker Hub; see https://github.com/laradock/laradock/pull/1903#issuecomment-463142846
+      libzip-dev zip unzip && \
+      docker-php-ext-configure zip --with-libzip && \
+      # Install the zip extension
+      docker-php-ext-install zip && \
+      php -m | grep -q 'zip'
 
 #
 #--------------------------------------------------------------------------
@@ -47,7 +55,7 @@ RUN apt-get update -yqq && \
 # Optional Software's will only be installed if you set them to `true`
 # in the `docker-compose.yml` before the build.
 # Example:
-#   - INSTALL_ZIP_ARCHIVE=true
+#   - INSTALL_SOAP=true
 
 ###########################################################################
+###########################################################################
+# XSL:
+###########################################################################
+
+ARG INSTALL_XSL=false
+
+RUN if [ ${INSTALL_XSL} = true ]; then \
+    # Install the xsl extension
+    apt-get -y install libxslt-dev && \
+    docker-php-ext-install xsl \
+;fi
+
 ###########################################################################
 # pgsql
 ###########################################################################
@@ -108,13 +128,17 @@ RUN if [ ${INSTALL_PGSQL} = true ]; then \
 ###########################################################################
 
 ARG INSTALL_PG_CLIENT=false
+ARG INSTALL_POSTGIS=false
 
 RUN if [ ${INSTALL_PG_CLIENT} = true ]; then \
     # Create folders if not exists (https://github.com/tianon/docker-brew-debian/issues/65)
     mkdir -p /usr/share/man/man1 && \
     mkdir -p /usr/share/man/man7 && \
     # Install the pgsql client
-    apt-get install -y postgresql-client \
+    apt-get install -y postgresql-client && \
+    if [ ${INSTALL_POSTGIS} = true ]; then \
+      apt-get install -y postgis; \
+    fi \
 ;fi
 
 ###########################################################################
@@ -187,7 +211,7 @@ ARG INSTALL_SWOOLE=false
 RUN if [ ${INSTALL_SWOOLE} = true ]; then \
     # Install Php Swoole Extension
     if [ $(php -r "echo PHP_MAJOR_VERSION;") = "5" ]; then \
-      pecl install swoole-2.0.11; \
+      pecl install swoole-2.0.10; \
     else \
       if [ $(php -r "echo PHP_MINOR_VERSION;") = "0" ]; then \
         pecl install swoole-2.2.0; \
@@ -196,6 +220,22 @@ RUN if [ ${INSTALL_SWOOLE} = true ]; then \
     fi \
   fi && \
docker-php-ext-enable swoole \
+    && php -m | grep -q 'swoole' \
+;fi
+
+###########################################################################
+# Taint EXTENSION
+###########################################################################
+
+ARG INSTALL_TAINT=false
+
+RUN if [ ${INSTALL_TAINT} = true ]; then \
+    # Install Php TAINT Extension
+    if [ $(php -r "echo PHP_MAJOR_VERSION;") = "7" ]; then \
+      pecl install taint && \
+      docker-php-ext-enable taint && \
+      php -m | grep -q 'taint'; \
+    fi \
+;fi
 
 ###########################################################################
@@ -214,6 +254,38 @@ RUN if [ ${INSTALL_MONGO} = true ]; then \
     docker-php-ext-enable mongodb \
 ;fi
 
+###########################################################################
+# Xhprof:
+###########################################################################
+
+ARG INSTALL_XHPROF=false
+
+RUN if [ ${INSTALL_XHPROF} = true ]; then \
+    # Install the php xhprof extension
+    if [ $(php -r "echo PHP_MAJOR_VERSION;") = 7 ]; then \
+      curl -L -o /tmp/xhprof.tar.gz "https://github.com/tideways/php-xhprof-extension/archive/v4.1.7.tar.gz"; \
+    else \
+      curl -L -o /tmp/xhprof.tar.gz "https://codeload.github.com/phacility/xhprof/tar.gz/master"; \
+    fi \
+    && mkdir -p xhprof \
+    && tar -C xhprof -zxvf /tmp/xhprof.tar.gz --strip 1 \
+    && ( \
+        cd xhprof \
+        && phpize \
+        && ./configure \
+        && make \
+        && make install \
+    ) \
+    && rm -r xhprof \
+    && rm /tmp/xhprof.tar.gz \
+;fi
+
+COPY ./xhprof.ini /usr/local/etc/php/conf.d
+
+RUN if [ ${INSTALL_XHPROF} = false ]; then \
+    rm /usr/local/etc/php/conf.d/xhprof.ini \
+;fi
+
 ###########################################################################
 # AMQP:
 ###########################################################################
@@ -221,23 +293,21 @@ RUN if [ ${INSTALL_MONGO} = true ]; then \
 ARG INSTALL_AMQP=false
 
 RUN if [ ${INSTALL_AMQP} = true ]; then \
-    apt-get install librabbitmq-dev -y && \
+    # download and install manually, to make sure it's compatible with the amqp extension installed by pecl later
+    # install cmake first
+    apt-get update && apt-get -y install cmake && \
+    curl -L -o /tmp/rabbitmq-c.tar.gz https://github.com/alanxz/rabbitmq-c/archive/master.tar.gz && \
+    mkdir -p rabbitmq-c && \
+    tar -C rabbitmq-c -zxvf /tmp/rabbitmq-c.tar.gz --strip 1 && \
+    cd rabbitmq-c/ && \
+    mkdir _build && cd _build/ && \
+    cmake .. && \
+    cmake --build .
--target install && \ # Install the amqp extension pecl install amqp && \ - docker-php-ext-enable amqp \ -;fi - -########################################################################### -# ZipArchive: -########################################################################### - -ARG INSTALL_ZIP_ARCHIVE=false - -RUN if [ ${INSTALL_ZIP_ARCHIVE} = true ]; then \ - apt-get install libzip-dev -y && \ - docker-php-ext-configure zip --with-libzip && \ - # Install the zip extension - docker-php-ext-install zip \ + docker-php-ext-enable amqp && \ + # Install the sockets extension + docker-php-ext-install sockets \ ;fi ########################################################################### @@ -287,7 +357,7 @@ RUN if [ ${INSTALL_MEMCACHED} = true ]; then \ if [ $(php -r "echo PHP_MAJOR_VERSION;") = "5" ]; then \ curl -L -o /tmp/memcached.tar.gz "https://github.com/php-memcached-dev/php-memcached/archive/2.2.0.tar.gz"; \ else \ - curl -L -o /tmp/memcached.tar.gz "https://github.com/php-memcached-dev/php-memcached/archive/php7.tar.gz"; \ + curl -L -o /tmp/memcached.tar.gz "https://github.com/php-memcached-dev/php-memcached/archive/master.tar.gz"; \ fi \ && mkdir -p memcached \ && tar -C memcached -zxvf /tmp/memcached.tar.gz --strip 1 \ @@ -321,30 +391,30 @@ RUN if [ ${INSTALL_EXIF} = true ]; then \ USER root ARG INSTALL_AEROSPIKE=false -ARG AEROSPIKE_PHP_REPOSITORY -RUN if [ ${INSTALL_AEROSPIKE} = true ]; then \ +RUN set -xe; \ + if [ ${INSTALL_AEROSPIKE} = true ]; then \ # Fix dependencies for PHPUnit within aerospike extension apt-get -y install sudo wget && \ # Install the php aerospike extension if [ $(php -r "echo PHP_MAJOR_VERSION;") = "5" ]; then \ curl -L -o /tmp/aerospike-client-php.tar.gz https://github.com/aerospike/aerospike-client-php5/archive/master.tar.gz; \ else \ - curl -L -o /tmp/aerospike-client-php.tar.gz ${AEROSPIKE_PHP_REPOSITORY}; \ + curl -L -o /tmp/aerospike-client-php.tar.gz https://github.com/aerospike/aerospike-client-php/archive/master.tar.gz; \ fi \ - && mkdir -p aerospike-client-php \ - && tar -C aerospike-client-php -zxvf /tmp/aerospike-client-php.tar.gz --strip 1 \ + && mkdir -p /tmp/aerospike-client-php \ + && tar -C /tmp/aerospike-client-php -zxvf /tmp/aerospike-client-php.tar.gz --strip 1 \ && \ if [ $(php -r "echo PHP_MAJOR_VERSION;") = "5" ]; then \ ( \ - cd aerospike-client-php/src/aerospike \ + cd /tmp/aerospike-client-php/src/aerospike \ && phpize \ && ./build.sh \ && make install \ ) \ else \ ( \ - cd aerospike-client-php/src \ + cd /tmp/aerospike-client-php/src \ && phpize \ && ./build.sh \ && make install \ @@ -438,7 +508,8 @@ RUN if [ ${INSTALL_LDAP} = true ]; then \ ARG INSTALL_MSSQL=false -RUN set -eux; if [ ${INSTALL_MSSQL} = true ]; then \ +RUN set -eux; \ + if [ ${INSTALL_MSSQL} = true ]; then \ if [ $(php -r "echo PHP_MAJOR_VERSION;") = "5" ]; then \ apt-get -y install freetds-dev libsybdb5 \ && ln -s /usr/lib/x86_64-linux-gnu/libsybdb.so /usr/lib/libsybdb.so \ @@ -461,12 +532,16 @@ RUN set -eux; if [ ${INSTALL_MSSQL} = true ]; then \ && ln -sfn /etc/locale.alias /usr/share/locale/locale.alias \ && locale-gen \ # Install pdo_sqlsrv and sqlsrv from PECL. Replace pdo_sqlsrv-4.1.8preview with preferred version. 
-    && pecl install pdo_sqlsrv sqlsrv \
+    && if [ $(php -r "echo PHP_MINOR_VERSION;") = "0" ]; then \
+      pecl install pdo_sqlsrv-5.3.0 sqlsrv-5.3.0 \
+    ;else \
+      pecl install pdo_sqlsrv sqlsrv \
+    ;fi \
     && docker-php-ext-enable pdo_sqlsrv sqlsrv \
     && php -m | grep -q 'pdo_sqlsrv' \
     && php -m | grep -q 'sqlsrv' \
   ;fi \
-;fi
+  ;fi
 
 ###########################################################################
 # Image optimizers:
@@ -502,7 +577,6 @@ ARG INSTALL_IMAP=false
 
 RUN if [ ${INSTALL_IMAP} = true ]; then \
     apt-get install -y libc-client-dev libkrb5-dev && \
-    rm -r /var/lib/apt/lists/* && \
     docker-php-ext-configure imap --with-kerberos --with-imap-ssl && \
     docker-php-ext-install imap \
 ;fi
@@ -528,17 +602,34 @@ ARG INSTALL_PHALCON=false
 ARG LARADOCK_PHALCON_VERSION
 ENV LARADOCK_PHALCON_VERSION ${LARADOCK_PHALCON_VERSION}
 
+# Copy phalcon configuration
+COPY ./phalcon.ini /usr/local/etc/php/conf.d/phalcon.ini.disable
+
 RUN if [ $INSTALL_PHALCON = true ]; then \
     apt-get update && apt-get install -y unzip libpcre3-dev gcc make re2c \
     && curl -L -o /tmp/cphalcon.zip https://github.com/phalcon/cphalcon/archive/v${LARADOCK_PHALCON_VERSION}.zip \
     && unzip -d /tmp/ /tmp/cphalcon.zip \
     && cd /tmp/cphalcon-${LARADOCK_PHALCON_VERSION}/build \
     && ./install \
-    && echo "extension=phalcon.so" >> /etc/php/${LARADOCK_PHP_VERSION}/mods-available/phalcon.ini \
-    && ln -s /etc/php/${LARADOCK_PHP_VERSION}/mods-available/phalcon.ini /etc/php/${LARADOCK_PHP_VERSION}/cli/conf.d/30-phalcon.ini \
+    && mv /usr/local/etc/php/conf.d/phalcon.ini.disable /usr/local/etc/php/conf.d/phalcon.ini \
    && rm -rf /tmp/cphalcon* \
 ;fi
 
+###########################################################################
+# APCU:
+###########################################################################
+
+ARG INSTALL_APCU=false
+
+RUN if [ ${INSTALL_APCU} = true ]; then \
+    if [ $(php -r "echo PHP_MAJOR_VERSION;") = "5" ]; then \
+      pecl install -a apcu-4.0.11; \
+    else \
+      pecl install apcu; \
+    fi && \
+    docker-php-ext-enable apcu \
+;fi
+
 ###########################################################################
 # YAML:
 ###########################################################################
@@ -549,15 +640,99 @@ ARG INSTALL_YAML=false
 
 RUN if [ ${INSTALL_YAML} = true ]; then \
     apt-get install libyaml-dev -y ; \
-    pecl install yaml ; \
+    if [ $(php -r "echo PHP_MAJOR_VERSION;") = "5" ]; then \
+      pecl install -a yaml-1.3.2; \
+    else \
+      pecl install yaml; \
+    fi && \
     docker-php-ext-enable yaml \
 ;fi
 
+###########################################################################
+# RDKAFKA:
+###########################################################################
+
+ARG INSTALL_RDKAFKA=false
+
+RUN if [ ${INSTALL_RDKAFKA} = true ]; then \
+    apt-get install -y librdkafka-dev && \
+    pecl install rdkafka && \
+    docker-php-ext-enable rdkafka \
+;fi
+
+###########################################################################
+# Install additional locales:
+###########################################################################
+
+ARG INSTALL_ADDITIONAL_LOCALES=false
+ARG ADDITIONAL_LOCALES
+
+RUN if [ ${INSTALL_ADDITIONAL_LOCALES} = true ]; then \
+    apt-get install -y locales \
+    && echo '' >> /usr/share/locale/locale.alias \
+    && temp="${ADDITIONAL_LOCALES%\"}" \
+    && temp="${temp#\"}" \
+    && for i in ${temp}; do sed -i "/$i/s/^#//g" /etc/locale.gen; done \
+    && locale-gen \
+;fi
+
+###########################################################################
+# MySQL Client:
+########################################################################### + +USER root + +ARG INSTALL_MYSQL_CLIENT=false + +RUN if [ ${INSTALL_MYSQL_CLIENT} = true ]; then \ + apt-get update -yqq && \ + apt-get -y install mysql-client \ +;fi + +########################################################################### +# ping: +########################################################################### + +USER root + +ARG INSTALL_PING=false + +RUN if [ ${INSTALL_PING} = true ]; then \ + apt-get update -yqq && \ + apt-get -y install inetutils-ping \ +;fi + +########################################################################### +# sshpass: +########################################################################### + +USER root + +ARG INSTALL_SSHPASS=false + +RUN if [ ${INSTALL_SSHPASS} = true ]; then \ + apt-get update -yqq && \ + apt-get -y install sshpass \ +;fi + +########################################################################### +# FFMPEG: +########################################################################### + +USER root + +ARG INSTALL_FFMPEG=false + +RUN if [ ${INSTALL_FFMPEG} = true ]; then \ + apt-get update -yqq && \ + apt-get -y install ffmpeg \ +;fi + ########################################################################### # Check PHP version: ########################################################################### -RUN php -v | head -n 1 | grep -q "PHP ${LARADOCK_PHP_VERSION}." +RUN set -xe; php -v | head -n 1 | grep -q "PHP ${LARADOCK_PHP_VERSION}." # #-------------------------------------------------------------------------- diff --git a/php-fpm/opcache.ini b/php-fpm/opcache.ini index 9a3f646b..bf3d08e3 100644 --- a/php-fpm/opcache.ini +++ b/php-fpm/opcache.ini @@ -1,9 +1,9 @@ ; NOTE: The actual opcache.so extention is NOT SET HERE but rather (/usr/local/etc/php/conf.d/docker-php-ext-opcache.ini) -opcache.enable="1" -opcache.memory_consumption="256" -opcache.use_cwd="0" -opcache.max_file_size="0" -opcache.max_accelerated_files = 30000 -opcache.validate_timestamps="1" -opcache.revalidate_freq="0" +opcache.enable=1 +opcache.memory_consumption=256 +opcache.use_cwd=0 +opcache.max_file_size=0 +opcache.max_accelerated_files=30000 +opcache.validate_timestamps=1 +opcache.revalidate_freq=0 diff --git a/php-fpm/phalcon.ini b/php-fpm/phalcon.ini new file mode 100644 index 00000000..24b58bad --- /dev/null +++ b/php-fpm/phalcon.ini @@ -0,0 +1 @@ +extension=phalcon.so \ No newline at end of file diff --git a/php-fpm/php7.3.ini b/php-fpm/php7.3.ini new file mode 100644 index 00000000..9bf5f6ce --- /dev/null +++ b/php-fpm/php7.3.ini @@ -0,0 +1,1918 @@ +[PHP] + +;;;;;;;;;;;;;;;;;;; +; About php.ini ; +;;;;;;;;;;;;;;;;;;; +; PHP's initialization file, generally called php.ini, is responsible for +; configuring many of the aspects of PHP's behavior. + +; PHP attempts to find and load this configuration from a number of locations. +; The following is a summary of its search order: +; 1. SAPI module specific location. +; 2. The PHPRC environment variable. (As of PHP 5.2.0) +; 3. A number of predefined registry keys on Windows (As of PHP 5.2.0) +; 4. Current working directory (except CLI) +; 5. The web server's directory (for SAPI modules), or directory of PHP +; (otherwise in Windows) +; 6. The directory from the --with-config-file-path compile time option, or the +; Windows directory (C:\windows or C:\winnt) +; See the PHP docs for more specific information. +; http://php.net/configuration.file + +; The syntax of the file is extremely simple. 
Whitespace and lines +; beginning with a semicolon are silently ignored (as you probably guessed). +; Section headers (e.g. [Foo]) are also silently ignored, even though +; they might mean something in the future. + +; Directives following the section heading [PATH=/www/mysite] only +; apply to PHP files in the /www/mysite directory. Directives +; following the section heading [HOST=www.example.com] only apply to +; PHP files served from www.example.com. Directives set in these +; special sections cannot be overridden by user-defined INI files or +; at runtime. Currently, [PATH=] and [HOST=] sections only work under +; CGI/FastCGI. +; http://php.net/ini.sections + +; Directives are specified using the following syntax: +; directive = value +; Directive names are *case sensitive* - foo=bar is different from FOO=bar. +; Directives are variables used to configure PHP or PHP extensions. +; There is no name validation. If PHP can't find an expected +; directive because it is not set or is mistyped, a default value will be used. + +; The value can be a string, a number, a PHP constant (e.g. E_ALL or M_PI), one +; of the INI constants (On, Off, True, False, Yes, No and None) or an expression +; (e.g. E_ALL & ~E_NOTICE), a quoted string ("bar"), or a reference to a +; previously set variable or directive (e.g. ${foo}) + +; Expressions in the INI file are limited to bitwise operators and parentheses: +; | bitwise OR +; ^ bitwise XOR +; & bitwise AND +; ~ bitwise NOT +; ! boolean NOT + +; Boolean flags can be turned on using the values 1, On, True or Yes. +; They can be turned off using the values 0, Off, False or No. + +; An empty string can be denoted by simply not writing anything after the equal +; sign, or by using the None keyword: + +; foo = ; sets foo to an empty string +; foo = None ; sets foo to an empty string +; foo = "None" ; sets foo to the string 'None' + +; If you use constants in your value, and these constants belong to a +; dynamically loaded extension (either a PHP extension or a Zend extension), +; you may only use these constants *after* the line that loads the extension. + +;;;;;;;;;;;;;;;;;;; +; About this file ; +;;;;;;;;;;;;;;;;;;; +; PHP comes packaged with two INI files. One that is recommended to be used +; in production environments and one that is recommended to be used in +; development environments. + +; php.ini-production contains settings which hold security, performance and +; best practices at its core. But please be aware, these settings may break +; compatibility with older or less security conscience applications. We +; recommending using the production ini in production and testing environments. + +; php.ini-development is very similar to its production variant, except it is +; much more verbose when it comes to errors. We recommend using the +; development version only in development environments, as errors shown to +; application users can inadvertently leak otherwise secure information. + +; This is php.ini-production INI file. + +;;;;;;;;;;;;;;;;;;; +; Quick Reference ; +;;;;;;;;;;;;;;;;;;; +; The following are all the settings which are different in either the production +; or development versions of the INIs with respect to PHP's default behavior. +; Please see the actual settings later in the document for more details as to why +; we recommend these changes in PHP's behavior. 
+
+; display_errors
+;   Default Value: On
+;   Development Value: On
+;   Production Value: Off
+
+; display_startup_errors
+;   Default Value: Off
+;   Development Value: On
+;   Production Value: Off
+
+; error_reporting
+;   Default Value: E_ALL & ~E_NOTICE & ~E_STRICT & ~E_DEPRECATED
+;   Development Value: E_ALL
+;   Production Value: E_ALL & ~E_DEPRECATED & ~E_STRICT
+
+; html_errors
+;   Default Value: On
+;   Development Value: On
+;   Production value: On
+
+; log_errors
+;   Default Value: Off
+;   Development Value: On
+;   Production Value: On
+
+; max_input_time
+;   Default Value: -1 (Unlimited)
+;   Development Value: 60 (60 seconds)
+;   Production Value: 60 (60 seconds)
+
+; output_buffering
+;   Default Value: Off
+;   Development Value: 4096
+;   Production Value: 4096
+
+; register_argc_argv
+;   Default Value: On
+;   Development Value: Off
+;   Production Value: Off
+
+; request_order
+;   Default Value: None
+;   Development Value: "GP"
+;   Production Value: "GP"
+
+; session.gc_divisor
+;   Default Value: 100
+;   Development Value: 1000
+;   Production Value: 1000
+
+; session.sid_bits_per_character
+;   Default Value: 4
+;   Development Value: 5
+;   Production Value: 5
+
+; short_open_tag
+;   Default Value: On
+;   Development Value: Off
+;   Production Value: Off
+
+; track_errors
+;   Default Value: Off
+;   Development Value: On
+;   Production Value: Off
+
+; variables_order
+;   Default Value: "EGPCS"
+;   Development Value: "GPCS"
+;   Production Value: "GPCS"
+
+;;;;;;;;;;;;;;;;;;;;
+; php.ini Options  ;
+;;;;;;;;;;;;;;;;;;;;
+; Name for user-defined php.ini (.htaccess) files. Default is ".user.ini"
+;user_ini.filename = ".user.ini"
+
+; To disable this feature set this option to empty value
+;user_ini.filename =
+
+; TTL for user-defined php.ini files (time-to-live) in seconds. Default is 300 seconds (5 minutes)
+;user_ini.cache_ttl = 300
+
+;;;;;;;;;;;;;;;;;;;;
+; Language Options ;
+;;;;;;;;;;;;;;;;;;;;
+
+; Enable the PHP scripting language engine under Apache.
+; http://php.net/engine
+engine = On
+
+; This directive determines whether or not PHP will recognize code between
+; <? and ?> tags as PHP source which should be processed as such. It is
+; generally recommended that <?php and ?> should be used and that this feature
+; should be disabled, as enabling it may result in issues when generating XML
+; documents, however this remains supported for backward compatibility reasons.
+; Note that this directive does not control the <?= shorthand tag, which can
+; be used regardless of this directive.
+; Default Value: On
+; Development Value: Off
+; Production Value: Off
+; http://php.net/short-open-tag
+short_open_tag = Off
+
+; Colors for Syntax Highlighting mode. Anything that's acceptable in
+; <span style="color: ???????"> would work.
+; http://php.net/syntax-highlighting
+;highlight.string = #DD0000
+;highlight.comment = #FF9900
+;highlight.keyword = #007700
+;highlight.default = #0000BB
+;highlight.html = #000000
+
+; If enabled, the request will be allowed to complete even if the user aborts
+; the request. Consider enabling it if executing long requests, which may end up
+; being interrupted by the user or a browser timing out. PHP's default behavior
+; is to disable this feature.
+; http://php.net/ignore-user-abort
+;ignore_user_abort = On
+
+; Determines the size of the realpath cache to be used by PHP. This value should
+; be increased on systems where PHP opens many files to reflect the quantity of
+; the file operations performed.
+; http://php.net/realpath-cache-size
+;realpath_cache_size = 4096k
+
+; Duration of time, in seconds for which to cache realpath information for a given
+; file or directory. For systems with rarely changing files, consider increasing this
+; value.
+; http://php.net/realpath-cache-ttl
+;realpath_cache_ttl = 120
+
+; Enables or disables the circular reference collector.
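+;
+; (Illustrative example, not part of the stock php.ini: with the collector
+; enabled, reference cycles can also be collected manually from a script, e.g.
+;   $collected = gc_collect_cycles();
+; which returns the number of cycles that were freed.)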
+; http://php.net/zend.enable-gc +zend.enable_gc = On + +; If enabled, scripts may be written in encodings that are incompatible with +; the scanner. CP936, Big5, CP949 and Shift_JIS are the examples of such +; encodings. To use this feature, mbstring extension must be enabled. +; Default: Off +;zend.multibyte = Off + +; Allows to set the default encoding for the scripts. This value will be used +; unless "declare(encoding=...)" directive appears at the top of the script. +; Only affects if zend.multibyte is set. +; Default: "" +;zend.script_encoding = + +;;;;;;;;;;;;;;;;; +; Miscellaneous ; +;;;;;;;;;;;;;;;;; + +; Decides whether PHP may expose the fact that it is installed on the server +; (e.g. by adding its signature to the Web server header). It is no security +; threat in any way, but it makes it possible to determine whether you use PHP +; on your server or not. +; http://php.net/expose-php +expose_php = On + +;;;;;;;;;;;;;;;;;;; +; Resource Limits ; +;;;;;;;;;;;;;;;;;;; + +; Maximum execution time of each script, in seconds +; http://php.net/max-execution-time +; Note: This directive is hardcoded to 0 for the CLI SAPI +max_execution_time = 600 + +; Maximum amount of time each script may spend parsing request data. It's a good +; idea to limit this time on productions servers in order to eliminate unexpectedly +; long running scripts. +; Note: This directive is hardcoded to -1 for the CLI SAPI +; Default Value: -1 (Unlimited) +; Development Value: 60 (60 seconds) +; Production Value: 60 (60 seconds) +; http://php.net/max-input-time +max_input_time = 120 + +; Maximum input variable nesting level +; http://php.net/max-input-nesting-level +;max_input_nesting_level = 64 + +; How many GET/POST/COOKIE input variables may be accepted +; max_input_vars = 1000 + +; Maximum amount of memory a script may consume (128MB) +; http://php.net/memory-limit +memory_limit = 256M + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +; Error handling and logging ; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +; This directive informs PHP of which errors, warnings and notices you would like +; it to take action for. The recommended way of setting values for this +; directive is through the use of the error level constants and bitwise +; operators. The error level constants are below here for convenience as well as +; some common settings and their meanings. +; By default, PHP is set to take action on all errors, notices and warnings EXCEPT +; those related to E_NOTICE and E_STRICT, which together cover best practices and +; recommended coding standards in PHP. For performance reasons, this is the +; recommend error reporting setting. Your production server shouldn't be wasting +; resources complaining about best practices and coding standards. That's what +; development servers and development settings are for. +; Note: The php.ini-development file has this setting as E_ALL. This +; means it pretty much reports everything which is exactly what you want during +; development and early testing. 
+; +; Error Level Constants: +; E_ALL - All errors and warnings (includes E_STRICT as of PHP 5.4.0) +; E_ERROR - fatal run-time errors +; E_RECOVERABLE_ERROR - almost fatal run-time errors +; E_WARNING - run-time warnings (non-fatal errors) +; E_PARSE - compile-time parse errors +; E_NOTICE - run-time notices (these are warnings which often result +; from a bug in your code, but it's possible that it was +; intentional (e.g., using an uninitialized variable and +; relying on the fact it is automatically initialized to an +; empty string) +; E_STRICT - run-time notices, enable to have PHP suggest changes +; to your code which will ensure the best interoperability +; and forward compatibility of your code +; E_CORE_ERROR - fatal errors that occur during PHP's initial startup +; E_CORE_WARNING - warnings (non-fatal errors) that occur during PHP's +; initial startup +; E_COMPILE_ERROR - fatal compile-time errors +; E_COMPILE_WARNING - compile-time warnings (non-fatal errors) +; E_USER_ERROR - user-generated error message +; E_USER_WARNING - user-generated warning message +; E_USER_NOTICE - user-generated notice message +; E_DEPRECATED - warn about code that will not work in future versions +; of PHP +; E_USER_DEPRECATED - user-generated deprecation warnings +; +; Common Values: +; E_ALL (Show all errors, warnings and notices including coding standards.) +; E_ALL & ~E_NOTICE (Show all errors, except for notices) +; E_ALL & ~E_NOTICE & ~E_STRICT (Show all errors, except for notices and coding standards warnings.) +; E_COMPILE_ERROR|E_RECOVERABLE_ERROR|E_ERROR|E_CORE_ERROR (Show only errors) +; Default Value: E_ALL & ~E_NOTICE & ~E_STRICT & ~E_DEPRECATED +; Development Value: E_ALL +; Production Value: E_ALL & ~E_DEPRECATED & ~E_STRICT +; http://php.net/error-reporting +error_reporting = E_ALL & ~E_DEPRECATED & ~E_STRICT + +; This directive controls whether or not and where PHP will output errors, +; notices and warnings too. Error output is very useful during development, but +; it could be very dangerous in production environments. Depending on the code +; which is triggering the error, sensitive information could potentially leak +; out of your application such as database usernames and passwords or worse. +; For production environments, we recommend logging errors rather than +; sending them to STDOUT. +; Possible Values: +; Off = Do not display any errors +; stderr = Display errors to STDERR (affects only CGI/CLI binaries!) +; On or stdout = Display errors to STDOUT +; Default Value: On +; Development Value: On +; Production Value: Off +; http://php.net/display-errors +display_errors = Off + +; The display of errors which occur during PHP's startup sequence are handled +; separately from display_errors. PHP's default behavior is to suppress those +; errors from clients. Turning the display of startup errors on can be useful in +; debugging configuration problems. We strongly recommend you +; set this to 'off' for production servers. +; Default Value: Off +; Development Value: On +; Production Value: Off +; http://php.net/display-startup-errors +display_startup_errors = Off + +; Besides displaying errors, PHP can also log errors to locations such as a +; server-specific log, STDERR, or a location specified by the error_log +; directive found below. While errors should not be displayed on productions +; servers they should still be monitored and logging is a great way to do that. 
+; Default Value: Off +; Development Value: On +; Production Value: On +; http://php.net/log-errors +log_errors = On + +; Set maximum length of log_errors. In error_log information about the source is +; added. The default is 1024 and 0 allows to not apply any maximum length at all. +; http://php.net/log-errors-max-len +log_errors_max_len = 1024 + +; Do not log repeated messages. Repeated errors must occur in same file on same +; line unless ignore_repeated_source is set true. +; http://php.net/ignore-repeated-errors +ignore_repeated_errors = Off + +; Ignore source of message when ignoring repeated messages. When this setting +; is On you will not log errors with repeated messages from different files or +; source lines. +; http://php.net/ignore-repeated-source +ignore_repeated_source = Off + +; If this parameter is set to Off, then memory leaks will not be shown (on +; stdout or in the log). This has only effect in a debug compile, and if +; error reporting includes E_WARNING in the allowed list +; http://php.net/report-memleaks +report_memleaks = On + +; This setting is on by default. +;report_zend_debug = 0 + +; Store the last error/warning message in $php_errormsg (boolean). Setting this value +; to On can assist in debugging and is appropriate for development servers. It should +; however be disabled on production servers. +; Default Value: Off +; Development Value: On +; Production Value: Off +; http://php.net/track-errors +track_errors = Off + +; Turn off normal error reporting and emit XML-RPC error XML +; http://php.net/xmlrpc-errors +;xmlrpc_errors = 0 + +; An XML-RPC faultCode +;xmlrpc_error_number = 0 + +; When PHP displays or logs an error, it has the capability of formatting the +; error message as HTML for easier reading. This directive controls whether +; the error message is formatted as HTML or not. +; Note: This directive is hardcoded to Off for the CLI SAPI +; Default Value: On +; Development Value: On +; Production value: On +; http://php.net/html-errors +html_errors = On + +; If html_errors is set to On *and* docref_root is not empty, then PHP +; produces clickable error messages that direct to a page describing the error +; or function causing the error in detail. +; You can download a copy of the PHP manual from http://php.net/docs +; and change docref_root to the base URL of your local copy including the +; leading '/'. You must also specify the file extension being used including +; the dot. PHP's default behavior is to leave these settings empty, in which +; case no links to documentation are generated. +; Note: Never use this feature for production boxes. +; http://php.net/docref-root +; Examples +;docref_root = "/phpmanual/" + +; http://php.net/docref-ext +;docref_ext = .html + +; String to output before an error message. PHP's default behavior is to leave +; this setting blank. +; http://php.net/error-prepend-string +; Example: +;error_prepend_string = "" + +; String to output after an error message. PHP's default behavior is to leave +; this setting blank. +; http://php.net/error-append-string +; Example: +;error_append_string = "" + +; Log errors to specified file. PHP's default behavior is to leave this value +; empty. +; http://php.net/error-log +; Example: +;error_log = php_errors.log +; Log errors to syslog (Event Log on Windows). 
+;error_log = syslog + +;windows.show_crt_warning +; Default value: 0 +; Development value: 0 +; Production value: 0 + +;;;;;;;;;;;;;;;;; +; Data Handling ; +;;;;;;;;;;;;;;;;; + +; The separator used in PHP generated URLs to separate arguments. +; PHP's default setting is "&". +; http://php.net/arg-separator.output +; Example: +;arg_separator.output = "&" + +; List of separator(s) used by PHP to parse input URLs into variables. +; PHP's default setting is "&". +; NOTE: Every character in this directive is considered as separator! +; http://php.net/arg-separator.input +; Example: +;arg_separator.input = ";&" + +; This directive determines which super global arrays are registered when PHP +; starts up. G,P,C,E & S are abbreviations for the following respective super +; globals: GET, POST, COOKIE, ENV and SERVER. There is a performance penalty +; paid for the registration of these arrays and because ENV is not as commonly +; used as the others, ENV is not recommended on productions servers. You +; can still get access to the environment variables through getenv() should you +; need to. +; Default Value: "EGPCS" +; Development Value: "GPCS" +; Production Value: "GPCS"; +; http://php.net/variables-order +variables_order = "GPCS" + +; This directive determines which super global data (G,P & C) should be +; registered into the super global array REQUEST. If so, it also determines +; the order in which that data is registered. The values for this directive +; are specified in the same manner as the variables_order directive, +; EXCEPT one. Leaving this value empty will cause PHP to use the value set +; in the variables_order directive. It does not mean it will leave the super +; globals array REQUEST empty. +; Default Value: None +; Development Value: "GP" +; Production Value: "GP" +; http://php.net/request-order +request_order = "GP" + +; This directive determines whether PHP registers $argv & $argc each time it +; runs. $argv contains an array of all the arguments passed to PHP when a script +; is invoked. $argc contains an integer representing the number of arguments +; that were passed when the script was invoked. These arrays are extremely +; useful when running scripts from the command line. When this directive is +; enabled, registering these variables consumes CPU cycles and memory each time +; a script is executed. For performance reasons, this feature should be disabled +; on production servers. +; Note: This directive is hardcoded to On for the CLI SAPI +; Default Value: On +; Development Value: Off +; Production Value: Off +; http://php.net/register-argc-argv +register_argc_argv = Off + +; When enabled, the ENV, REQUEST and SERVER variables are created when they're +; first used (Just In Time) instead of when the script starts. If these +; variables are not used within a script, having this directive on will result +; in a performance gain. The PHP directive register_argc_argv must be disabled +; for this directive to have any affect. +; http://php.net/auto-globals-jit +auto_globals_jit = On + +; Whether PHP will read the POST data. +; This option is enabled by default. +; Most likely, you won't want to disable this option globally. It causes $_POST +; and $_FILES to always be empty; the only way you will be able to read the +; POST data will be through the php://input stream wrapper. This can be useful +; to proxy requests or to process the POST data in a memory efficient fashion. 
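+;
+; (Illustrative example, not part of the stock php.ini: with POST data reading
+; disabled, a script can still read the raw request body itself via
+;   $body = file_get_contents('php://input');
+; )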
+; http://php.net/enable-post-data-reading +;enable_post_data_reading = Off + +; Maximum size of POST data that PHP will accept. +; Its value may be 0 to disable the limit. It is ignored if POST data reading +; is disabled through enable_post_data_reading. +; http://php.net/post-max-size +post_max_size = 8M + +; Automatically add files before PHP document. +; http://php.net/auto-prepend-file +auto_prepend_file = + +; Automatically add files after PHP document. +; http://php.net/auto-append-file +auto_append_file = + +; By default, PHP will output a media type using the Content-Type header. To +; disable this, simply set it to be empty. +; +; PHP's built-in default media type is set to text/html. +; http://php.net/default-mimetype +default_mimetype = "text/html" + +; PHP's default character set is set to UTF-8. +; http://php.net/default-charset +default_charset = "UTF-8" + +; PHP internal character encoding is set to empty. +; If empty, default_charset is used. +; http://php.net/internal-encoding +;internal_encoding = + +; PHP input character encoding is set to empty. +; If empty, default_charset is used. +; http://php.net/input-encoding +;input_encoding = + +; PHP output character encoding is set to empty. +; If empty, default_charset is used. +; See also output_buffer. +; http://php.net/output-encoding +;output_encoding = + +;;;;;;;;;;;;;;;;;;;;;;;;; +; Paths and Directories ; +;;;;;;;;;;;;;;;;;;;;;;;;; + +; UNIX: "/path1:/path2" +;include_path = ".:/php/includes" +; +; Windows: "\path1;\path2" +;include_path = ".;c:\php\includes" +; +; PHP's default setting for include_path is ".;/path/to/php/pear" +; http://php.net/include-path + +; The root of the PHP pages, used only if nonempty. +; if PHP was not compiled with FORCE_REDIRECT, you SHOULD set doc_root +; if you are running php as a CGI under any web server (other than IIS) +; see documentation for security issues. The alternate is to use the +; cgi.force_redirect configuration below +; http://php.net/doc-root +doc_root = + +; The directory under which PHP opens the script using /~username used only +; if nonempty. +; http://php.net/user-dir +user_dir = + +; Directory in which the loadable extensions (modules) reside. +; http://php.net/extension-dir +; extension_dir = "./" +; On windows: +; extension_dir = "ext" + +; Directory where the temporary files should be placed. +; Defaults to the system default (see sys_get_temp_dir) +; sys_temp_dir = "/tmp" + +; Whether or not to enable the dl() function. The dl() function does NOT work +; properly in multithreaded servers, such as IIS or Zeus, and is automatically +; disabled on them. +; http://php.net/enable-dl +enable_dl = Off + +; cgi.force_redirect is necessary to provide security running PHP as a CGI under +; most web servers. Left undefined, PHP turns this on by default. You can +; turn it off here AT YOUR OWN RISK +; **You CAN safely turn this off for IIS, in fact, you MUST.** +; http://php.net/cgi.force-redirect +;cgi.force_redirect = 1 + +; if cgi.nph is enabled it will force cgi to always sent Status: 200 with +; every request. PHP's default behavior is to disable this feature. +;cgi.nph = 1 + +; if cgi.force_redirect is turned on, and you are not running under Apache or Netscape +; (iPlanet) web servers, you MAY need to set an environment variable name that PHP +; will look for to know it is OK to continue execution. Setting this variable MAY +; cause security issues, KNOW WHAT YOU ARE DOING FIRST. 
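+;
+; (Illustrative example, not part of the stock php.ini; the variable name is
+; just a placeholder:
+;   cgi.redirect_status_env = "REDIRECT_STATUS"
+; )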
+; http://php.net/cgi.redirect-status-env +;cgi.redirect_status_env = + +; cgi.fix_pathinfo provides *real* PATH_INFO/PATH_TRANSLATED support for CGI. PHP's +; previous behaviour was to set PATH_TRANSLATED to SCRIPT_FILENAME, and to not grok +; what PATH_INFO is. For more information on PATH_INFO, see the cgi specs. Setting +; this to 1 will cause PHP CGI to fix its paths to conform to the spec. A setting +; of zero causes PHP to behave as before. Default is 1. You should fix your scripts +; to use SCRIPT_FILENAME rather than PATH_TRANSLATED. +; http://php.net/cgi.fix-pathinfo +;cgi.fix_pathinfo=1 + +; if cgi.discard_path is enabled, the PHP CGI binary can safely be placed outside +; of the web tree and people will not be able to circumvent .htaccess security. +; http://php.net/cgi.dicard-path +;cgi.discard_path=1 + +; FastCGI under IIS (on WINNT based OS) supports the ability to impersonate +; security tokens of the calling client. This allows IIS to define the +; security context that the request runs under. mod_fastcgi under Apache +; does not currently support this feature (03/17/2002) +; Set to 1 if running under IIS. Default is zero. +; http://php.net/fastcgi.impersonate +;fastcgi.impersonate = 1 + +; Disable logging through FastCGI connection. PHP's default behavior is to enable +; this feature. +;fastcgi.logging = 0 + +; cgi.rfc2616_headers configuration option tells PHP what type of headers to +; use when sending HTTP response code. If set to 0, PHP sends Status: header that +; is supported by Apache. When this option is set to 1, PHP will send +; RFC2616 compliant header. +; Default is zero. +; http://php.net/cgi.rfc2616-headers +;cgi.rfc2616_headers = 0 + +; cgi.check_shebang_line controls whether CGI PHP checks for line starting with #! +; (shebang) at the top of the running script. This line might be needed if the +; script support running both as stand-alone script and via PHP CGI<. PHP in CGI +; mode skips this line and ignores its content if this directive is turned on. +; http://php.net/cgi.check-shebang-line +;cgi.check_shebang_line=1 + +;;;;;;;;;;;;;;;; +; File Uploads ; +;;;;;;;;;;;;;;;; + +; Whether to allow HTTP file uploads. +; http://php.net/file-uploads +file_uploads = On + +; Temporary directory for HTTP uploaded files (will use system default if not +; specified). +; http://php.net/upload-tmp-dir +;upload_tmp_dir = + +; Maximum allowed size for uploaded files. +; http://php.net/upload-max-filesize +upload_max_filesize = 2M + +; Maximum number of files that can be uploaded via a single request +max_file_uploads = 20 + +;;;;;;;;;;;;;;;;;; +; Fopen wrappers ; +;;;;;;;;;;;;;;;;;; + +; Whether to allow the treatment of URLs (like http:// or ftp://) as files. +; http://php.net/allow-url-fopen +allow_url_fopen = On + +; Whether to allow include/require to open URLs (like http:// or ftp://) as files. +; http://php.net/allow-url-include +allow_url_include = Off + +; Define the anonymous ftp password (your email address). PHP's default setting +; for this is empty. +; http://php.net/from +;from="john@doe.com" + +; Define the User-Agent string. PHP's default setting for this is empty. 
+; http://php.net/user-agent +;user_agent="PHP" + +; Default timeout for socket based streams (seconds) +; http://php.net/default-socket-timeout +default_socket_timeout = 60 + +; If your scripts have to deal with files from Macintosh systems, +; or you are running on a Mac and need to deal with files from +; unix or win32 systems, setting this flag will cause PHP to +; automatically detect the EOL character in those files so that +; fgets() and file() will work regardless of the source of the file. +; http://php.net/auto-detect-line-endings +;auto_detect_line_endings = Off + +;;;;;;;;;;;;;;;;;;;;;; +; Dynamic Extensions ; +;;;;;;;;;;;;;;;;;;;;;; + +; If you wish to have an extension loaded automatically, use the following +; syntax: +; +; extension=modulename.extension +; +; For example, on Windows: +; +; extension=mysqli.dll +; +; ... or under UNIX: +; +; extension=mysqli.so +; +; ... or with a path: +; +; extension=/path/to/extension/mysqli.so +; +; If you only provide the name of the extension, PHP will look for it in its +; default extension directory. +; +; Windows Extensions +; Note that ODBC support is built in, so no dll is needed for it. +; Note that many DLL files are located in the extensions/ (PHP 4) ext/ (PHP 5+) +; extension folders as well as the separate PECL DLL download (PHP 5+). +; Be sure to appropriately set the extension_dir directive. +; +;extension=php_bz2.dll +;extension=php_curl.dll +;extension=php_fileinfo.dll +;extension=php_ftp.dll +;extension=php_gd2.dll +;extension=php_gettext.dll +;extension=php_gmp.dll +;extension=php_intl.dll +;extension=php_imap.dll +;extension=php_interbase.dll +;extension=php_ldap.dll +;extension=php_mbstring.dll +;extension=php_exif.dll ; Must be after mbstring as it depends on it +;extension=php_mysqli.dll +;extension=php_oci8_12c.dll ; Use with Oracle Database 12c Instant Client +;extension=php_openssl.dll +;extension=php_pdo_firebird.dll +;extension=php_pdo_mysql.dll +;extension=php_pdo_oci.dll +;extension=php_pdo_odbc.dll +;extension=php_pdo_pgsql.dll +;extension=php_pdo_sqlite.dll +;extension=php_pgsql.dll +;extension=php_shmop.dll + +; The MIBS data available in the PHP distribution must be installed. +; See http://www.php.net/manual/en/snmp.installation.php +;extension=php_snmp.dll + +;extension=php_soap.dll +;extension=php_sockets.dll +;extension=php_sqlite3.dll +;extension=php_tidy.dll +;extension=php_xmlrpc.dll +;extension=php_xsl.dll + +;;;;;;;;;;;;;;;;;;; +; Module Settings ; +;;;;;;;;;;;;;;;;;;; + +[CLI Server] +; Whether the CLI web server uses ANSI color coding in its terminal output. +cli_server.color = On + +[Date] +; Defines the default timezone used by the date functions +; http://php.net/date.timezone +;date.timezone = + +; http://php.net/date.default-latitude +;date.default_latitude = 31.7667 + +; http://php.net/date.default-longitude +;date.default_longitude = 35.2333 + +; http://php.net/date.sunrise-zenith +;date.sunrise_zenith = 90.583333 + +; http://php.net/date.sunset-zenith +;date.sunset_zenith = 90.583333 + +[filter] +; http://php.net/filter.default +;filter.default = unsafe_raw + +; http://php.net/filter.default-flags +;filter.default_flags = + +[iconv] +; Use of this INI entry is deprecated, use global input_encoding instead. +; If empty, default_charset or input_encoding or iconv.input_encoding is used. +; The precedence is: default_charset < intput_encoding < iconv.input_encoding +;iconv.input_encoding = + +; Use of this INI entry is deprecated, use global internal_encoding instead. 
+; If empty, default_charset or internal_encoding or iconv.internal_encoding is used. +; The precedence is: default_charset < internal_encoding < iconv.internal_encoding +;iconv.internal_encoding = + +; Use of this INI entry is deprecated, use global output_encoding instead. +; If empty, default_charset or output_encoding or iconv.output_encoding is used. +; The precedence is: default_charset < output_encoding < iconv.output_encoding +; To use an output encoding conversion, iconv's output handler must be set +; otherwise output encoding conversion cannot be performed. +;iconv.output_encoding = + +[intl] +;intl.default_locale = +; This directive allows you to produce PHP errors when some error +; happens within intl functions. The value is the level of the error produced. +; Default is 0, which does not produce any errors. +;intl.error_level = E_WARNING +;intl.use_exceptions = 0 + +[sqlite3] +;sqlite3.extension_dir = + +[Pcre] +;PCRE library backtracking limit. +; http://php.net/pcre.backtrack-limit +;pcre.backtrack_limit=100000 + +;PCRE library recursion limit. +;Please note that if you set this value to a high number you may consume all +;the available process stack and eventually crash PHP (due to reaching the +;stack size limit imposed by the Operating System). +; http://php.net/pcre.recursion-limit +;pcre.recursion_limit=100000 + +;Enables or disables JIT compilation of patterns. This requires the PCRE +;library to be compiled with JIT support. +;pcre.jit=1 + +[Pdo] +; Whether to pool ODBC connections. Can be one of "strict", "relaxed" or "off" +; http://php.net/pdo-odbc.connection-pooling +;pdo_odbc.connection_pooling=strict + +;pdo_odbc.db2_instance_name + +[Pdo_mysql] +; If mysqlnd is used: Number of cache slots for the internal result set cache +; http://php.net/pdo_mysql.cache_size +pdo_mysql.cache_size = 2000 + +; Default socket name for local MySQL connects. If empty, uses the built-in +; MySQL defaults. +; http://php.net/pdo_mysql.default-socket +pdo_mysql.default_socket= + +[Phar] +; http://php.net/phar.readonly +;phar.readonly = On + +; http://php.net/phar.require-hash +;phar.require_hash = On + +;phar.cache_list = + +[mail function] +; For Win32 only. +; http://php.net/smtp +SMTP = localhost +; http://php.net/smtp-port +smtp_port = 25 + +; For Win32 only. +; http://php.net/sendmail-from +;sendmail_from = me@example.com + +; For Unix only. You may supply arguments as well (default: "sendmail -t -i"). +; http://php.net/sendmail-path +;sendmail_path = + +; Force the addition of the specified parameters to be passed as extra parameters +; to the sendmail binary. These parameters will always replace the value of +; the 5th parameter to mail(). +;mail.force_extra_parameters = + +; Add X-PHP-Originating-Script: that will include uid of the script followed by the filename +mail.add_x_header = On + +; The path to a log file that will log all mail() calls. Log entries include +; the full path of the script, line number, To address and headers. +;mail.log = +; Log mail to syslog (Event Log on Windows). +;mail.log = syslog + +[ODBC] +; http://php.net/odbc.default-db +;odbc.default_db = Not yet implemented + +; http://php.net/odbc.default-user +;odbc.default_user = Not yet implemented + +; http://php.net/odbc.default-pw +;odbc.default_pw = Not yet implemented + +; Controls the ODBC cursor model. +; Default: SQL_CURSOR_STATIC (default). +;odbc.default_cursortype + +; Allow or prevent persistent links. 
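+; (Illustrative note, not part of the stock php.ini: a persistent link is one
+; opened with odbc_pconnect() and reused across requests, trading held server
+; resources for cheaper connection setup.)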
+; http://php.net/odbc.allow-persistent +odbc.allow_persistent = On + +; Check that a connection is still valid before reuse. +; http://php.net/odbc.check-persistent +odbc.check_persistent = On + +; Maximum number of persistent links. -1 means no limit. +; http://php.net/odbc.max-persistent +odbc.max_persistent = -1 + +; Maximum number of links (persistent + non-persistent). -1 means no limit. +; http://php.net/odbc.max-links +odbc.max_links = -1 + +; Handling of LONG fields. Returns number of bytes to variables. 0 means +; passthru. +; http://php.net/odbc.defaultlrl +odbc.defaultlrl = 4096 + +; Handling of binary data. 0 means passthru, 1 return as is, 2 convert to char. +; See the documentation on odbc_binmode and odbc_longreadlen for an explanation +; of odbc.defaultlrl and odbc.defaultbinmode +; http://php.net/odbc.defaultbinmode +odbc.defaultbinmode = 1 + +;birdstep.max_links = -1 + +[Interbase] +; Allow or prevent persistent links. +ibase.allow_persistent = 1 + +; Maximum number of persistent links. -1 means no limit. +ibase.max_persistent = -1 + +; Maximum number of links (persistent + non-persistent). -1 means no limit. +ibase.max_links = -1 + +; Default database name for ibase_connect(). +;ibase.default_db = + +; Default username for ibase_connect(). +;ibase.default_user = + +; Default password for ibase_connect(). +;ibase.default_password = + +; Default charset for ibase_connect(). +;ibase.default_charset = + +; Default timestamp format. +ibase.timestampformat = "%Y-%m-%d %H:%M:%S" + +; Default date format. +ibase.dateformat = "%Y-%m-%d" + +; Default time format. +ibase.timeformat = "%H:%M:%S" + +[MySQLi] + +; Maximum number of persistent links. -1 means no limit. +; http://php.net/mysqli.max-persistent +mysqli.max_persistent = -1 + +; Allow accessing, from PHP's perspective, local files with LOAD DATA statements +; http://php.net/mysqli.allow_local_infile +;mysqli.allow_local_infile = On + +; Allow or prevent persistent links. +; http://php.net/mysqli.allow-persistent +mysqli.allow_persistent = On + +; Maximum number of links. -1 means no limit. +; http://php.net/mysqli.max-links +mysqli.max_links = -1 + +; If mysqlnd is used: Number of cache slots for the internal result set cache +; http://php.net/mysqli.cache_size +mysqli.cache_size = 2000 + +; Default port number for mysqli_connect(). If unset, mysqli_connect() will use +; the $MYSQL_TCP_PORT or the mysql-tcp entry in /etc/services or the +; compile-time value defined MYSQL_PORT (in that order). Win32 will only look +; at MYSQL_PORT. +; http://php.net/mysqli.default-port +mysqli.default_port = 3306 + +; Default socket name for local MySQL connects. If empty, uses the built-in +; MySQL defaults. +; http://php.net/mysqli.default-socket +mysqli.default_socket = + +; Default host for mysql_connect() (doesn't apply in safe mode). +; http://php.net/mysqli.default-host +mysqli.default_host = + +; Default user for mysql_connect() (doesn't apply in safe mode). +; http://php.net/mysqli.default-user +mysqli.default_user = + +; Default password for mysqli_connect() (doesn't apply in safe mode). +; Note that this is generally a *bad* idea to store passwords in this file. +; *Any* user with PHP access can run 'echo get_cfg_var("mysqli.default_pw") +; and reveal this password! And of course, any users with read access to this +; file will be able to reveal the password as well. 
+; http://php.net/mysqli.default-pw +mysqli.default_pw = + +; Allow or prevent reconnect +mysqli.reconnect = Off + +[mysqlnd] +; Enable / Disable collection of general statistics by mysqlnd which can be +; used to tune and monitor MySQL operations. +; http://php.net/mysqlnd.collect_statistics +mysqlnd.collect_statistics = On + +; Enable / Disable collection of memory usage statistics by mysqlnd which can be +; used to tune and monitor MySQL operations. +; http://php.net/mysqlnd.collect_memory_statistics +mysqlnd.collect_memory_statistics = Off + +; Records communication from all extensions using mysqlnd to the specified log +; file. +; http://php.net/mysqlnd.debug +;mysqlnd.debug = + +; Defines which queries will be logged. +; http://php.net/mysqlnd.log_mask +;mysqlnd.log_mask = 0 + +; Default size of the mysqlnd memory pool, which is used by result sets. +; http://php.net/mysqlnd.mempool_default_size +;mysqlnd.mempool_default_size = 16000 + +; Size of a pre-allocated buffer used when sending commands to MySQL in bytes. +; http://php.net/mysqlnd.net_cmd_buffer_size +;mysqlnd.net_cmd_buffer_size = 2048 + +; Size of a pre-allocated buffer used for reading data sent by the server in +; bytes. +; http://php.net/mysqlnd.net_read_buffer_size +;mysqlnd.net_read_buffer_size = 32768 + +; Timeout for network requests in seconds. +; http://php.net/mysqlnd.net_read_timeout +;mysqlnd.net_read_timeout = 31536000 + +; SHA-256 Authentication Plugin related. File with the MySQL server public RSA +; key. +; http://php.net/mysqlnd.sha256_server_public_key +;mysqlnd.sha256_server_public_key = + +[OCI8] + +; Connection: Enables privileged connections using external +; credentials (OCI_SYSOPER, OCI_SYSDBA) +; http://php.net/oci8.privileged-connect +;oci8.privileged_connect = Off + +; Connection: The maximum number of persistent OCI8 connections per +; process. Using -1 means no limit. +; http://php.net/oci8.max-persistent +;oci8.max_persistent = -1 + +; Connection: The maximum number of seconds a process is allowed to +; maintain an idle persistent connection. Using -1 means idle +; persistent connections will be maintained forever. +; http://php.net/oci8.persistent-timeout +;oci8.persistent_timeout = -1 + +; Connection: The number of seconds that must pass before issuing a +; ping during oci_pconnect() to check the connection validity. When +; set to 0, each oci_pconnect() will cause a ping. Using -1 disables +; pings completely. +; http://php.net/oci8.ping-interval +;oci8.ping_interval = 60 + +; Connection: Set this to a user chosen connection class to be used +; for all pooled server requests with Oracle 11g Database Resident +; Connection Pooling (DRCP). To use DRCP, this value should be set to +; the same string for all web servers running the same application, +; the database pool must be configured, and the connection string must +; specify to use a pooled server. +;oci8.connection_class = + +; High Availability: Using On lets PHP receive Fast Application +; Notification (FAN) events generated when a database node fails. The +; database must also be configured to post FAN events. +;oci8.events = Off + +; Tuning: This option enables statement caching, and specifies how +; many statements to cache. Using 0 disables statement caching. +; http://php.net/oci8.statement-cache-size +;oci8.statement_cache_size = 20 + +; Tuning: Enables statement prefetching and sets the default number of +; rows that will be fetched automatically after statement execution. 
+; http://php.net/oci8.default-prefetch +;oci8.default_prefetch = 100 + +; Compatibility. Using On means oci_close() will not close +; oci_connect() and oci_new_connect() connections. +; http://php.net/oci8.old-oci-close-semantics +;oci8.old_oci_close_semantics = Off + +[PostgreSQL] +; Allow or prevent persistent links. +; http://php.net/pgsql.allow-persistent +pgsql.allow_persistent = On + +; Detect broken persistent links always with pg_pconnect(). +; Auto reset feature requires a little overheads. +; http://php.net/pgsql.auto-reset-persistent +pgsql.auto_reset_persistent = Off + +; Maximum number of persistent links. -1 means no limit. +; http://php.net/pgsql.max-persistent +pgsql.max_persistent = -1 + +; Maximum number of links (persistent+non persistent). -1 means no limit. +; http://php.net/pgsql.max-links +pgsql.max_links = -1 + +; Ignore PostgreSQL backends Notice message or not. +; Notice message logging require a little overheads. +; http://php.net/pgsql.ignore-notice +pgsql.ignore_notice = 0 + +; Log PostgreSQL backends Notice message or not. +; Unless pgsql.ignore_notice=0, module cannot log notice message. +; http://php.net/pgsql.log-notice +pgsql.log_notice = 0 + +[bcmath] +; Number of decimal digits for all bcmath functions. +; http://php.net/bcmath.scale +bcmath.scale = 0 + +[browscap] +; http://php.net/browscap +;browscap = extra/browscap.ini + +[Session] +; Handler used to store/retrieve data. +; http://php.net/session.save-handler +session.save_handler = files + +; Argument passed to save_handler. In the case of files, this is the path +; where data files are stored. Note: Windows users have to change this +; variable in order to use PHP's session functions. +; +; The path can be defined as: +; +; session.save_path = "N;/path" +; +; where N is an integer. Instead of storing all the session files in +; /path, what this will do is use subdirectories N-levels deep, and +; store the session data in those directories. This is useful if +; your OS has problems with many files in one directory, and is +; a more efficient layout for servers that handle many sessions. +; +; NOTE 1: PHP will not create this directory structure automatically. +; You can use the script in the ext/session dir for that purpose. +; NOTE 2: See the section on garbage collection below if you choose to +; use subdirectories for session storage +; +; The file storage module creates files using mode 600 by default. +; You can change that by using +; +; session.save_path = "N;MODE;/path" +; +; where MODE is the octal representation of the mode. Note that this +; does not overwrite the process's umask. +; http://php.net/session.save-path +session.save_path = "/tmp" + +; Whether to use strict session mode. +; Strict session mode does not accept uninitialized session ID and regenerate +; session ID if browser sends uninitialized session ID. Strict mode protects +; applications from session fixation via session adoption vulnerability. It is +; disabled by default for maximum compatibility, but enabling it is encouraged. +; https://wiki.php.net/rfc/strict_sessions +session.use_strict_mode = 0 + +; Whether to use cookies. +; http://php.net/session.use-cookies +session.use_cookies = 1 + +; http://php.net/session.cookie-secure +;session.cookie_secure = + +; This option forces PHP to fetch and use a cookie for storing and maintaining +; the session id. We encourage this operation as it's very helpful in combating +; session hijacking when not specifying and managing your own session id. 
It is +; not the be-all and end-all of session hijacking defense, but it's a good start. +; http://php.net/session.use-only-cookies +session.use_only_cookies = 1 + +; Name of the session (used as cookie name). +; http://php.net/session.name +session.name = PHPSESSID + +; Initialize session on request startup. +; http://php.net/session.auto-start +session.auto_start = 0 + +; Lifetime in seconds of cookie or, if 0, until browser is restarted. +; http://php.net/session.cookie-lifetime +session.cookie_lifetime = 0 + +; The path for which the cookie is valid. +; http://php.net/session.cookie-path +session.cookie_path = / + +; The domain for which the cookie is valid. +; http://php.net/session.cookie-domain +session.cookie_domain = + +; Whether or not to add the httpOnly flag to the cookie, which makes it inaccessible to browser scripting languages such as JavaScript. +; http://php.net/session.cookie-httponly +session.cookie_httponly = + +; Handler used to serialize data. php is the standard serializer of PHP. +; http://php.net/session.serialize-handler +session.serialize_handler = php + +; Defines the probability that the 'garbage collection' process is started +; on every session initialization. The probability is calculated by using +; gc_probability/gc_divisor. Where session.gc_probability is the numerator +; and gc_divisor is the denominator in the equation. Setting this value to 1 +; when the session.gc_divisor value is 100 will give you approximately a 1% chance +; the gc will run on any give request. +; Default Value: 1 +; Development Value: 1 +; Production Value: 1 +; http://php.net/session.gc-probability +session.gc_probability = 1 + +; Defines the probability that the 'garbage collection' process is started on every +; session initialization. The probability is calculated by using the following equation: +; gc_probability/gc_divisor. Where session.gc_probability is the numerator and +; session.gc_divisor is the denominator in the equation. Setting this value to 1 +; when the session.gc_divisor value is 100 will give you approximately a 1% chance +; the gc will run on any give request. Increasing this value to 1000 will give you +; a 0.1% chance the gc will run on any give request. For high volume production servers, +; this is a more efficient approach. +; Default Value: 100 +; Development Value: 1000 +; Production Value: 1000 +; http://php.net/session.gc-divisor +session.gc_divisor = 1000 + +; After this number of seconds, stored data will be seen as 'garbage' and +; cleaned up by the garbage collection process. +; http://php.net/session.gc-maxlifetime +session.gc_maxlifetime = 1440 + +; NOTE: If you are using the subdirectory option for storing session files +; (see session.save_path above), then garbage collection does *not* +; happen automatically. You will need to do your own garbage +; collection through a shell script, cron entry, or some other method. +; For example, the following script would is the equivalent of +; setting session.gc_maxlifetime to 1440 (1440 seconds = 24 minutes): +; find /path/to/sessions -cmin +24 -type f | xargs rm + +; Check HTTP Referer to invalidate externally stored URLs containing ids. +; HTTP_REFERER has to contain this substring for the session to be +; considered as valid. +; http://php.net/session.referer-check +session.referer_check = + +; Set to {nocache,private,public,} to determine HTTP caching aspects +; or leave this empty to avoid sending anti-caching headers. 
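+; (Illustrative note, not part of the stock php.ini: 'nocache' makes PHP send
+; headers such as Cache-Control: no-store, no-cache, must-revalidate, so pages
+; tied to a session are never served from shared caches.)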
+; http://php.net/session.cache-limiter +session.cache_limiter = nocache + +; Document expires after n minutes. +; http://php.net/session.cache-expire +session.cache_expire = 180 + +; trans sid support is disabled by default. +; Use of trans sid may risk your users' security. +; Use this option with caution. +; - User may send URL contains active session ID +; to other person via. email/irc/etc. +; - URL that contains active session ID may be stored +; in publicly accessible computer. +; - User may access your site with the same session ID +; always using URL stored in browser's history or bookmarks. +; http://php.net/session.use-trans-sid +session.use_trans_sid = 0 + +; Set session ID character length. This value could be between 22 to 256. +; Shorter length than default is supported only for compatibility reason. +; Users should use 32 or more chars. +; http://php.net/session.sid-length +; Default Value: 32 +; Development Value: 26 +; Production Value: 26 +session.sid_length = 26 + +; The URL rewriter will look for URLs in a defined set of HTML tags. +;
<form> is special; if you include them here, the rewriter will
+; add a hidden <input> field with the info which is otherwise appended
+; to URLs. <form> tag's action attribute URL will not be modified
+; unless it is specified.
+; Note that all valid entries require a "=", even if no value follows.
+; Default Value: "a=href,area=href,frame=src,form="
+; Development Value: "a=href,area=href,frame=src,form="
+; Production Value: "a=href,area=href,frame=src,form="
+; http://php.net/url-rewriter.tags
+session.trans_sid_tags = "a=href,area=href,frame=src,form="
+
+; URL rewriter does not rewrite absolute URLs by default.
+; To enable rewrites for absolute paths, target hosts must be specified
+; at RUNTIME. i.e. use ini_set()
+; <form> tags is special. PHP will check action attribute's URL regardless
+; of session.trans_sid_tags setting.
+; If no host is defined, HTTP_HOST will be used for allowed host.
+; Example value: php.net,www.php.net,wiki.php.net
+; Use "," for multiple hosts. No spaces are allowed.
+; Default Value: ""
+; Development Value: ""
+; Production Value: ""
+;session.trans_sid_hosts=""
+
+; Define how many bits are stored in each character when converting
+; the binary hash data to something readable.
+; Possible values:
+;   4  (4 bits: 0-9, a-f)
+;   5  (5 bits: 0-9, a-v)
+;   6  (6 bits: 0-9, a-z, A-Z, "-", ",")
+; Default Value: 4
+; Development Value: 5
+; Production Value: 5
+; http://php.net/session.hash-bits-per-character
+session.sid_bits_per_character = 5
+
+; Enable upload progress tracking in $_SESSION
+; Default Value: On
+; Development Value: On
+; Production Value: On
+; http://php.net/session.upload-progress.enabled
+;session.upload_progress.enabled = On
+
+; Cleanup the progress information as soon as all POST data has been read
+; (i.e. upload completed).
+; Default Value: On
+; Development Value: On
+; Production Value: On
+; http://php.net/session.upload-progress.cleanup
+;session.upload_progress.cleanup = On
+
+; A prefix used for the upload progress key in $_SESSION
+; Default Value: "upload_progress_"
+; Development Value: "upload_progress_"
+; Production Value: "upload_progress_"
+; http://php.net/session.upload-progress.prefix
+;session.upload_progress.prefix = "upload_progress_"
+
+; The index name (concatenated with the prefix) in $_SESSION
+; containing the upload progress information
+; Default Value: "PHP_SESSION_UPLOAD_PROGRESS"
+; Development Value: "PHP_SESSION_UPLOAD_PROGRESS"
+; Production Value: "PHP_SESSION_UPLOAD_PROGRESS"
+; http://php.net/session.upload-progress.name
+;session.upload_progress.name = "PHP_SESSION_UPLOAD_PROGRESS"
+
+; How frequently the upload progress should be updated.
+; Given either in percentages (per-file), or in bytes
+; Default Value: "1%"
+; Development Value: "1%"
+; Production Value: "1%"
+; http://php.net/session.upload-progress.freq
+;session.upload_progress.freq = "1%"
+
+; The minimum delay between updates, in seconds
+; Default Value: 1
+; Development Value: 1
+; Production Value: 1
+; http://php.net/session.upload-progress.min-freq
+;session.upload_progress.min_freq = "1"
+
+; Only write session data when session data is changed. Enabled by default.
+; http://php.net/session.lazy-write
+;session.lazy_write = On
+
+[Assertion]
+; Switch whether to compile assertions at all (to have no overhead at run-time)
+; -1: Do not compile at all
+; 0: Jump over assertion at run-time
+; 1: Execute assertions
+; Changing from or to a negative value is only possible in php.ini! 
(For turning assertions on and off at run-time, see assert.active, when zend.assertions = 1) +; Default Value: 1 +; Development Value: 1 +; Production Value: -1 +; http://php.net/zend.assertions +zend.assertions = -1 + +; Assert(expr); active by default. +; http://php.net/assert.active +;assert.active = On + +; Throw an AssertationException on failed assertions +; http://php.net/assert.exception +;assert.exception = On + +; Issue a PHP warning for each failed assertion. (Overridden by assert.exception if active) +; http://php.net/assert.warning +;assert.warning = On + +; Don't bail out by default. +; http://php.net/assert.bail +;assert.bail = Off + +; User-function to be called if an assertion fails. +; http://php.net/assert.callback +;assert.callback = 0 + +; Eval the expression with current error_reporting(). Set to true if you want +; error_reporting(0) around the eval(). +; http://php.net/assert.quiet-eval +;assert.quiet_eval = 0 + +[COM] +; path to a file containing GUIDs, IIDs or filenames of files with TypeLibs +; http://php.net/com.typelib-file +;com.typelib_file = + +; allow Distributed-COM calls +; http://php.net/com.allow-dcom +;com.allow_dcom = true + +; autoregister constants of a components typlib on com_load() +; http://php.net/com.autoregister-typelib +;com.autoregister_typelib = true + +; register constants casesensitive +; http://php.net/com.autoregister-casesensitive +;com.autoregister_casesensitive = false + +; show warnings on duplicate constant registrations +; http://php.net/com.autoregister-verbose +;com.autoregister_verbose = true + +; The default character set code-page to use when passing strings to and from COM objects. +; Default: system ANSI code page +;com.code_page= + +[mbstring] +; language for internal character representation. +; This affects mb_send_mail() and mbstring.detect_order. +; http://php.net/mbstring.language +;mbstring.language = Japanese + +; Use of this INI entry is deprecated, use global internal_encoding instead. +; internal/script encoding. +; Some encoding cannot work as internal encoding. (e.g. SJIS, BIG5, ISO-2022-*) +; If empty, default_charset or internal_encoding or iconv.internal_encoding is used. +; The precedence is: default_charset < internal_encoding < iconv.internal_encoding +;mbstring.internal_encoding = + +; Use of this INI entry is deprecated, use global input_encoding instead. +; http input encoding. +; mbstring.encoding_traslation = On is needed to use this setting. +; If empty, default_charset or input_encoding or mbstring.input is used. +; The precedence is: default_charset < intput_encoding < mbsting.http_input +; http://php.net/mbstring.http-input +;mbstring.http_input = + +; Use of this INI entry is deprecated, use global output_encoding instead. +; http output encoding. +; mb_output_handler must be registered as output buffer to function. +; If empty, default_charset or output_encoding or mbstring.http_output is used. +; The precedence is: default_charset < output_encoding < mbstring.http_output +; To use an output encoding conversion, mbstring's output handler must be set +; otherwise output encoding conversion cannot be performed. +; http://php.net/mbstring.http-output +;mbstring.http_output = + +; enable automatic encoding translation according to +; mbstring.internal_encoding setting. Input chars are +; converted to internal encoding by setting this to On. +; Note: Do _not_ use automatic encoding translation for +; portable libs/applications. 
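+; (Illustrative note, not part of the stock php.ini: with translation On and a
+; UTF-8 internal encoding, e.g. Shift_JIS form input would be transcoded before
+; it reaches $_POST.)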
+; http://php.net/mbstring.encoding-translation +;mbstring.encoding_translation = Off + +; automatic encoding detection order. +; "auto" detect order is changed according to mbstring.language +; http://php.net/mbstring.detect-order +;mbstring.detect_order = auto + +; substitute_character used when character cannot be converted +; one from another +; http://php.net/mbstring.substitute-character +;mbstring.substitute_character = none + +; overload(replace) single byte functions by mbstring functions. +; mail(), ereg(), etc are overloaded by mb_send_mail(), mb_ereg(), +; etc. Possible values are 0,1,2,4 or combination of them. +; For example, 7 for overload everything. +; 0: No overload +; 1: Overload mail() function +; 2: Overload str*() functions +; 4: Overload ereg*() functions +; http://php.net/mbstring.func-overload +;mbstring.func_overload = 0 + +; enable strict encoding detection. +; Default: Off +;mbstring.strict_detection = On + +; This directive specifies the regex pattern of content types for which mb_output_handler() +; is activated. +; Default: mbstring.http_output_conv_mimetype=^(text/|application/xhtml\+xml) +;mbstring.http_output_conv_mimetype= + +[gd] +; Tell the jpeg decode to ignore warnings and try to create +; a gd image. The warning will then be displayed as notices +; disabled by default +; http://php.net/gd.jpeg-ignore-warning +;gd.jpeg_ignore_warning = 1 + +[exif] +; Exif UNICODE user comments are handled as UCS-2BE/UCS-2LE and JIS as JIS. +; With mbstring support this will automatically be converted into the encoding +; given by corresponding encode setting. When empty mbstring.internal_encoding +; is used. For the decode settings you can distinguish between motorola and +; intel byte order. A decode setting cannot be empty. +; http://php.net/exif.encode-unicode +;exif.encode_unicode = ISO-8859-15 + +; http://php.net/exif.decode-unicode-motorola +;exif.decode_unicode_motorola = UCS-2BE + +; http://php.net/exif.decode-unicode-intel +;exif.decode_unicode_intel = UCS-2LE + +; http://php.net/exif.encode-jis +;exif.encode_jis = + +; http://php.net/exif.decode-jis-motorola +;exif.decode_jis_motorola = JIS + +; http://php.net/exif.decode-jis-intel +;exif.decode_jis_intel = JIS + +[Tidy] +; The path to a default tidy configuration file to use when using tidy +; http://php.net/tidy.default-config +;tidy.default_config = /usr/local/lib/php/default.tcfg + +; Should tidy clean and repair output automatically? +; WARNING: Do not use this option if you are generating non-html content +; such as dynamic images +; http://php.net/tidy.clean-output +tidy.clean_output = Off + +[soap] +; Enables or disables WSDL caching feature. +; http://php.net/soap.wsdl-cache-enabled +soap.wsdl_cache_enabled=1 + +; Sets the directory name where SOAP extension will put cache files. +; http://php.net/soap.wsdl-cache-dir +soap.wsdl_cache_dir="/tmp" + +; (time to live) Sets the number of second while cached file will be used +; instead of original one. +; http://php.net/soap.wsdl-cache-ttl +soap.wsdl_cache_ttl=86400 + +; Sets the size of the cache limit. (Max. number of WSDL files to cache) +soap.wsdl_cache_limit = 5 + +[sysvshm] +; A default size of the shared memory segment +;sysvshm.init_mem = 10000 + +[ldap] +; Sets the maximum number of open links or -1 for unlimited. 
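+; (Illustrative note, not part of the stock php.ini: a "link" here is one
+; ldap_connect() handle, so -1 leaves the per-process handle count uncapped.)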
+ldap.max_links = -1 + +[dba] +;dba.default_handler= + +[opcache] +; Determines if Zend OPCache is enabled +;opcache.enable=1 + +; Determines if Zend OPCache is enabled for the CLI version of PHP +;opcache.enable_cli=1 + +; The OPcache shared memory storage size. +;opcache.memory_consumption=128 + +; The amount of memory for interned strings in Mbytes. +;opcache.interned_strings_buffer=8 + +; The maximum number of keys (scripts) in the OPcache hash table. +; Only numbers between 200 and 1000000 are allowed. +;opcache.max_accelerated_files=10000 + +; The maximum percentage of "wasted" memory until a restart is scheduled. +;opcache.max_wasted_percentage=5 + +; When this directive is enabled, the OPcache appends the current working +; directory to the script key, thus eliminating possible collisions between +; files with the same name (basename). Disabling the directive improves +; performance, but may break existing applications. +;opcache.use_cwd=1 + +; When disabled, you must reset the OPcache manually or restart the +; webserver for changes to the filesystem to take effect. +;opcache.validate_timestamps=1 + +; How often (in seconds) to check file timestamps for changes to the shared +; memory storage allocation. ("1" means validate once per second, but only +; once per request. "0" means always validate) +;opcache.revalidate_freq=2 + +; Enables or disables file search in include_path optimization +;opcache.revalidate_path=0 + +; If disabled, all PHPDoc comments are dropped from the code to reduce the +; size of the optimized code. +;opcache.save_comments=1 + +; If enabled, a fast shutdown sequence is used for the accelerated code +; Depending on the used Memory Manager this may cause some incompatibilities. +;opcache.fast_shutdown=0 + +; Allow file existence override (file_exists, etc.) performance feature. +;opcache.enable_file_override=0 + +; A bitmask, where each bit enables or disables the appropriate OPcache +; passes +;opcache.optimization_level=0xffffffff + +;opcache.inherited_hack=1 +;opcache.dups_fix=0 + +; The location of the OPcache blacklist file (wildcards allowed). +; Each OPcache blacklist file is a text file that holds the names of files +; that should not be accelerated. The file format is to add each filename +; to a new line. The filename may be a full path or just a file prefix +; (i.e., /var/www/x blacklists all the files and directories in /var/www +; that start with 'x'). Line starting with a ; are ignored (comments). +;opcache.blacklist_filename= + +; Allows exclusion of large files from being cached. By default all files +; are cached. +;opcache.max_file_size=0 + +; Check the cache checksum each N requests. +; The default value of "0" means that the checks are disabled. +;opcache.consistency_checks=0 + +; How long to wait (in seconds) for a scheduled restart to begin if the cache +; is not being accessed. +;opcache.force_restart_timeout=180 + +; OPcache error_log file name. Empty string assumes "stderr". +;opcache.error_log= + +; All OPcache errors go to the Web server log. +; By default, only fatal errors (level 0) or errors (level 1) are logged. +; You can also enable warnings (level 2), info messages (level 3) or +; debug messages (level 4). +;opcache.log_verbosity_level=1 + +; Preferred Shared Memory back-end. Leave empty and let the system decide. +;opcache.preferred_memory_model= + +; Protect the shared memory from unexpected writing during script execution. +; Useful for internal debugging only. 
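+; (Illustrative note, not part of the stock php.ini: this write-protects the
+; shared opcode cache while scripts run so stray writes fault immediately;
+; keep it at 0 outside of debugging sessions.)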
+;opcache.protect_memory=0 + +; Allows calling OPcache API functions only from PHP scripts which path is +; started from specified string. The default "" means no restriction +;opcache.restrict_api= + +; Mapping base of shared memory segments (for Windows only). All the PHP +; processes have to map shared memory into the same address space. This +; directive allows to manually fix the "Unable to reattach to base address" +; errors. +;opcache.mmap_base= + +; Enables and sets the second level cache directory. +; It should improve performance when SHM memory is full, at server restart or +; SHM reset. The default "" disables file based caching. +;opcache.file_cache= + +; Enables or disables opcode caching in shared memory. +;opcache.file_cache_only=0 + +; Enables or disables checksum validation when script loaded from file cache. +;opcache.file_cache_consistency_checks=1 + +; Implies opcache.file_cache_only=1 for a certain process that failed to +; reattach to the shared memory (for Windows only). Explicitly enabled file +; cache is required. +;opcache.file_cache_fallback=1 + +; Enables or disables copying of PHP code (text segment) into HUGE PAGES. +; This should improve performance, but requires appropriate OS configuration. +;opcache.huge_code_pages=1 + +; Validate cached file permissions. +;opcache.validate_permission=0 + +; Prevent name collisions in chroot'ed environment. +;opcache.validate_root=0 + +[curl] +; A default value for the CURLOPT_CAINFO option. This is required to be an +; absolute path. +;curl.cainfo = + +[openssl] +; The location of a Certificate Authority (CA) file on the local filesystem +; to use when verifying the identity of SSL/TLS peers. Most users should +; not specify a value for this directive as PHP will attempt to use the +; OS-managed cert stores in its absence. If specified, this value may still +; be overridden on a per-stream basis via the "cafile" SSL stream context +; option. +;openssl.cafile= + +; If openssl.cafile is not specified or if the CA file is not found, the +; directory pointed to by openssl.capath is searched for a suitable +; certificate. This value must be a correctly hashed certificate directory. +; Most users should not specify a value for this directive as PHP will +; attempt to use the OS-managed cert stores in its absence. If specified, +; this value may still be overridden on a per-stream basis via the "capath" +; SSL stream context option. 
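+; (Illustrative example, not part of the stock php.ini: on Debian-based images
+; the hashed CA directory is typically /etc/ssl/certs, i.e.
+; openssl.capath=/etc/ssl/certs.)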
+;openssl.capath=
+
+; Local Variables:
+; tab-width: 4
+; End:
diff --git a/php-fpm/xhprof.ini b/php-fpm/xhprof.ini
new file mode 100644
index 00000000..1b010b9f
--- /dev/null
+++ b/php-fpm/xhprof.ini
@@ -0,0 +1,8 @@
+[xhprof]
+; extension=xhprof.so
+extension=tideways.so
+xhprof.output_dir=/var/www/xhprof
+; no need to autoload, control in the program
+tideways.auto_prepend_library=0
+; set default rate
+tideways.sample_rate=100
\ No newline at end of file
diff --git a/php-worker/Dockerfile b/php-worker/Dockerfile
index 555e59fc..b2e496f3 100644
--- a/php-worker/Dockerfile
+++ b/php-worker/Dockerfile
@@ -16,6 +16,7 @@ RUN apk --update add wget \
     libmemcached-dev \
     libmcrypt-dev \
     libxml2-dev \
+    pcre-dev \
     zlib-dev \
     autoconf \
     cyrus-sasl-dev \
@@ -23,7 +24,16 @@ RUN apk --update add wget \
     supervisor

 RUN docker-php-ext-install mysqli mbstring pdo pdo_mysql tokenizer xml pcntl
-RUN pecl channel-update pecl.php.net && pecl install memcached mcrypt-1.0.1 && docker-php-ext-enable memcached
+RUN pecl channel-update pecl.php.net && pecl install memcached mcrypt-1.0.1 mongodb && docker-php-ext-enable memcached mongodb
+
+# Add a non-root user:
+ARG PUID=1000
+ENV PUID ${PUID}
+ARG PGID=1000
+ENV PGID ${PGID}
+
+RUN addgroup -g ${PGID} laradock && \
+    adduser -D -G laradock -u ${PUID} laradock

 #Install SOAP package:
 ARG INSTALL_SOAP=false
@@ -53,9 +63,112 @@ RUN if [ ${INSTALL_ZIP_ARCHIVE} = true ]; then \
     docker-php-ext-install zip \
 ;fi

+# Install MySQL Client:
+ARG INSTALL_MYSQL_CLIENT=false
+RUN if [ ${INSTALL_MYSQL_CLIENT} = true ]; then \
+    apk --update add mysql-client \
+;fi
+
+# Install FFMPEG:
+ARG INSTALL_FFMPEG=false
+RUN if [ ${INSTALL_FFMPEG} = true ]; then \
+    apk --update add ffmpeg \
+;fi
+
+# Install AMQP:
+ARG INSTALL_AMQP=false
+
+RUN if [ ${INSTALL_AMQP} = true ]; then \
+    apk --update add rabbitmq-c rabbitmq-c-dev && \
+    pecl install amqp && \
+    docker-php-ext-enable amqp && \
+    docker-php-ext-install sockets \
+;fi
+
+# Install Cassandra drivers:
+ARG INSTALL_CASSANDRA=false
+RUN if [ ${INSTALL_CASSANDRA} = true ]; then \
+    apk --update add cassandra-cpp-driver \
+    ;fi
+
+WORKDIR /usr/src
+RUN if [ ${INSTALL_CASSANDRA} = true ]; then \
+    git clone https://github.com/datastax/php-driver.git \
+    && cd php-driver/ext \
+    && phpize \
+    && mkdir -p /usr/src/php-driver/build \
+    && cd /usr/src/php-driver/build \
+    && ../ext/configure --with-php-config=/usr/bin/php-config7.1 > /dev/null \
+    && make clean >/dev/null \
+    && make >/dev/null 2>&1 \
+    && make install \
+    && docker-php-ext-enable cassandra \
+;fi
+
+# Install Phalcon ext
+ARG INSTALL_PHALCON=false
+ARG PHALCON_VERSION
+ENV PHALCON_VERSION ${PHALCON_VERSION}
+
+RUN if [ $INSTALL_PHALCON = true ]; then \
+    apk --update add unzip gcc make re2c bash\
+    && curl -L -o /tmp/cphalcon.zip https://github.com/phalcon/cphalcon/archive/v${PHALCON_VERSION}.zip \
+    && unzip -d /tmp/ /tmp/cphalcon.zip \
+    && cd /tmp/cphalcon-${PHALCON_VERSION}/build \
+    && ./install \
+    && rm -rf /tmp/cphalcon* \
+;fi
+
+#Install GHOSTSCRIPT:
+ARG INSTALL_GHOSTSCRIPT=false
+RUN if [ $INSTALL_GHOSTSCRIPT = true ]; then \
+    apk --update add ghostscript \
+;fi
+
+#Install GMP package:
+ARG INSTALL_GMP=false
+RUN if [ ${INSTALL_GMP} = true ]; then \
+    apk add --update --no-cache gmp gmp-dev \
+    && docker-php-ext-install gmp \
+;fi
+
+
 RUN rm /var/cache/apk/* \
     && mkdir -p /var/www
+
+
+###########################################################################
+# Swoole EXTENSION
+###########################################################################
+
+ARG INSTALL_SWOOLE=false
+
+RUN if [ 
${INSTALL_SWOOLE} = true ]; then \ + # Install Php Swoole Extension + if [ $(php -r "echo PHP_MAJOR_VERSION;") = "5" ]; then \ + pecl -q install swoole-2.0.10; \ + else \ + if [ $(php -r "echo PHP_MINOR_VERSION;") = "0" ]; then \ + pecl install swoole-2.2.0; \ + else \ + pecl install swoole; \ + fi \ + fi \ + && docker-php-ext-enable swoole \ +;fi + +########################################################################### +# Taint EXTENSION +########################################################################### + +ARG INSTALL_TAINT=false + +RUN if [ ${INSTALL_TAINT} = true ]; then \ + # Install Php TAINT Extension + if [ $(php -r "echo PHP_MAJOR_VERSION;") = "7" ]; then \ + pecl install taint; \ + fi && \ + docker-php-ext-enable taint \ +;fi + # #-------------------------------------------------------------------------- # Optional Supervisord Configuration diff --git a/php-worker/supervisord.d/laravel-scheduler.conf.example b/php-worker/supervisord.d/laravel-scheduler.conf.example new file mode 100644 index 00000000..0e83f878 --- /dev/null +++ b/php-worker/supervisord.d/laravel-scheduler.conf.example @@ -0,0 +1,8 @@ +[program:laravel-scheduler] +process_name=%(program_name)s_%(process_num)02d +command=/bin/sh -c "while [ true ]; do (php /var/www/artisan schedule:run --verbose --no-interaction &); sleep 60; done" +autostart=true +autorestart=true +numprocs=1 +user=laradock +redirect_stderr=true diff --git a/php-worker/supervisord.d/laravel-worker.conf.example b/php-worker/supervisord.d/laravel-worker.conf.example index 06156bc5..06401183 100644 --- a/php-worker/supervisord.d/laravel-worker.conf.example +++ b/php-worker/supervisord.d/laravel-worker.conf.example @@ -4,4 +4,5 @@ command=php /var/www/artisan queue:work --sleep=3 --tries=3 --daemon autostart=true autorestart=true numprocs=8 +user=laradock redirect_stderr=true diff --git a/postgres/docker-entrypoint-initdb.d/.gitignore b/postgres/docker-entrypoint-initdb.d/.gitignore index c462039b..a56b450c 100644 --- a/postgres/docker-entrypoint-initdb.d/.gitignore +++ b/postgres/docker-entrypoint-initdb.d/.gitignore @@ -1,3 +1,5 @@ *.sh !init_gitlab_db.sh !init_jupyterhub_db.sh +!init_sonarqube_db.sh +!init_confluence_db.sh \ No newline at end of file diff --git a/postgres/docker-entrypoint-initdb.d/init_confluence_db.sh b/postgres/docker-entrypoint-initdb.d/init_confluence_db.sh new file mode 100644 index 00000000..ce5e9f72 --- /dev/null +++ b/postgres/docker-entrypoint-initdb.d/init_confluence_db.sh @@ -0,0 +1,44 @@ +#!/bin/bash +# +# Copy createdb.sh.example to createdb.sh +# then uncomment then set database name and username to create you need databases +# +# example: .env POSTGRES_USER=appuser and need db name is myshop_db +# +# psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL +# CREATE USER myuser WITH PASSWORD 'mypassword'; +# CREATE DATABASE myshop_db; +# GRANT ALL PRIVILEGES ON DATABASE myshop_db TO myuser; +# EOSQL +# +# this sh script will auto run when the postgres container starts and the $DATA_PATH_HOST/postgres not found. 
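+#
+# Illustrative .env values for the Confluence block at the bottom of this file
+# (the variable names come from that block; the values are examples only):
+#
+#   POSTGRES_CONFLUENCE_INIT=true
+#   POSTGRES_CONFLUENCE_USER=laradock_confluence
+#   POSTGRES_CONFLUENCE_PASSWORD=laradock_confluence
+#   POSTGRES_CONFLUENCE_DB=laradock_confluence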
+# +# +# psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL +# CREATE USER db1 WITH PASSWORD 'db1'; +# CREATE DATABASE db1; +# GRANT ALL PRIVILEGES ON DATABASE db1 TO db1; +# EOSQL +# +# psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL +# CREATE USER db2 WITH PASSWORD 'db2'; +# CREATE DATABASE db2; +# GRANT ALL PRIVILEGES ON DATABASE db2 TO db2; +# EOSQL +# +# psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL +# CREATE USER db3 WITH PASSWORD 'db3'; +# CREATE DATABASE db3; +# GRANT ALL PRIVILEGES ON DATABASE db3 TO db3; +# EOSQL +# +### default database and user for confluence ############################################## +if [ "$POSTGRES_CONFLUENCE_INIT" == 'true' ]; then + psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL + CREATE USER $POSTGRES_CONFLUENCE_USER WITH PASSWORD '$POSTGRES_CONFLUENCE_PASSWORD'; + CREATE DATABASE $POSTGRES_CONFLUENCE_DB; + GRANT ALL PRIVILEGES ON DATABASE $POSTGRES_CONFLUENCE_DB TO $POSTGRES_CONFLUENCE_USER; + ALTER ROLE $POSTGRES_CONFLUENCE_USER CREATEROLE SUPERUSER; + EOSQL + echo +fi \ No newline at end of file diff --git a/postgres/docker-entrypoint-initdb.d/init_gitlab_db.sh b/postgres/docker-entrypoint-initdb.d/init_gitlab_db.sh index d9d7738a..4f4267df 100644 --- a/postgres/docker-entrypoint-initdb.d/init_gitlab_db.sh +++ b/postgres/docker-entrypoint-initdb.d/init_gitlab_db.sh @@ -33,9 +33,12 @@ # EOSQL # ### default database and user for gitlab ############################################## -psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL - CREATE USER laradock_gitlab WITH PASSWORD 'laradock_gitlab'; - CREATE DATABASE laradock_gitlab; - GRANT ALL PRIVILEGES ON DATABASE laradock_gitlab TO laradock_gitlab; - ALTER ROLE laradock_gitlab CREATEROLE SUPERUSER; -EOSQL +if [ "$GITLAB_POSTGRES_INIT" == 'true' ]; then + psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL + CREATE USER $GITLAB_POSTGRES_USER WITH PASSWORD '$GITLAB_POSTGRES_PASSWORD'; + CREATE DATABASE $GITLAB_POSTGRES_DB; + GRANT ALL PRIVILEGES ON DATABASE $GITLAB_POSTGRES_DB TO $GITLAB_POSTGRES_USER; + ALTER ROLE $GITLAB_POSTGRES_USER CREATEROLE SUPERUSER; + EOSQL + echo +fi \ No newline at end of file diff --git a/postgres/docker-entrypoint-initdb.d/init_jupyterhub_db.sh b/postgres/docker-entrypoint-initdb.d/init_jupyterhub_db.sh index 6f3d44c3..c3869795 100644 --- a/postgres/docker-entrypoint-initdb.d/init_jupyterhub_db.sh +++ b/postgres/docker-entrypoint-initdb.d/init_jupyterhub_db.sh @@ -33,9 +33,12 @@ # EOSQL # ### default database and user for jupyterhub ############################################## -psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL - CREATE USER laradock_jupyterhub WITH PASSWORD 'laradock_jupyterhub'; - CREATE DATABASE laradock_jupyterhub; - GRANT ALL PRIVILEGES ON DATABASE laradock_jupyterhub TO laradock_jupyterhub; - ALTER ROLE laradock_jupyterhub CREATEROLE SUPERUSER; -EOSQL +if [ "$JUPYTERHUB_POSTGRES_INIT" == 'true' ]; then + psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL + CREATE USER $JUPYTERHUB_POSTGRES_USER WITH PASSWORD '$JUPYTERHUB_POSTGRES_PASSWORD'; + CREATE DATABASE $JUPYTERHUB_POSTGRES_DB; + GRANT ALL PRIVILEGES ON DATABASE $JUPYTERHUB_POSTGRES_DB TO $JUPYTERHUB_POSTGRES_USER; + ALTER ROLE $JUPYTERHUB_POSTGRES_USER CREATEROLE SUPERUSER; + EOSQL + echo +fi diff --git 
a/postgres/docker-entrypoint-initdb.d/init_sonarqube_db.sh b/postgres/docker-entrypoint-initdb.d/init_sonarqube_db.sh
new file mode 100644
index 00000000..fea961de
--- /dev/null
+++ b/postgres/docker-entrypoint-initdb.d/init_sonarqube_db.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+#
+# Copy createdb.sh.example to createdb.sh
+# then uncomment then set database name and username to create you need databases
+#
+# example: .env POSTGRES_USER=appuser and need db name is myshop_db
+#
+# psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL
+#    CREATE USER myuser WITH PASSWORD 'mypassword';
+#    CREATE DATABASE myshop_db;
+#    GRANT ALL PRIVILEGES ON DATABASE myshop_db TO myuser;
+# EOSQL
+#
+# this sh script will auto run when the postgres container starts and the $DATA_PATH_HOST/postgres not found.
+#
+#
+# psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL
+#    CREATE USER db1 WITH PASSWORD 'db1';
+#    CREATE DATABASE db1;
+#    GRANT ALL PRIVILEGES ON DATABASE db1 TO db1;
+# EOSQL
+#
+# psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL
+#    CREATE USER db2 WITH PASSWORD 'db2';
+#    CREATE DATABASE db2;
+#    GRANT ALL PRIVILEGES ON DATABASE db2 TO db2;
+# EOSQL
+#
+# psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL
+#    CREATE USER db3 WITH PASSWORD 'db3';
+#    CREATE DATABASE db3;
+#    GRANT ALL PRIVILEGES ON DATABASE db3 TO db3;
+# EOSQL
+#
+### default database and user for sonarqube ##############################################
+if [ "$SONARQUBE_POSTGRES_INIT" == 'true' ]; then
+    psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL
+        CREATE USER $SONARQUBE_POSTGRES_USER WITH PASSWORD '$SONARQUBE_POSTGRES_PASSWORD';
+        CREATE DATABASE $SONARQUBE_POSTGRES_DB;
+        GRANT ALL PRIVILEGES ON DATABASE $SONARQUBE_POSTGRES_DB TO $SONARQUBE_POSTGRES_USER;
+        ALTER ROLE $SONARQUBE_POSTGRES_USER CREATEROLE SUPERUSER;
+    EOSQL
+    echo
+fi
diff --git a/rabbitmq/Dockerfile b/rabbitmq/Dockerfile
index d79b4eda..1e232d47 100644
--- a/rabbitmq/Dockerfile
+++ b/rabbitmq/Dockerfile
@@ -1,7 +1,7 @@
-FROM rabbitmq
+FROM rabbitmq:alpine

 LABEL maintainer="Mahmoud Zalt "

 RUN rabbitmq-plugins enable --offline rabbitmq_management

-EXPOSE 15671 15672
+EXPOSE 4369 5671 5672 15671 15672 25672
diff --git a/redis-cluster/Dockerfile b/redis-cluster/Dockerfile
new file mode 100644
index 00000000..d610fc43
--- /dev/null
+++ b/redis-cluster/Dockerfile
@@ -0,0 +1,3 @@
+FROM grokzen/redis-cluster:latest
+
+LABEL maintainer="hareku "
diff --git a/redis-webui/Dockerfile b/redis-webui/Dockerfile
new file mode 100644
index 00000000..fb026acb
--- /dev/null
+++ b/redis-webui/Dockerfile
@@ -0,0 +1,3 @@
+FROM erikdubbelboer/phpredisadmin
+
+LABEL maintainer="ahkui "
diff --git a/redis/redis.conf b/redis/redis.conf
new file mode 100644
index 00000000..eb03c584
--- /dev/null
+++ b/redis/redis.conf
@@ -0,0 +1,1377 @@
+# Redis configuration file example.
+#
+# Note that in order to read the configuration file, Redis must be
+# started with the file path as first argument:
+#
+# ./redis-server /path/to/redis.conf

+# Note on units: when memory size is needed, it is possible to specify
+# it in the usual form of 1k 5GB 4M and so forth:
+#
+# 1k => 1000 bytes
+# 1kb => 1024 bytes
+# 1m => 1000000 bytes
+# 1mb => 1024*1024 bytes
+# 1g => 1000000000 bytes
+# 1gb => 1024*1024*1024 bytes
+#
+# units are case insensitive so 1GB 1Gb 1gB are all the same.
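+#
+# For example (illustrative, not part of the stock file): "maxmemory 100mb"
+# and "maxmemory 104857600" set the same 100*1024*1024-byte limit.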
+ +################################## INCLUDES ################################### + +# Include one or more other config files here. This is useful if you +# have a standard template that goes to all Redis servers but also need +# to customize a few per-server settings. Include files can include +# other files, so use this wisely. +# +# Notice option "include" won't be rewritten by command "CONFIG REWRITE" +# from admin or Redis Sentinel. Since Redis always uses the last processed +# line as value of a configuration directive, you'd better put includes +# at the beginning of this file to avoid overwriting config change at runtime. +# +# If instead you are interested in using includes to override configuration +# options, it is better to use include as the last line. +# +# include /path/to/local.conf +# include /path/to/other.conf + +################################## MODULES ##################################### + +# Load modules at startup. If the server is not able to load modules +# it will abort. It is possible to use multiple loadmodule directives. +# +# loadmodule /path/to/my_module.so +# loadmodule /path/to/other_module.so + +################################## NETWORK ##################################### + +# By default, if no "bind" configuration directive is specified, Redis listens +# for connections from all the network interfaces available on the server. +# It is possible to listen to just one or multiple selected interfaces using +# the "bind" configuration directive, followed by one or more IP addresses. +# +# Examples: +# +# bind 192.168.1.100 10.0.0.1 +# bind 127.0.0.1 ::1 +# +# ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the +# internet, binding to all the interfaces is dangerous and will expose the +# instance to everybody on the internet. So by default we uncomment the +# following bind directive, that will force Redis to listen only into +# the IPv4 loopback interface address (this means Redis will be able to +# accept connections only from clients running into the same computer it +# is running). +# +# IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES +# JUST COMMENT THE FOLLOWING LINE. +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +bind 127.0.0.1 + +# Protected mode is a layer of security protection, in order to avoid that +# Redis instances left open on the internet are accessed and exploited. +# +# When protected mode is on and if: +# +# 1) The server is not binding explicitly to a set of addresses using the +# "bind" directive. +# 2) No password is configured. +# +# The server only accepts connections from clients connecting from the +# IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain +# sockets. +# +# By default protected mode is enabled. You should disable it only if +# you are sure you want clients from other hosts to connect to Redis +# even if no authentication is configured, nor a specific set of interfaces +# are explicitly listed using the "bind" directive. +protected-mode yes + +# Accept connections on the specified port, default is 6379 (IANA #815344). +# If port 0 is specified Redis will not listen on a TCP socket. +port 6379 + +# TCP listen() backlog. +# +# In high requests-per-second environments you need an high backlog in order +# to avoid slow clients connections issues. 
Note that the Linux kernel +# will silently truncate it to the value of /proc/sys/net/core/somaxconn so +# make sure to raise both the value of somaxconn and tcp_max_syn_backlog +# in order to get the desired effect. +tcp-backlog 511 + +# Unix socket. +# +# Specify the path for the Unix socket that will be used to listen for +# incoming connections. There is no default, so Redis will not listen +# on a unix socket when not specified. +# +# unixsocket /tmp/redis.sock +# unixsocketperm 700 + +# Close the connection after a client is idle for N seconds (0 to disable) +timeout 0 + +# TCP keepalive. +# +# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence +# of communication. This is useful for two reasons: +# +# 1) Detect dead peers. +# 2) Take the connection alive from the point of view of network +# equipment in the middle. +# +# On Linux, the specified value (in seconds) is the period used to send ACKs. +# Note that to close the connection the double of the time is needed. +# On other kernels the period depends on the kernel configuration. +# +# A reasonable value for this option is 300 seconds, which is the new +# Redis default starting with Redis 3.2.1. +tcp-keepalive 300 + +################################# GENERAL ##################################### + +# By default Redis does not run as a daemon. Use 'yes' if you need it. +# Note that Redis will write a pid file in /var/run/redis.pid when daemonized. +daemonize no + +# If you run Redis from upstart or systemd, Redis can interact with your +# supervision tree. Options: +# supervised no - no supervision interaction +# supervised upstart - signal upstart by putting Redis into SIGSTOP mode +# supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET +# supervised auto - detect upstart or systemd method based on +# UPSTART_JOB or NOTIFY_SOCKET environment variables +# Note: these supervision methods only signal "process is ready." +# They do not enable continuous liveness pings back to your supervisor. +supervised no + +# If a pid file is specified, Redis writes it where specified at startup +# and removes it at exit. +# +# When the server runs non daemonized, no pid file is created if none is +# specified in the configuration. When the server is daemonized, the pid file +# is used even if not specified, defaulting to "/var/run/redis.pid". +# +# Creating a pid file is best effort: if Redis is not able to create it +# nothing bad happens, the server will start and run normally. +pidfile /var/run/redis_6379.pid + +# Specify the server verbosity level. +# This can be one of: +# debug (a lot of information, useful for development/testing) +# verbose (many rarely useful info, but not a mess like the debug level) +# notice (moderately verbose, what you want in production probably) +# warning (only very important / critical messages are logged) +loglevel notice + +# Specify the log file name. Also the empty string can be used to force +# Redis to log on the standard output. Note that if you use standard +# output for logging but daemonize, logs will be sent to /dev/null +logfile "" + +# To enable logging to the system logger, just set 'syslog-enabled' to yes, +# and optionally update the other syslog parameters to suit your needs. +# syslog-enabled no + +# Specify the syslog identity. +# syslog-ident redis + +# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. +# syslog-facility local0 + +# Set the number of databases. 
The default database is DB 0, you can select +# a different one on a per-connection basis using SELECT where +# dbid is a number between 0 and 'databases'-1 +databases 16 + +# By default Redis shows an ASCII art logo only when started to log to the +# standard output and if the standard output is a TTY. Basically this means +# that normally a logo is displayed only in interactive sessions. +# +# However it is possible to force the pre-4.0 behavior and always show a +# ASCII art logo in startup logs by setting the following option to yes. +always-show-logo yes + +################################ SNAPSHOTTING ################################ +# +# Save the DB on disk: +# +# save +# +# Will save the DB if both the given number of seconds and the given +# number of write operations against the DB occurred. +# +# In the example below the behaviour will be to save: +# after 900 sec (15 min) if at least 1 key changed +# after 300 sec (5 min) if at least 10 keys changed +# after 60 sec if at least 10000 keys changed +# +# Note: you can disable saving completely by commenting out all "save" lines. +# +# It is also possible to remove all the previously configured save +# points by adding a save directive with a single empty string argument +# like in the following example: +# +# save "" + +save 900 1 +save 300 10 +save 60 10000 + +# By default Redis will stop accepting writes if RDB snapshots are enabled +# (at least one save point) and the latest background save failed. +# This will make the user aware (in a hard way) that data is not persisting +# on disk properly, otherwise chances are that no one will notice and some +# disaster will happen. +# +# If the background saving process will start working again Redis will +# automatically allow writes again. +# +# However if you have setup your proper monitoring of the Redis server +# and persistence, you may want to disable this feature so that Redis will +# continue to work as usual even if there are problems with disk, +# permissions, and so forth. +stop-writes-on-bgsave-error yes + +# Compress string objects using LZF when dump .rdb databases? +# For default that's set to 'yes' as it's almost always a win. +# If you want to save some CPU in the saving child set it to 'no' but +# the dataset will likely be bigger if you have compressible values or keys. +rdbcompression yes + +# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. +# This makes the format more resistant to corruption but there is a performance +# hit to pay (around 10%) when saving and loading RDB files, so you can disable it +# for maximum performances. +# +# RDB files created with checksum disabled have a checksum of zero that will +# tell the loading code to skip the check. +rdbchecksum yes + +# The filename where to dump the DB +dbfilename dump.rdb + +# The working directory. +# +# The DB will be written inside this directory, with the filename specified +# above using the 'dbfilename' configuration directive. +# +# The Append Only File will also be created inside this directory. +# +# Note that you must specify a directory here, not a file name. +dir ./ + +################################# REPLICATION ################################# + +# Master-Replica replication. Use replicaof to make a Redis instance a copy of +# another Redis server. A few things to understand ASAP about Redis replication. 
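+#
+# (Editor's one-line illustration before the details below, hostname assumed
+# from a typical compose setup: "replicaof redis-master 6379" is all it takes
+# to turn this instance into a copy of another server.)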
+# +# +------------------+ +---------------+ +# | Master | ---> | Replica | +# | (receive writes) | | (exact copy) | +# +------------------+ +---------------+ +# +# 1) Redis replication is asynchronous, but you can configure a master to +# stop accepting writes if it appears to be not connected with at least +# a given number of replicas. +# 2) Redis replicas are able to perform a partial resynchronization with the +# master if the replication link is lost for a relatively small amount of +# time. You may want to configure the replication backlog size (see the next +# sections of this file) with a sensible value depending on your needs. +# 3) Replication is automatic and does not need user intervention. After a +# network partition replicas automatically try to reconnect to masters +# and resynchronize with them. +# +# replicaof + +# If the master is password protected (using the "requirepass" configuration +# directive below) it is possible to tell the replica to authenticate before +# starting the replication synchronization process, otherwise the master will +# refuse the replica request. +# +# masterauth + +# When a replica loses its connection with the master, or when the replication +# is still in progress, the replica can act in two different ways: +# +# 1) if replica-serve-stale-data is set to 'yes' (the default) the replica will +# still reply to client requests, possibly with out of date data, or the +# data set may just be empty if this is the first synchronization. +# +# 2) if replica-serve-stale-data is set to 'no' the replica will reply with +# an error "SYNC with master in progress" to all the kind of commands +# but to INFO, replicaOF, AUTH, PING, SHUTDOWN, REPLCONF, ROLE, CONFIG, +# SUBSCRIBE, UNSUBSCRIBE, PSUBSCRIBE, PUNSUBSCRIBE, PUBLISH, PUBSUB, +# COMMAND, POST, HOST: and LATENCY. +# +replica-serve-stale-data yes + +# You can configure a replica instance to accept writes or not. Writing against +# a replica instance may be useful to store some ephemeral data (because data +# written on a replica will be easily deleted after resync with the master) but +# may also cause problems if clients are writing to it because of a +# misconfiguration. +# +# Since Redis 2.6 by default replicas are read-only. +# +# Note: read only replicas are not designed to be exposed to untrusted clients +# on the internet. It's just a protection layer against misuse of the instance. +# Still a read only replica exports by default all the administrative commands +# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve +# security of read only replicas using 'rename-command' to shadow all the +# administrative / dangerous commands. +replica-read-only yes + +# Replication SYNC strategy: disk or socket. +# +# ------------------------------------------------------- +# WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY +# ------------------------------------------------------- +# +# New replicas and reconnecting replicas that are not able to continue the replication +# process just receiving differences, need to do what is called a "full +# synchronization". An RDB file is transmitted from the master to the replicas. +# The transmission can happen in two different ways: +# +# 1) Disk-backed: The Redis master creates a new process that writes the RDB +# file on disk. Later the file is transferred by the parent +# process to the replicas incrementally. 
+# 2) Diskless: The Redis master creates a new process that directly writes the +# RDB file to replica sockets, without touching the disk at all. +# +# With disk-backed replication, while the RDB file is generated, more replicas +# can be queued and served with the RDB file as soon as the current child producing +# the RDB file finishes its work. With diskless replication instead once +# the transfer starts, new replicas arriving will be queued and a new transfer +# will start when the current one terminates. +# +# When diskless replication is used, the master waits a configurable amount of +# time (in seconds) before starting the transfer in the hope that multiple replicas +# will arrive and the transfer can be parallelized. +# +# With slow disks and fast (large bandwidth) networks, diskless replication +# works better. +repl-diskless-sync no + +# When diskless replication is enabled, it is possible to configure the delay +# the server waits in order to spawn the child that transfers the RDB via socket +# to the replicas. +# +# This is important since once the transfer starts, it is not possible to serve +# new replicas arriving, that will be queued for the next RDB transfer, so the server +# waits a delay in order to let more replicas arrive. +# +# The delay is specified in seconds, and by default is 5 seconds. To disable +# it entirely just set it to 0 seconds and the transfer will start ASAP. +repl-diskless-sync-delay 5 + +# Replicas send PINGs to server in a predefined interval. It's possible to change +# this interval with the repl_ping_replica_period option. The default value is 10 +# seconds. +# +# repl-ping-replica-period 10 + +# The following option sets the replication timeout for: +# +# 1) Bulk transfer I/O during SYNC, from the point of view of replica. +# 2) Master timeout from the point of view of replicas (data, pings). +# 3) Replica timeout from the point of view of masters (REPLCONF ACK pings). +# +# It is important to make sure that this value is greater than the value +# specified for repl-ping-replica-period otherwise a timeout will be detected +# every time there is low traffic between the master and the replica. +# +# repl-timeout 60 + +# Disable TCP_NODELAY on the replica socket after SYNC? +# +# If you select "yes" Redis will use a smaller number of TCP packets and +# less bandwidth to send data to replicas. But this can add a delay for +# the data to appear on the replica side, up to 40 milliseconds with +# Linux kernels using a default configuration. +# +# If you select "no" the delay for data to appear on the replica side will +# be reduced but more bandwidth will be used for replication. +# +# By default we optimize for low latency, but in very high traffic conditions +# or when the master and replicas are many hops away, turning this to "yes" may +# be a good idea. +repl-disable-tcp-nodelay no + +# Set the replication backlog size. The backlog is a buffer that accumulates +# replica data when replicas are disconnected for some time, so that when a replica +# wants to reconnect again, often a full resync is not needed, but a partial +# resync is enough, just passing the portion of data the replica missed while +# disconnected. +# +# The bigger the replication backlog, the longer the time the replica can be +# disconnected and later be able to perform a partial resynchronization. +# +# The backlog is only allocated once there is at least a replica connected. 
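+#
+# (Editor's sizing sketch, figures assumed: a master writing about 1 MB/s
+# that should tolerate replicas disconnecting for up to 60 seconds needs
+# roughly 1 MB/s * 60 s = 60 MB of backlog, i.e. something like
+# "repl-backlog-size 64mb" rather than the 1mb default shown below.)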
+# +# repl-backlog-size 1mb + +# After a master has no longer connected replicas for some time, the backlog +# will be freed. The following option configures the amount of seconds that +# need to elapse, starting from the time the last replica disconnected, for +# the backlog buffer to be freed. +# +# Note that replicas never free the backlog for timeout, since they may be +# promoted to masters later, and should be able to correctly "partially +# resynchronize" with the replicas: hence they should always accumulate backlog. +# +# A value of 0 means to never release the backlog. +# +# repl-backlog-ttl 3600 + +# The replica priority is an integer number published by Redis in the INFO output. +# It is used by Redis Sentinel in order to select a replica to promote into a +# master if the master is no longer working correctly. +# +# A replica with a low priority number is considered better for promotion, so +# for instance if there are three replicas with priority 10, 100, 25 Sentinel will +# pick the one with priority 10, that is the lowest. +# +# However a special priority of 0 marks the replica as not able to perform the +# role of master, so a replica with priority of 0 will never be selected by +# Redis Sentinel for promotion. +# +# By default the priority is 100. +replica-priority 100 + +# It is possible for a master to stop accepting writes if there are less than +# N replicas connected, having a lag less or equal than M seconds. +# +# The N replicas need to be in "online" state. +# +# The lag in seconds, that must be <= the specified value, is calculated from +# the last ping received from the replica, that is usually sent every second. +# +# This option does not GUARANTEE that N replicas will accept the write, but +# will limit the window of exposure for lost writes in case not enough replicas +# are available, to the specified number of seconds. +# +# For example to require at least 3 replicas with a lag <= 10 seconds use: +# +# min-replicas-to-write 3 +# min-replicas-max-lag 10 +# +# Setting one or the other to 0 disables the feature. +# +# By default min-replicas-to-write is set to 0 (feature disabled) and +# min-replicas-max-lag is set to 10. + +# A Redis master is able to list the address and port of the attached +# replicas in different ways. For example the "INFO replication" section +# offers this information, which is used, among other tools, by +# Redis Sentinel in order to discover replica instances. +# Another place where this info is available is in the output of the +# "ROLE" command of a master. +# +# The listed IP and address normally reported by a replica is obtained +# in the following way: +# +# IP: The address is auto detected by checking the peer address +# of the socket used by the replica to connect with the master. +# +# Port: The port is communicated by the replica during the replication +# handshake, and is normally the port that the replica is using to +# listen for connections. +# +# However when port forwarding or Network Address Translation (NAT) is +# used, the replica may be actually reachable via different IP and port +# pairs. The following two options can be used by a replica in order to +# report to its master a specific set of IP and port, so that both INFO +# and ROLE will report those values. +# +# There is no need to use both the options if you need to override just +# the port or the IP address. 
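+#
+# (Docker-flavoured illustration by the editor, host address assumed: a
+# replica published on the host as 203.0.113.7:7001 would advertise itself
+# to its master with the two directives exemplified just below.)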
+# +# replica-announce-ip 5.5.5.5 +# replica-announce-port 1234 + +################################## SECURITY ################################### + +# Require clients to issue AUTH before processing any other +# commands. This might be useful in environments in which you do not trust +# others with access to the host running redis-server. +# +# This should stay commented out for backward compatibility and because most +# people do not need auth (e.g. they run their own servers). +# +# Warning: since Redis is pretty fast an outside user can try up to +# 150k passwords per second against a good box. This means that you should +# use a very strong password otherwise it will be very easy to break. +# +# requirepass foobared + +# Command renaming. +# +# It is possible to change the name of dangerous commands in a shared +# environment. For instance the CONFIG command may be renamed into something +# hard to guess so that it will still be available for internal-use tools +# but not available for general clients. +# +# Example: +# +# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 +# +# It is also possible to completely kill a command by renaming it into +# an empty string: +# +# rename-command CONFIG "" +# +# Please note that changing the name of commands that are logged into the +# AOF file or transmitted to replicas may cause problems. + +################################### CLIENTS #################################### + +# Set the max number of connected clients at the same time. By default +# this limit is set to 10000 clients, however if the Redis server is not +# able to configure the process file limit to allow for the specified limit +# the max number of allowed clients is set to the current file limit +# minus 32 (as Redis reserves a few file descriptors for internal uses). +# +# Once the limit is reached Redis will close all the new connections sending +# an error 'max number of clients reached'. +# +# maxclients 10000 + +############################## MEMORY MANAGEMENT ################################ + +# Set a memory usage limit to the specified amount of bytes. +# When the memory limit is reached Redis will try to remove keys +# according to the eviction policy selected (see maxmemory-policy). +# +# If Redis can't remove keys according to the policy, or if the policy is +# set to 'noeviction', Redis will start to reply with errors to commands +# that would use more memory, like SET, LPUSH, and so on, and will continue +# to reply to read-only commands like GET. +# +# This option is usually useful when using Redis as an LRU or LFU cache, or to +# set a hard memory limit for an instance (using the 'noeviction' policy). +# +# WARNING: If you have replicas attached to an instance with maxmemory on, +# the size of the output buffers needed to feed the replicas are subtracted +# from the used memory count, so that network problems / resyncs will +# not trigger a loop where keys are evicted, and in turn the output +# buffer of replicas is full with DELs of keys evicted triggering the deletion +# of more keys, and so forth until the database is completely emptied. +# +# In short... if you have replicas attached it is suggested that you set a lower +# limit for maxmemory so that there is some free RAM on the system for replica +# output buffers (but this is not needed if the policy is 'noeviction'). +# +# maxmemory + +# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory +# is reached. 
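+#
+# (Editor's illustration of a common cache-style pairing, values assumed:
+#
+#   maxmemory 256mb
+#   maxmemory-policy allkeys-lru
+# )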
You can select among eight behaviors:
+#
+# volatile-lru -> Evict using approximated LRU among the keys with an expire set.
+# allkeys-lru -> Evict any key using approximated LRU.
+# volatile-lfu -> Evict using approximated LFU among the keys with an expire set.
+# allkeys-lfu -> Evict any key using approximated LFU.
+# volatile-random -> Remove a random key among the ones with an expire set.
+# allkeys-random -> Remove a random key, any key.
+# volatile-ttl -> Remove the key with the nearest expire time (smallest TTL)
+# noeviction -> Don't evict anything, just return an error on write operations.
+#
+# LRU means Least Recently Used
+# LFU means Least Frequently Used
+#
+# LRU, LFU and volatile-ttl are all implemented using approximated
+# randomized algorithms.
+#
+# Note: with any of the above policies, Redis will return an error on write
+# operations when there are no suitable keys for eviction.
+#
+# At the date of writing, these commands are: set setnx setex append
+# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
+# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
+# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
+# getset mset msetnx exec sort
+#
+# The default is:
+#
+# maxmemory-policy noeviction
+
+# LRU, LFU and minimal TTL algorithms are not precise algorithms but approximated
+# algorithms (in order to save memory), so you can tune them for speed or
+# accuracy. By default Redis will check five keys and pick the one that was
+# least recently used; you can change the sample size using the following
+# configuration directive.
+#
+# The default of 5 produces good enough results. 10 approximates true LRU
+# very closely but costs more CPU. 3 is faster but not very accurate.
+#
+# maxmemory-samples 5
+
+# Starting from Redis 5, by default a replica will ignore its maxmemory setting
+# (unless it is promoted to master after a failover or manually). It means
+# that the eviction of keys will be handled only by the master, which sends the
+# DEL commands to the replica as keys are evicted on the master side.
+#
+# This behavior ensures that masters and replicas stay consistent, and is usually
+# what you want, however if your replica is writable, or you want the replica to have
+# a different memory setting, and you are sure all the writes performed to the
+# replica are idempotent, then you may change this default (but be sure to understand
+# what you are doing).
+#
+# Note that since the replica by default does not evict, it may end up using more
+# memory than the one set via maxmemory (there are certain buffers that may
+# be larger on the replica, or data structures may sometimes take more memory and so
+# forth). So make sure you monitor your replicas and make sure they have enough
+# memory to never hit a real out-of-memory condition before the master hits
+# the configured maxmemory setting.
+#
+# replica-ignore-maxmemory yes
+
+############################# LAZY FREEING ####################################
+
+# Redis has two primitives to delete keys. One is called DEL and is a blocking
+# deletion of the object. It means that the server stops processing new commands
+# in order to reclaim all the memory associated with an object in a synchronous
+# way. If the key deleted is associated with a small object, the time needed
+# in order to execute the DEL command is very small and comparable to most other
+# O(1) or O(log_N) commands in Redis.
However if the key is associated with an +# aggregated value containing millions of elements, the server can block for +# a long time (even seconds) in order to complete the operation. +# +# For the above reasons Redis also offers non blocking deletion primitives +# such as UNLINK (non blocking DEL) and the ASYNC option of FLUSHALL and +# FLUSHDB commands, in order to reclaim memory in background. Those commands +# are executed in constant time. Another thread will incrementally free the +# object in the background as fast as possible. +# +# DEL, UNLINK and ASYNC option of FLUSHALL and FLUSHDB are user-controlled. +# It's up to the design of the application to understand when it is a good +# idea to use one or the other. However the Redis server sometimes has to +# delete keys or flush the whole database as a side effect of other operations. +# Specifically Redis deletes objects independently of a user call in the +# following scenarios: +# +# 1) On eviction, because of the maxmemory and maxmemory policy configurations, +# in order to make room for new data, without going over the specified +# memory limit. +# 2) Because of expire: when a key with an associated time to live (see the +# EXPIRE command) must be deleted from memory. +# 3) Because of a side effect of a command that stores data on a key that may +# already exist. For example the RENAME command may delete the old key +# content when it is replaced with another one. Similarly SUNIONSTORE +# or SORT with STORE option may delete existing keys. The SET command +# itself removes any old content of the specified key in order to replace +# it with the specified string. +# 4) During replication, when a replica performs a full resynchronization with +# its master, the content of the whole database is removed in order to +# load the RDB file just transferred. +# +# In all the above cases the default is to delete objects in a blocking way, +# like if DEL was called. However you can configure each case specifically +# in order to instead release memory in a non-blocking way like if UNLINK +# was called, using the following configuration directives: + +lazyfree-lazy-eviction no +lazyfree-lazy-expire no +lazyfree-lazy-server-del no +replica-lazy-flush no + +############################## APPEND ONLY MODE ############################### + +# By default Redis asynchronously dumps the dataset on disk. This mode is +# good enough in many applications, but an issue with the Redis process or +# a power outage may result into a few minutes of writes lost (depending on +# the configured save points). +# +# The Append Only File is an alternative persistence mode that provides +# much better durability. For instance using the default data fsync policy +# (see later in the config file) Redis can lose just one second of writes in a +# dramatic event like a server power outage, or a single write if something +# wrong with the Redis process itself happens, but the operating system is +# still running correctly. +# +# AOF and RDB persistence can be enabled at the same time without problems. +# If the AOF is enabled on startup Redis will load the AOF, that is the file +# with the better durability guarantees. +# +# Please check http://redis.io/topics/persistence for more information. + +appendonly no + +# The name of the append only file (default: "appendonly.aof") + +appendfilename "appendonly.aof" + +# The fsync() call tells the Operating System to actually write data on disk +# instead of waiting for more data in the output buffer. 
Some OS will really flush +# data on disk, some other OS will just try to do it ASAP. +# +# Redis supports three different modes: +# +# no: don't fsync, just let the OS flush the data when it wants. Faster. +# always: fsync after every write to the append only log. Slow, Safest. +# everysec: fsync only one time every second. Compromise. +# +# The default is "everysec", as that's usually the right compromise between +# speed and data safety. It's up to you to understand if you can relax this to +# "no" that will let the operating system flush the output buffer when +# it wants, for better performances (but if you can live with the idea of +# some data loss consider the default persistence mode that's snapshotting), +# or on the contrary, use "always" that's very slow but a bit safer than +# everysec. +# +# More details please check the following article: +# http://antirez.com/post/redis-persistence-demystified.html +# +# If unsure, use "everysec". + +# appendfsync always +appendfsync everysec +# appendfsync no + +# When the AOF fsync policy is set to always or everysec, and a background +# saving process (a background save or AOF log background rewriting) is +# performing a lot of I/O against the disk, in some Linux configurations +# Redis may block too long on the fsync() call. Note that there is no fix for +# this currently, as even performing fsync in a different thread will block +# our synchronous write(2) call. +# +# In order to mitigate this problem it's possible to use the following option +# that will prevent fsync() from being called in the main process while a +# BGSAVE or BGREWRITEAOF is in progress. +# +# This means that while another child is saving, the durability of Redis is +# the same as "appendfsync none". In practical terms, this means that it is +# possible to lose up to 30 seconds of log in the worst scenario (with the +# default Linux settings). +# +# If you have latency problems turn this to "yes". Otherwise leave it as +# "no" that is the safest pick from the point of view of durability. + +no-appendfsync-on-rewrite no + +# Automatic rewrite of the append only file. +# Redis is able to automatically rewrite the log file implicitly calling +# BGREWRITEAOF when the AOF log size grows by the specified percentage. +# +# This is how it works: Redis remembers the size of the AOF file after the +# latest rewrite (if no rewrite has happened since the restart, the size of +# the AOF at startup is used). +# +# This base size is compared to the current size. If the current size is +# bigger than the specified percentage, the rewrite is triggered. Also +# you need to specify a minimal size for the AOF file to be rewritten, this +# is useful to avoid rewriting the AOF file even if the percentage increase +# is reached but it is still pretty small. +# +# Specify a percentage of zero in order to disable the automatic AOF +# rewrite feature. + +auto-aof-rewrite-percentage 100 +auto-aof-rewrite-min-size 64mb + +# An AOF file may be found to be truncated at the end during the Redis +# startup process, when the AOF data gets loaded back into memory. +# This may happen when the system where Redis is running +# crashes, especially when an ext4 filesystem is mounted without the +# data=ordered option (however this can't happen when Redis itself +# crashes or aborts but the operating system still works correctly). 
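+#
+# (Editor's note, for illustration: such a truncated file can be repaired
+# offline with the bundled tool, e.g. "redis-check-aof --fix appendonly.aof",
+# which the text below also refers to.)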
+# +# Redis can either exit with an error when this happens, or load as much +# data as possible (the default now) and start if the AOF file is found +# to be truncated at the end. The following option controls this behavior. +# +# If aof-load-truncated is set to yes, a truncated AOF file is loaded and +# the Redis server starts emitting a log to inform the user of the event. +# Otherwise if the option is set to no, the server aborts with an error +# and refuses to start. When the option is set to no, the user requires +# to fix the AOF file using the "redis-check-aof" utility before to restart +# the server. +# +# Note that if the AOF file will be found to be corrupted in the middle +# the server will still exit with an error. This option only applies when +# Redis will try to read more data from the AOF file but not enough bytes +# will be found. +aof-load-truncated yes + +# When rewriting the AOF file, Redis is able to use an RDB preamble in the +# AOF file for faster rewrites and recoveries. When this option is turned +# on the rewritten AOF file is composed of two different stanzas: +# +# [RDB file][AOF tail] +# +# When loading Redis recognizes that the AOF file starts with the "REDIS" +# string and loads the prefixed RDB file, and continues loading the AOF +# tail. +aof-use-rdb-preamble yes + +################################ LUA SCRIPTING ############################### + +# Max execution time of a Lua script in milliseconds. +# +# If the maximum execution time is reached Redis will log that a script is +# still in execution after the maximum allowed time and will start to +# reply to queries with an error. +# +# When a long running script exceeds the maximum execution time only the +# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be +# used to stop a script that did not yet called write commands. The second +# is the only way to shut down the server in the case a write command was +# already issued by the script but the user doesn't want to wait for the natural +# termination of the script. +# +# Set it to 0 or a negative value for unlimited execution without warnings. +lua-time-limit 5000 + +################################ REDIS CLUSTER ############################### +# +# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# WARNING EXPERIMENTAL: Redis Cluster is considered to be stable code, however +# in order to mark it as "mature" we need to wait for a non trivial percentage +# of users to deploy it in production. +# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# +# Normal Redis instances can't be part of a Redis Cluster; only nodes that are +# started as cluster nodes can. In order to start a Redis instance as a +# cluster node enable the cluster support uncommenting the following: +# +# cluster-enabled yes + +# Every cluster node has a cluster configuration file. This file is not +# intended to be edited by hand. It is created and updated by Redis nodes. +# Every Redis Cluster node requires a different cluster configuration file. +# Make sure that instances running in the same system do not have +# overlapping cluster configuration file names. +# +# cluster-config-file nodes-6379.conf + +# Cluster node timeout is the amount of milliseconds a node must be unreachable +# for it to be considered in failure state. +# Most other internal time limits are multiple of the node timeout. 
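+#
+# (Editor's illustration of bootstrapping a cluster once six such nodes are
+# running -- the addresses are assumptions:
+#
+#   redis-cli --cluster create 10.0.0.1:6379 10.0.0.2:6379 10.0.0.3:6379 \
+#     10.0.0.4:6379 10.0.0.5:6379 10.0.0.6:6379 --cluster-replicas 1
+# )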
+# +# cluster-node-timeout 15000 + +# A replica of a failing master will avoid to start a failover if its data +# looks too old. +# +# There is no simple way for a replica to actually have an exact measure of +# its "data age", so the following two checks are performed: +# +# 1) If there are multiple replicas able to failover, they exchange messages +# in order to try to give an advantage to the replica with the best +# replication offset (more data from the master processed). +# Replicas will try to get their rank by offset, and apply to the start +# of the failover a delay proportional to their rank. +# +# 2) Every single replica computes the time of the last interaction with +# its master. This can be the last ping or command received (if the master +# is still in the "connected" state), or the time that elapsed since the +# disconnection with the master (if the replication link is currently down). +# If the last interaction is too old, the replica will not try to failover +# at all. +# +# The point "2" can be tuned by user. Specifically a replica will not perform +# the failover if, since the last interaction with the master, the time +# elapsed is greater than: +# +# (node-timeout * replica-validity-factor) + repl-ping-replica-period +# +# So for example if node-timeout is 30 seconds, and the replica-validity-factor +# is 10, and assuming a default repl-ping-replica-period of 10 seconds, the +# replica will not try to failover if it was not able to talk with the master +# for longer than 310 seconds. +# +# A large replica-validity-factor may allow replicas with too old data to failover +# a master, while a too small value may prevent the cluster from being able to +# elect a replica at all. +# +# For maximum availability, it is possible to set the replica-validity-factor +# to a value of 0, which means, that replicas will always try to failover the +# master regardless of the last time they interacted with the master. +# (However they'll always try to apply a delay proportional to their +# offset rank). +# +# Zero is the only value able to guarantee that when all the partitions heal +# the cluster will always be able to continue. +# +# cluster-replica-validity-factor 10 + +# Cluster replicas are able to migrate to orphaned masters, that are masters +# that are left without working replicas. This improves the cluster ability +# to resist to failures as otherwise an orphaned master can't be failed over +# in case of failure if it has no working replicas. +# +# Replicas migrate to orphaned masters only if there are still at least a +# given number of other working replicas for their old master. This number +# is the "migration barrier". A migration barrier of 1 means that a replica +# will migrate only if there is at least 1 other working replica for its master +# and so forth. It usually reflects the number of replicas you want for every +# master in your cluster. +# +# Default is 1 (replicas migrate only if their masters remain with at least +# one replica). To disable migration just set it to a very large value. +# A value of 0 can be set but is useful only for debugging and dangerous +# in production. +# +# cluster-migration-barrier 1 + +# By default Redis Cluster nodes stop accepting queries if they detect there +# is at least an hash slot uncovered (no available node is serving it). +# This way if the cluster is partially down (for example a range of hash slots +# are no longer covered) all the cluster becomes, eventually, unavailable. 
+# It automatically returns available as soon as all the slots are covered again. +# +# However sometimes you want the subset of the cluster which is working, +# to continue to accept queries for the part of the key space that is still +# covered. In order to do so, just set the cluster-require-full-coverage +# option to no. +# +# cluster-require-full-coverage yes + +# This option, when set to yes, prevents replicas from trying to failover its +# master during master failures. However the master can still perform a +# manual failover, if forced to do so. +# +# This is useful in different scenarios, especially in the case of multiple +# data center operations, where we want one side to never be promoted if not +# in the case of a total DC failure. +# +# cluster-replica-no-failover no + +# In order to setup your cluster make sure to read the documentation +# available at http://redis.io web site. + +########################## CLUSTER DOCKER/NAT support ######################## + +# In certain deployments, Redis Cluster nodes address discovery fails, because +# addresses are NAT-ted or because ports are forwarded (the typical case is +# Docker and other containers). +# +# In order to make Redis Cluster working in such environments, a static +# configuration where each node knows its public address is needed. The +# following two options are used for this scope, and are: +# +# * cluster-announce-ip +# * cluster-announce-port +# * cluster-announce-bus-port +# +# Each instruct the node about its address, client port, and cluster message +# bus port. The information is then published in the header of the bus packets +# so that other nodes will be able to correctly map the address of the node +# publishing the information. +# +# If the above options are not used, the normal Redis Cluster auto-detection +# will be used instead. +# +# Note that when remapped, the bus port may not be at the fixed offset of +# clients port + 10000, so you can specify any port and bus-port depending +# on how they get remapped. If the bus-port is not set, a fixed offset of +# 10000 will be used as usually. +# +# Example: +# +# cluster-announce-ip 10.1.1.5 +# cluster-announce-port 6379 +# cluster-announce-bus-port 6380 + +################################## SLOW LOG ################################### + +# The Redis Slow Log is a system to log queries that exceeded a specified +# execution time. The execution time does not include the I/O operations +# like talking with the client, sending the reply and so forth, +# but just the time needed to actually execute the command (this is the only +# stage of command execution where the thread is blocked and can not serve +# other requests in the meantime). +# +# You can configure the slow log with two parameters: one tells Redis +# what is the execution time, in microseconds, to exceed in order for the +# command to get logged, and the other parameter is the length of the +# slow log. When a new command is logged the oldest one is removed from the +# queue of logged commands. + +# The following time is expressed in microseconds, so 1000000 is equivalent +# to one second. Note that a negative number disables the slow log, while +# a value of zero forces the logging of every command. +slowlog-log-slower-than 10000 + +# There is no limit to this length. Just be aware that it will consume memory. +# You can reclaim memory used by the slow log with SLOWLOG RESET. 
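+#
+# (For illustration, inspecting and clearing the log from redis-cli:
+#
+#   SLOWLOG GET 10   -- fetch the ten most recent slow entries
+#   SLOWLOG RESET    -- discard them and reclaim that memory
+# )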
+slowlog-max-len 128 + +################################ LATENCY MONITOR ############################## + +# The Redis latency monitoring subsystem samples different operations +# at runtime in order to collect data related to possible sources of +# latency of a Redis instance. +# +# Via the LATENCY command this information is available to the user that can +# print graphs and obtain reports. +# +# The system only logs operations that were performed in a time equal or +# greater than the amount of milliseconds specified via the +# latency-monitor-threshold configuration directive. When its value is set +# to zero, the latency monitor is turned off. +# +# By default latency monitoring is disabled since it is mostly not needed +# if you don't have latency issues, and collecting data has a performance +# impact, that while very small, can be measured under big load. Latency +# monitoring can easily be enabled at runtime using the command +# "CONFIG SET latency-monitor-threshold " if needed. +latency-monitor-threshold 0 + +############################# EVENT NOTIFICATION ############################## + +# Redis can notify Pub/Sub clients about events happening in the key space. +# This feature is documented at http://redis.io/topics/notifications +# +# For instance if keyspace events notification is enabled, and a client +# performs a DEL operation on key "foo" stored in the Database 0, two +# messages will be published via Pub/Sub: +# +# PUBLISH __keyspace@0__:foo del +# PUBLISH __keyevent@0__:del foo +# +# It is possible to select the events that Redis will notify among a set +# of classes. Every class is identified by a single character: +# +# K Keyspace events, published with __keyspace@__ prefix. +# E Keyevent events, published with __keyevent@__ prefix. +# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ... +# $ String commands +# l List commands +# s Set commands +# h Hash commands +# z Sorted set commands +# x Expired events (events generated every time a key expires) +# e Evicted events (events generated when a key is evicted for maxmemory) +# A Alias for g$lshzxe, so that the "AKE" string means all the events. +# +# The "notify-keyspace-events" takes as argument a string that is composed +# of zero or multiple characters. The empty string means that notifications +# are disabled. +# +# Example: to enable list and generic events, from the point of view of the +# event name, use: +# +# notify-keyspace-events Elg +# +# Example 2: to get the stream of the expired keys subscribing to channel +# name __keyevent@0__:expired use: +# +# notify-keyspace-events Ex +# +# By default all notifications are disabled because most users don't need +# this feature and the feature has some overhead. Note that if you don't +# specify at least one of K or E, no events will be delivered. +notify-keyspace-events "" + +############################### ADVANCED CONFIG ############################### + +# Hashes are encoded using a memory efficient data structure when they have a +# small number of entries, and the biggest entry does not exceed a given +# threshold. These thresholds can be configured using the following directives. +hash-max-ziplist-entries 512 +hash-max-ziplist-value 64 + +# Lists are also encoded in a special way to save a lot of space. +# The number of entries allowed per internal list node can be specified +# as a fixed maximum size or a maximum number of elements. 
+# For a fixed maximum size, use -5 through -1, meaning: +# -5: max size: 64 Kb <-- not recommended for normal workloads +# -4: max size: 32 Kb <-- not recommended +# -3: max size: 16 Kb <-- probably not recommended +# -2: max size: 8 Kb <-- good +# -1: max size: 4 Kb <-- good +# Positive numbers mean store up to _exactly_ that number of elements +# per list node. +# The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size), +# but if your use case is unique, adjust the settings as necessary. +list-max-ziplist-size -2 + +# Lists may also be compressed. +# Compress depth is the number of quicklist ziplist nodes from *each* side of +# the list to *exclude* from compression. The head and tail of the list +# are always uncompressed for fast push/pop operations. Settings are: +# 0: disable all list compression +# 1: depth 1 means "don't start compressing until after 1 node into the list, +# going from either the head or tail" +# So: [head]->node->node->...->node->[tail] +# [head], [tail] will always be uncompressed; inner nodes will compress. +# 2: [head]->[next]->node->node->...->node->[prev]->[tail] +# 2 here means: don't compress head or head->next or tail->prev or tail, +# but compress all nodes between them. +# 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail] +# etc. +list-compress-depth 0 + +# Sets have a special encoding in just one case: when a set is composed +# of just strings that happen to be integers in radix 10 in the range +# of 64 bit signed integers. +# The following configuration setting sets the limit in the size of the +# set in order to use this special memory saving encoding. +set-max-intset-entries 512 + +# Similarly to hashes and lists, sorted sets are also specially encoded in +# order to save a lot of space. This encoding is only used when the length and +# elements of a sorted set are below the following limits: +zset-max-ziplist-entries 128 +zset-max-ziplist-value 64 + +# HyperLogLog sparse representation bytes limit. The limit includes the +# 16 bytes header. When an HyperLogLog using the sparse representation crosses +# this limit, it is converted into the dense representation. +# +# A value greater than 16000 is totally useless, since at that point the +# dense representation is more memory efficient. +# +# The suggested value is ~ 3000 in order to have the benefits of +# the space efficient encoding without slowing down too much PFADD, +# which is O(N) with the sparse encoding. The value can be raised to +# ~ 10000 when CPU is not a concern, but space is, and the data set is +# composed of many HyperLogLogs with cardinality in the 0 - 15000 range. +hll-sparse-max-bytes 3000 + +# Streams macro node max size / items. The stream data structure is a radix +# tree of big nodes that encode multiple items inside. Using this configuration +# it is possible to configure how big a single node can be in bytes, and the +# maximum number of items it may contain before switching to a new node when +# appending new stream entries. If any of the following settings are set to +# zero, the limit is ignored, so for instance it is possible to set just a +# max entires limit by setting max-bytes to 0 and max-entries to the desired +# value. +stream-node-max-bytes 4096 +stream-node-max-entries 100 + +# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in +# order to help rehashing the main Redis hash table (the one mapping top-level +# keys to values). 
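+#
+# (Editor's aside on the encoding thresholds above, key name assumed: the
+# representation currently used by a value can be checked with
+# "redis-cli object encoding myhash", which answers e.g. "ziplist" or
+# "hashtable".)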
The hash table implementation Redis uses (see dict.c)
+# performs a lazy rehashing: the more operations you run against a hash table
+# that is rehashing, the more rehashing "steps" are performed, so if the
+# server is idle the rehashing is never complete and some more memory is used
+# by the hash table.
+#
+# The default is to use this millisecond 10 times every second in order to
+# actively rehash the main dictionaries, freeing memory when possible.
+#
+# If unsure:
+# use "activerehashing no" if you have hard latency requirements and it is
+# not a good thing in your environment that Redis can reply from time to time
+# to queries with 2 milliseconds delay.
+#
+# use "activerehashing yes" if you don't have such hard requirements but
+# want to free memory asap when possible.
+activerehashing yes
+
+# The client output buffer limits can be used to force disconnection of clients
+# that are not reading data from the server fast enough for some reason (a
+# common reason is that a Pub/Sub client can't consume messages as fast as the
+# publisher can produce them).
+#
+# The limit can be set differently for the three different classes of clients:
+#
+# normal -> normal clients including MONITOR clients
+# replica -> replica clients
+# pubsub -> clients subscribed to at least one pubsub channel or pattern
+#
+# The syntax of every client-output-buffer-limit directive is the following:
+#
+# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
+#
+# A client is immediately disconnected once the hard limit is reached, or if
+# the soft limit is reached and remains reached for the specified number of
+# seconds (continuously).
+# So for instance if the hard limit is 32 megabytes and the soft limit is
+# 16 megabytes / 10 seconds, the client will get disconnected immediately
+# if the size of the output buffers reaches 32 megabytes, but will also get
+# disconnected if the client reaches 16 megabytes and continuously stays over
+# the limit for 10 seconds.
+#
+# By default normal clients are not limited because they don't receive data
+# without asking (in a push way), but just after a request, so only
+# asynchronous clients may create a scenario where data is requested faster
+# than it can be read.
+#
+# Instead there is a default limit for pubsub and replica clients, since
+# subscribers and replicas receive data in a push fashion.
+#
+# Both the hard and the soft limit can be disabled by setting them to zero.
+client-output-buffer-limit normal 0 0 0
+client-output-buffer-limit replica 256mb 64mb 60
+client-output-buffer-limit pubsub 32mb 8mb 60
+
+# Client query buffers accumulate new commands. They are limited to a fixed
+# amount by default so that a protocol desynchronization (for instance due
+# to a bug in the client) cannot lead to unbounded memory usage in the query
+# buffer. However you can configure it here if you have very special needs,
+# such as huge multi/exec requests.
+#
+# client-query-buffer-limit 1gb
+
+# In the Redis protocol, bulk requests, that is, elements representing single
+# strings, are normally limited to 512 mb. However you can change this limit
+# here.
+#
+# proto-max-bulk-len 512mb
+
+# Redis calls an internal function to perform many background tasks, like
+# closing connections of clients that timed out, purging expired keys that are
+# never requested, and so forth.
+#
+# Not all tasks are performed with the same frequency, but Redis checks for
+# tasks to perform according to the specified "hz" value.
+#
+# By default "hz" is set to 10.
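+#
+# (It can also be changed on a live instance -- editor's illustration:
+# "CONFIG SET hz 50" takes effect immediately, no restart needed.)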
Raising the value will use more CPU when +# Redis is idle, but at the same time will make Redis more responsive when +# there are many keys expiring at the same time, and timeouts may be +# handled with more precision. +# +# The range is between 1 and 500, however a value over 100 is usually not +# a good idea. Most users should use the default of 10 and raise this up to +# 100 only in environments where very low latency is required. +hz 10 + +# Normally it is useful to have an HZ value which is proportional to the +# number of clients connected. This is useful in order, for instance, to +# avoid too many clients are processed for each background task invocation +# in order to avoid latency spikes. +# +# Since the default HZ value by default is conservatively set to 10, Redis +# offers, and enables by default, the ability to use an adaptive HZ value +# which will temporary raise when there are many connected clients. +# +# When dynamic HZ is enabled, the actual configured HZ will be used as +# as a baseline, but multiples of the configured HZ value will be actually +# used as needed once more clients are connected. In this way an idle +# instance will use very little CPU time while a busy instance will be +# more responsive. +dynamic-hz yes + +# When a child rewrites the AOF file, if the following option is enabled +# the file will be fsync-ed every 32 MB of data generated. This is useful +# in order to commit the file to the disk more incrementally and avoid +# big latency spikes. +aof-rewrite-incremental-fsync yes + +# When redis saves RDB file, if the following option is enabled +# the file will be fsync-ed every 32 MB of data generated. This is useful +# in order to commit the file to the disk more incrementally and avoid +# big latency spikes. +rdb-save-incremental-fsync yes + +# Redis LFU eviction (see maxmemory setting) can be tuned. However it is a good +# idea to start with the default settings and only change them after investigating +# how to improve the performances and how the keys LFU change over time, which +# is possible to inspect via the OBJECT FREQ command. +# +# There are two tunable parameters in the Redis LFU implementation: the +# counter logarithm factor and the counter decay time. It is important to +# understand what the two parameters mean before changing them. +# +# The LFU counter is just 8 bits per key, it's maximum value is 255, so Redis +# uses a probabilistic increment with logarithmic behavior. Given the value +# of the old counter, when a key is accessed, the counter is incremented in +# this way: +# +# 1. A random number R between 0 and 1 is extracted. +# 2. A probability P is calculated as 1/(old_value*lfu_log_factor+1). +# 3. The counter is incremented only if R < P. +# +# The default lfu-log-factor is 10. 
This is a table of how the frequency +# counter changes with a different number of accesses with different +# logarithmic factors: +# +# +--------+------------+------------+------------+------------+------------+ +# | factor | 100 hits | 1000 hits | 100K hits | 1M hits | 10M hits | +# +--------+------------+------------+------------+------------+------------+ +# | 0 | 104 | 255 | 255 | 255 | 255 | +# +--------+------------+------------+------------+------------+------------+ +# | 1 | 18 | 49 | 255 | 255 | 255 | +# +--------+------------+------------+------------+------------+------------+ +# | 10 | 10 | 18 | 142 | 255 | 255 | +# +--------+------------+------------+------------+------------+------------+ +# | 100 | 8 | 11 | 49 | 143 | 255 | +# +--------+------------+------------+------------+------------+------------+ +# +# NOTE: The above table was obtained by running the following commands: +# +# redis-benchmark -n 1000000 incr foo +# redis-cli object freq foo +# +# NOTE 2: The counter initial value is 5 in order to give new objects a chance +# to accumulate hits. +# +# The counter decay time is the time, in minutes, that must elapse in order +# for the key counter to be divided by two (or decremented if it has a value +# less <= 10). +# +# The default value for the lfu-decay-time is 1. A Special value of 0 means to +# decay the counter every time it happens to be scanned. +# +# lfu-log-factor 10 +# lfu-decay-time 1 + +########################### ACTIVE DEFRAGMENTATION ####################### +# +# WARNING THIS FEATURE IS EXPERIMENTAL. However it was stress tested +# even in production and manually tested by multiple engineers for some +# time. +# +# What is active defragmentation? +# ------------------------------- +# +# Active (online) defragmentation allows a Redis server to compact the +# spaces left between small allocations and deallocations of data in memory, +# thus allowing to reclaim back memory. +# +# Fragmentation is a natural process that happens with every allocator (but +# less so with Jemalloc, fortunately) and certain workloads. Normally a server +# restart is needed in order to lower the fragmentation, or at least to flush +# away all the data and create it again. However thanks to this feature +# implemented by Oran Agra for Redis 4.0 this process can happen at runtime +# in an "hot" way, while the server is running. +# +# Basically when the fragmentation is over a certain level (see the +# configuration options below) Redis will start to create new copies of the +# values in contiguous memory regions by exploiting certain specific Jemalloc +# features (in order to understand if an allocation is causing fragmentation +# and to allocate it in a better place), and at the same time, will release the +# old copies of the data. This process, repeated incrementally for all the keys +# will cause the fragmentation to drop back to normal values. +# +# Important things to understand: +# +# 1. This feature is disabled by default, and only works if you compiled Redis +# to use the copy of Jemalloc we ship with the source code of Redis. +# This is the default with Linux builds. +# +# 2. You never need to enable this feature if you don't have fragmentation +# issues. +# +# 3. Once you experience fragmentation, you can enable this feature when +# needed with the command "CONFIG SET activedefrag yes". +# +# The configuration parameters are able to fine tune the behavior of the +# defragmentation process. 
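+#
+# (Editor's illustration, assuming a jemalloc build: the feature can be
+# switched on against a running server with "CONFIG SET activedefrag yes"
+# and its effect watched via the mem_fragmentation_ratio field of
+# "INFO memory".)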
If you are not sure about what they mean it is
+# a good idea to leave the defaults untouched.
+
+# Enable active defragmentation
+# activedefrag yes
+
+# Minimum amount of fragmentation waste to start active defrag
+# active-defrag-ignore-bytes 100mb
+
+# Minimum percentage of fragmentation to start active defrag
+# active-defrag-threshold-lower 10
+
+# Maximum percentage of fragmentation at which we use maximum effort
+# active-defrag-threshold-upper 100
+
+# Minimal effort for defrag in CPU percentage
+# active-defrag-cycle-min 5
+
+# Maximal effort for defrag in CPU percentage
+# active-defrag-cycle-max 75
+
+# Maximum number of set/hash/zset/list fields that will be processed from
+# the main dictionary scan
+# active-defrag-max-scan-fields 1000
diff --git a/rethinkdb/Dockerfile b/rethinkdb/Dockerfile
index f7db9a1f..f905769b 100644
--- a/rethinkdb/Dockerfile
+++ b/rethinkdb/Dockerfile
@@ -4,6 +4,13 @@ LABEL maintainer="Cristian Mello "

 VOLUME /data/rethinkdb_data

+# Necessary for the rethinkdb backup tool
+RUN apt-get -y update \
+  && apt-get -y upgrade \
+  && apt-get -y install python-pip \
+  && pip install rethinkdb \
+  && rm -rf /var/lib/apt/lists/*
+
 RUN cp /etc/rethinkdb/default.conf.sample /etc/rethinkdb/instances.d/instance1.conf

 CMD ["rethinkdb", "--bind", "all"]
diff --git a/solr/Dockerfile b/solr/Dockerfile
index c133a6a2..ca5baff7 100644
--- a/solr/Dockerfile
+++ b/solr/Dockerfile
@@ -18,7 +18,7 @@ ENV SOLR_DATAIMPORTHANDLER_MSSQL ${SOLR_DATAIMPORTHANDLER_MSSQL}
 # download mssql connector for dataimporthandler
 RUN if [ ${SOLR_DATAIMPORTHANDLER_MSSQL} = true ]; then \
     curl -L -o /tmp/mssql-jdbc-7.0.0.jre8.jar "https://github.com/Microsoft/mssql-jdbc/releases/download/v7.0.0/mssql-jdbc-7.0.0.jre8.jar" \
-    && mkdir /opt/solr/contrib/dataimporthandler/lib \
+    && mkdir -p /opt/solr/contrib/dataimporthandler/lib \
    && mv /tmp/mssql-jdbc-7.0.0.jre8.jar "/opt/solr/contrib/dataimporthandler/lib/mssql-jdbc-7.0.0.jre8.jar" \
    ;fi
diff --git a/sonarqube/Dockerfile b/sonarqube/Dockerfile
new file mode 100644
index 00000000..7b32ead3
--- /dev/null
+++ b/sonarqube/Dockerfile
@@ -0,0 +1,3 @@
+FROM sonarqube:latest
+
+LABEL maintainer="xiagw "
diff --git a/traefik/Dockerfile b/traefik/Dockerfile
new file mode 100644
index 00000000..73825fd4
--- /dev/null
+++ b/traefik/Dockerfile
@@ -0,0 +1,7 @@
+FROM traefik:1.7.5-alpine
+
+LABEL maintainer="Luis Coutinho "
+
+COPY traefik.toml acme.json /
+
+RUN chmod 600 /acme.json
diff --git a/traefik/acme.json b/traefik/acme.json
new file mode 100644
index 00000000..e69de29b
diff --git a/traefik/traefik.toml b/traefik/traefik.toml
new file mode 100644
index 00000000..5875b94c
--- /dev/null
+++ b/traefik/traefik.toml
@@ -0,0 +1,23 @@
+defaultEntryPoints = ["http", "https"]
+
+[entryPoints]
+  [entryPoints.http]
+  address = ":80"
+    [entryPoints.http.redirect]
+    entryPoint = "https"
+  [entryPoints.https]
+  address = ":443"
+    [entryPoints.https.tls]
+
+[web]
+address = ":8080"
+[acme]
+email = "email@example.org"
+storage = "acme.json"
+entryPoint = "https"
+onHostRule = true
+  [acme.httpChallenge]
+  entryPoint = "http"
+
+[[acme.domains]]
+  main = "localhost"
diff --git a/travis-build.sh b/travis-build.sh
index eeee67ba..e773b823 100755
--- a/travis-build.sh
+++ b/travis-build.sh
@@ -16,9 +16,23 @@ if [ -n "${PHP_VERSION}" ]; then
    sed -i -- 's/=false/=true/g' .env
    sed -i -- 's/PHPDBG=true/PHPDBG=false/g' .env
    if [ "${PHP_VERSION}" == "5.6" ]; then
-        sed -i -- 's/^AEROSPIKE_PHP_REPOSITORY=/##AEROSPIKE_PHP_REPOSITORY=/g' .env
-        sed -i -- 's/^# 
AEROSPIKE_PHP_REPOSITORY=/AEROSPIKE_PHP_REPOSITORY=/g' .env
+ # The Aerospike C Client SDK 4.0.7 does not support Debian 9.6
+ # https://github.com/aerospike/aerospike-client-php5/issues/145
+ sed -i -- 's/PHP_FPM_INSTALL_AEROSPIKE=true/PHP_FPM_INSTALL_AEROSPIKE=false/g' .env
fi
+ if [ "${PHP_VERSION}" == "7.3" ]; then
+ # The V8JS extension does not yet support PHP 7.3.
+ sed -i -- 's/WORKSPACE_INSTALL_V8JS=true/WORKSPACE_INSTALL_V8JS=false/g' .env
+ # The ssh2 extension does not yet support PHP 7.3.
+ sed -i -- 's/PHP_FPM_INSTALL_SSH2=true/PHP_FPM_INSTALL_SSH2=false/g' .env
+ # The xdebug extension does not yet support PHP 7.3.
+ sed -i -- 's/PHP_FPM_INSTALL_XDEBUG=true/PHP_FPM_INSTALL_XDEBUG=false/g' .env
+ # The memcached extension does not yet support PHP 7.3.
+ sed -i -- 's/PHP_FPM_INSTALL_MEMCACHED=true/PHP_FPM_INSTALL_MEMCACHED=false/g' .env
+ fi
+
+ sed -i -- 's/CHANGE_SOURCE=true/CHANGE_SOURCE=false/g' .env
+
cat .env
docker-compose build ${BUILD_SERVICE}
docker images
diff --git a/workspace/Dockerfile b/workspace/Dockerfile
index 8e65a2d3..cbbeb2e9 100644
--- a/workspace/Dockerfile
+++ b/workspace/Dockerfile
@@ -14,7 +14,8 @@

ARG LARADOCK_PHP_VERSION

-FROM laradock/workspace:2.2-${LARADOCK_PHP_VERSION}
+# FROM laradock/workspace:2.2-${LARADOCK_PHP_VERSION}
+FROM letsdockerize/laradock-workspace:2.4-${LARADOCK_PHP_VERSION}

LABEL maintainer="Mahmoud Zalt "

@@ -37,22 +38,31 @@ ARG PGID=1000
ENV PGID ${PGID}

# always run apt update when start and after add new source list, then clean up at end.
-RUN apt-get update -yqq && \
+RUN set -xe; \
+ apt-get update -yqq && \
pecl channel-update pecl.php.net && \
groupadd -g ${PGID} laradock && \
useradd -u ${PUID} -g laradock -m laradock -G docker_env && \
- usermod -p "*" laradock
-
-#
-#--------------------------------------------------------------------------
-# Mandatory Software's Installation
-#--------------------------------------------------------------------------
-#
-# Mandatory Software's such as ("php-cli", "git", "vim", ....) are
-# installed on the base image 'laradock/workspace' image. If you want
-# to add more Software's or remove existing one, you need to edit the
-# base image (https://github.com/Laradock/workspace).
-#
+ usermod -p "*" laradock -s /bin/bash && \
+ apt-get install -yqq \
+ apt-utils \
+ #
+ #--------------------------------------------------------------------------
+ # Mandatory Software Installation
+ #--------------------------------------------------------------------------
+ #
+ # Mandatory software such as ("php-cli", "git", "vim", ....) is
+ # installed on the base image 'laradock/workspace'. If you want
+ # to add more software or remove existing ones, you need to edit the
+ # base image (https://github.com/Laradock/workspace).
+ #
+ # the next lines are here because there is no auto build on Docker Hub; see https://github.com/laradock/laradock/pull/1903#issuecomment-463142846
+ libzip-dev zip unzip \
+ # Install the zip extension
+ php${LARADOCK_PHP_VERSION}-zip \
+ # nasm
+ nasm && \
+ php -m | grep -q 'zip'

#
#--------------------------------------------------------------------------
@@ -90,14 +100,14 @@ RUN sed -i 's/\r//' /root/aliases.sh && \
echo "" >> ~/.bashrc && \
echo "# Load Custom Aliases" >> ~/.bashrc && \
echo "source ~/aliases.sh" >> ~/.bashrc && \
- echo "" >> ~/.bashrc
+ echo "" >> ~/.bashrc

USER laradock

RUN echo "" >> ~/.bashrc && \
echo "# Load Custom Aliases" >> ~/.bashrc && \
echo "source ~/aliases.sh" >> ~/.bashrc && \
- echo "" >> ~/.bashrc
+ echo "" >> ~/.bashrc

###########################################################################
# Composer:
###########################################################################

@@ -108,9 +118,16 @@ USER root

# Add the composer.json
COPY ./composer.json /home/laradock/.composer/composer.json

+# Add the auth.json for Magento 2 credentials
+COPY ./auth.json /home/laradock/.composer/auth.json
+
# Make sure that ~/.composer belongs to laradock
RUN chown -R laradock:laradock /home/laradock/.composer

+# Export composer vendor path
+RUN echo "" >> ~/.bashrc && \
+ echo 'export PATH="$HOME/.composer/vendor/bin:$PATH"' >> ~/.bashrc
+
USER laradock

# Check if global install need to be ran
@@ -122,6 +139,15 @@ RUN if [ ${COMPOSER_GLOBAL_INSTALL} = true ]; then \
composer global install \
;fi

+# Check if auth file is disabled
+ARG COMPOSER_AUTH=false
+ENV COMPOSER_AUTH ${COMPOSER_AUTH}
+
+RUN if [ ${COMPOSER_AUTH} = false ]; then \
+ # remove the file
+ rm /home/laradock/.composer/auth.json \
+;fi
+
ARG COMPOSER_REPO_PACKAGIST
ENV COMPOSER_REPO_PACKAGIST ${COMPOSER_REPO_PACKAGIST}

@@ -174,6 +200,21 @@ RUN if [ ${INSTALL_DRUSH} = true ]; then \
drush core-status \
;fi

+###########################################################################
+# WP CLI:
+###########################################################################
+
+# The command line interface for WordPress
+
+USER root
+
+ARG INSTALL_WP_CLI=false
+
+RUN if [ ${INSTALL_WP_CLI} = true ]; then \
+ curl -fsSL -o /usr/local/bin/wp https://raw.githubusercontent.com/wp-cli/builds/gh-pages/phar/wp-cli.phar && \
+ chmod +x /usr/local/bin/wp \
+;fi
+
###########################################################################
# SSH2:
###########################################################################

@@ -194,11 +235,11 @@ USER root

ARG INSTALL_GMP=false
-ARG PHP_VERSION=${PHP_VERSION}
+ARG PHP_VERSION=${LARADOCK_PHP_VERSION}

RUN if [ ${INSTALL_GMP} = true ]; then \
# Install the PHP GMP extension
- apt-get -y install php${PHP_VERSION}-gmp \
+ apt-get -y install php${LARADOCK_PHP_VERSION}-gmp \
;fi

###########################################################################
# SOAP:
###########################################################################

@@ -214,6 +255,20 @@ RUN if [ ${INSTALL_SOAP} = true ]; then \
apt-get -y install libxml2-dev php${LARADOCK_PHP_VERSION}-soap \
;fi

+###########################################################################
+# XSL:
+###########################################################################
+
+USER root
+
+ARG INSTALL_XSL=false
+
+RUN if [ ${INSTALL_XSL} = true ]; then \
+ # Install the PHP XSL extension
+ apt-get -y install libxslt-dev php${LARADOCK_PHP_VERSION}-xsl \
+;fi
+
+
###########################################################################
# LDAP:
###########################################################################

@@ -259,8 +314,7 @@
ARG INSTALL_XDEBUG=false RUN if [ ${INSTALL_XDEBUG} = true ]; then \ # Load the xdebug extension only with phpunit commands apt-get install -y php${LARADOCK_PHP_VERSION}-xdebug && \ - sed -i 's/^;//g' /etc/php/${LARADOCK_PHP_VERSION}/cli/conf.d/20-xdebug.ini && \ - echo "alias phpunit='php -dzend_extension=xdebug.so /var/www/vendor/bin/phpunit'" >> ~/.bashrc \ + sed -i 's/^;//g' /etc/php/${LARADOCK_PHP_VERSION}/cli/conf.d/20-xdebug.ini \ ;fi # ADD for REMOTE debugging @@ -294,7 +348,7 @@ ARG BLACKFIRE_CLIENT_TOKEN ENV BLACKFIRE_CLIENT_TOKEN ${BLACKFIRE_CLIENT_TOKEN} RUN if [ ${INSTALL_XDEBUG} = false -a ${INSTALL_BLACKFIRE} = true ]; then \ - curl -L https://packagecloud.io/gpg.key | apt-key add - && \ + curl -L https://packages.blackfire.io/gpg.key | apt-key add - && \ echo "deb http://packages.blackfire.io/debian any main" | tee /etc/apt/sources.list.d/blackfire.list && \ apt-get update -yqq && \ apt-get install blackfire-agent \ @@ -352,6 +406,37 @@ RUN if [ ${INSTALL_AMQP} = true ]; then \ ln -s /etc/php/${LARADOCK_PHP_VERSION}/mods-available/amqp.ini /etc/php/${LARADOCK_PHP_VERSION}/cli/conf.d/30-amqp.ini \ ;fi +########################################################################### +# CASSANDRA: +########################################################################### + +ARG INSTALL_CASSANDRA=false + +RUN if [ ${INSTALL_CASSANDRA} = true ]; then \ + apt-get install libgmp-dev -y && \ + curl https://downloads.datastax.com/cpp-driver/ubuntu/18.04/dependencies/libuv/v1.28.0/libuv1-dev_1.28.0-1_amd64.deb -o libuv1-dev.deb && \ + curl https://downloads.datastax.com/cpp-driver/ubuntu/18.04/dependencies/libuv/v1.28.0/libuv1_1.28.0-1_amd64.deb -o libuv1.deb && \ + curl https://downloads.datastax.com/cpp-driver/ubuntu/18.04/cassandra/v2.12.0/cassandra-cpp-driver-dev_2.12.0-1_amd64.deb -o cassandra-cpp-driver-dev.deb && \ + curl https://downloads.datastax.com/cpp-driver/ubuntu/18.04/cassandra/v2.12.0/cassandra-cpp-driver_2.12.0-1_amd64.deb -o cassandra-cpp-driver.deb && \ + dpkg -i libuv1.deb && \ + dpkg -i libuv1-dev.deb && \ + dpkg -i cassandra-cpp-driver.deb && \ + dpkg -i cassandra-cpp-driver-dev.deb && \ + rm libuv1.deb libuv1-dev.deb cassandra-cpp-driver-dev.deb cassandra-cpp-driver.deb && \ + cd /usr/src && \ + git clone https://github.com/datastax/php-driver.git && \ + cd /usr/src/php-driver/ext && \ + phpize && \ + mkdir /usr/src/php-driver/build && \ + cd /usr/src/php-driver/build && \ + ../ext/configure > /dev/null && \ + make clean >/dev/null && \ + make >/dev/null 2>&1 && \ + make install && \ + echo "extension=cassandra.so" >> /etc/php/${LARADOCK_PHP_VERSION}/mods-available/cassandra.ini && \ + ln -s /etc/php/${LARADOCK_PHP_VERSION}/mods-available/cassandra.ini /etc/php/${LARADOCK_PHP_VERSION}/cli/conf.d/30-cassandra.ini \ +;fi + ########################################################################### # PHP REDIS EXTENSION ########################################################################### @@ -359,10 +444,7 @@ RUN if [ ${INSTALL_AMQP} = true ]; then \ ARG INSTALL_PHPREDIS=false RUN if [ ${INSTALL_PHPREDIS} = true ]; then \ - # Install Php Redis extension - printf "\n" | pecl -q install -o -f redis && \ - echo "extension=redis.so" >> /etc/php/${LARADOCK_PHP_VERSION}/mods-available/redis.ini && \ - phpenmod redis \ + apt-get install -yqq php-redis \ ;fi ########################################################################### @@ -374,7 +456,7 @@ ARG INSTALL_SWOOLE=false RUN if [ ${INSTALL_SWOOLE} = true ]; then \ # Install Php Swoole Extension if [ $(php -r 
"echo PHP_MAJOR_VERSION;") = "5" ]; then \ - pecl -q install swoole-2.0.11; \ + pecl -q install swoole-2.0.10; \ else \ if [ $(php -r "echo PHP_MINOR_VERSION;") = "0" ]; then \ pecl install swoole-2.2.0; \ @@ -384,6 +466,23 @@ RUN if [ ${INSTALL_SWOOLE} = true ]; then \ fi && \ echo "extension=swoole.so" >> /etc/php/${LARADOCK_PHP_VERSION}/mods-available/swoole.ini && \ ln -s /etc/php/${LARADOCK_PHP_VERSION}/mods-available/swoole.ini /etc/php/${LARADOCK_PHP_VERSION}/cli/conf.d/20-swoole.ini \ + && php -m | grep -q 'swoole' \ +;fi + +########################################################################### +# Taint EXTENSION +########################################################################### + +ARG INSTALL_TAINT=false + +RUN if [ "${INSTALL_TAINT}" = true ]; then \ + # Install Php TAINT Extension + if [ $(php -r "echo PHP_MAJOR_VERSION;") = "7" ]; then \ + pecl install taint && \ + echo "extension=taint.so" >> /etc/php/${LARADOCK_PHP_VERSION}/mods-available/taint.ini && \ + ln -s /etc/php/${LARADOCK_PHP_VERSION}/mods-available/taint.ini /etc/php/${LARADOCK_PHP_VERSION}/cli/conf.d/20-taint.ini && \ + php -m | grep -q 'taint'; \ + fi \ ;fi ########################################################################### @@ -397,6 +496,31 @@ RUN if [ ${INSTALL_LIBPNG} = true ]; then \ apt-get install libpng16-16 \ ;fi +########################################################################### +# Inotify EXTENSION: +########################################################################### + +ARG INSTALL_INOTIFY=false + +RUN if [ ${INSTALL_INOTIFY} = true ]; then \ + pecl -q install inotify && \ + echo "extension=inotify.so" >> /etc/php/${LARADOCK_PHP_VERSION}/mods-available/inotify.ini && \ + ln -s /etc/php/${LARADOCK_PHP_VERSION}/mods-available/inotify.ini /etc/php/${LARADOCK_PHP_VERSION}/cli/conf.d/20-inotify.ini \ +;fi + +########################################################################### +# fswatch +########################################################################### + +ARG INSTALL_FSWATCH=false + +RUN if [ ${INSTALL_FSWATCH} = true ]; then \ + apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 47FE03C1 \ + && add-apt-repository -y ppa:hadret/fswatch \ + || apt-get update -yqq \ + && apt-get -y install fswatch \ +;fi + ########################################################################### # IonCube Loader ########################################################################### @@ -440,6 +564,7 @@ ARG INSTALL_NODE=false ARG INSTALL_NPM_GULP=false ARG INSTALL_NPM_BOWER=false ARG INSTALL_NPM_VUE_CLI=false +ARG INSTALL_NPM_ANGULAR_CLI=false ARG NPM_REGISTRY ENV NPM_REGISTRY ${NPM_REGISTRY} ENV NVM_DIR /home/laradock/.nvm @@ -464,6 +589,9 @@ RUN if [ ${INSTALL_NODE} = true ]; then \ && if [ ${INSTALL_NPM_VUE_CLI} = true ]; then \ npm install -g @vue/cli \ ;fi \ + && if [ ${INSTALL_NPM_ANGULAR_CLI} = true ]; then \ + npm install -g @angular/cli \ + ;fi \ && ln -s `npm bin --global` /home/laradock/.node-bin \ ;fi @@ -545,26 +673,30 @@ ENV PATH $PATH:/home/laradock/.yarn/bin USER root ARG INSTALL_AEROSPIKE=false -ARG AEROSPIKE_PHP_REPOSITORY -RUN if [ ${INSTALL_AEROSPIKE} = true ]; then \ +RUN set -xe; \ + if [ ${INSTALL_AEROSPIKE} = true ]; then \ # Fix dependencies for PHPUnit within aerospike extension apt-get -y install sudo wget && \ # Install the php aerospike extension - curl -L -o /tmp/aerospike-client-php.tar.gz ${AEROSPIKE_PHP_REPOSITORY} \ - && mkdir -p aerospike-client-php \ - && tar -C aerospike-client-php -zxvf /tmp/aerospike-client-php.tar.gz 
--strip 1 \ + if [ $(php -r "echo PHP_MAJOR_VERSION;") = "5" ]; then \ + curl -L -o /tmp/aerospike-client-php.tar.gz https://github.com/aerospike/aerospike-client-php5/archive/master.tar.gz; \ + else \ + curl -L -o /tmp/aerospike-client-php.tar.gz https://github.com/aerospike/aerospike-client-php/archive/master.tar.gz; \ + fi \ + && mkdir -p /tmp/aerospike-client-php \ + && tar -C /tmp/aerospike-client-php -zxvf /tmp/aerospike-client-php.tar.gz --strip 1 \ && \ if [ $(php -r "echo PHP_MAJOR_VERSION;") = "5" ]; then \ ( \ - cd aerospike-client-php/src/aerospike \ + cd /tmp/aerospike-client-php/src/aerospike \ && phpize \ && ./build.sh \ && make install \ ) \ else \ ( \ - cd aerospike-client-php/src \ + cd /tmp/aerospike-client-php/src \ && phpize \ && ./build.sh \ && make install \ @@ -574,7 +706,7 @@ RUN if [ ${INSTALL_AEROSPIKE} = true ]; then \ && echo 'extension=aerospike.so' >> /etc/php/${LARADOCK_PHP_VERSION}/cli/conf.d/aerospike.ini \ && echo 'aerospike.udf.lua_system_path=/usr/local/aerospike/lua' >> /etc/php/${LARADOCK_PHP_VERSION}/cli/conf.d/aerospike.ini \ && echo 'aerospike.udf.lua_user_path=/usr/local/aerospike/usr-lua' >> /etc/php/${LARADOCK_PHP_VERSION}/cli/conf.d/aerospike.ini \ -;fi + ;fi ########################################################################### # PHP V8JS: @@ -584,14 +716,19 @@ USER root ARG INSTALL_V8JS=false -RUN if [ ${INSTALL_V8JS} = true ]; then \ - # Install the php V8JS extension +RUN set -xe; \ + if [ ${INSTALL_V8JS} = true ]; then \ add-apt-repository -y ppa:pinepain/libv8-archived \ && apt-get update -yqq \ - && apt-get install -y php${LARADOCK_PHP_VERSION}-xml php${LARADOCK_PHP_VERSION}-dev php-pear libv8-5.4 \ - && pecl install v8js \ + && apt-get install -y libv8-5.4 && \ + if [ $(php -r "echo PHP_MAJOR_VERSION;") = "5" ]; then \ + pecl install v8js-0.6.4; \ + else \ + pecl install v8js; \ + fi \ && echo "extension=v8js.so" >> /etc/php/${LARADOCK_PHP_VERSION}/cli/php.ini \ -;fi + && php -m | grep -q 'v8js' \ + ;fi ########################################################################### # Laravel Envoy: @@ -612,6 +749,15 @@ RUN if [ ${INSTALL_LARAVEL_ENVOY} = true ]; then \ USER laradock +ARG INSTALL_LARAVEL_INSTALLER=false + +RUN if [ ${INSTALL_LARAVEL_INSTALLER} = true ]; then \ + # Install the Laravel Installer + composer global require "laravel/installer" \ +;fi + +USER root + ARG COMPOSER_REPO_PACKAGIST ENV COMPOSER_REPO_PACKAGIST ${COMPOSER_REPO_PACKAGIST} @@ -689,7 +835,8 @@ RUN if [ ${INSTALL_LINUXBREW} = true ]; then \ ARG INSTALL_MSSQL=false -RUN set -eux; if [ ${INSTALL_MSSQL} = true ]; then \ +RUN set -eux; \ + if [ ${INSTALL_MSSQL} = true ]; then \ if [ $(php -r "echo PHP_MAJOR_VERSION;") = "5" ]; then \ apt-get -y install php5.6-sybase freetds-bin freetds-common libsybdb5 \ && php -m | grep -q 'mssql' \ @@ -707,13 +854,17 @@ RUN set -eux; if [ ${INSTALL_MSSQL} = true ]; then \ ln -sfn /opt/mssql-tools/bin/bcp /usr/bin/bcp && \ echo "en_US.UTF-8 UTF-8" > /etc/locale.gen && \ locale-gen && \ - pecl install sqlsrv pdo_sqlsrv && \ + if [ $(php -r "echo PHP_MINOR_VERSION;") = "0" ]; then \ + pecl install sqlsrv-5.3.0 pdo_sqlsrv-5.3.0 \ + ;else \ + pecl install sqlsrv pdo_sqlsrv \ + ;fi && \ echo "extension=sqlsrv.so" > /etc/php/${LARADOCK_PHP_VERSION}/cli/conf.d/20-sqlsrv.ini && \ echo "extension=pdo_sqlsrv.so" > /etc/php/${LARADOCK_PHP_VERSION}/cli/conf.d/20-pdo_sqlsrv.ini \ && php -m | grep -q 'sqlsrv' \ && php -m | grep -q 'pdo_sqlsrv' \ ;fi \ -;fi + ;fi ########################################################################### 
# Minio: @@ -779,6 +930,23 @@ RUN if [ ${INSTALL_PYTHON} = true ]; then \ && python -m pip install --upgrade virtualenv \ ;fi +########################################################################### +# POWERLINE: +########################################################################### + +USER root +ARG INSTALL_POWERLINE=false + +RUN if [ ${INSTALL_POWERLINE} = true ]; then \ + if [ ${INSTALL_PYTHON} = true ]; then \ + python -m pip install --upgrade powerline-status && \ + echo "" >> /etc/bash.bashrc && \ + echo ". /usr/local/lib/python2.7/dist-packages/powerline/bindings/bash/powerline.sh" >> /etc/bash.bashrc \ + ;fi \ +;fi + +USER laradock + ########################################################################### # ImageMagick: ########################################################################### @@ -823,15 +991,6 @@ RUN if [ ${INSTALL_PG_CLIENT} = true ]; then \ && apt-get -y install postgresql-client-10 \ ;fi -########################################################################### -# nasm -########################################################################### - -USER root - -RUN apt-get update -yqq \ - && apt-get -yqq install nasm - ########################################################################### # Dusk Dependencies: ########################################################################### @@ -890,11 +1049,62 @@ RUN if [ ${INSTALL_MYSQL_CLIENT} = true ]; then \ apt-get -y install mysql-client \ ;fi +########################################################################### +# ping: +########################################################################### + +USER root + +ARG INSTALL_PING=false + +RUN if [ ${INSTALL_PING} = true ]; then \ + apt-get update -yqq && \ + apt-get -y install inetutils-ping \ +;fi + +########################################################################### +# sshpass: +########################################################################### + +USER root + +ARG INSTALL_SSHPASS=false + +RUN if [ ${INSTALL_SSHPASS} = true ]; then \ + apt-get update -yqq && \ + apt-get -y install sshpass \ +;fi + +########################################################################### +# FFMpeg: +########################################################################### + +USER root + +ARG INSTALL_FFMPEG=false + +RUN if [ ${INSTALL_FFMPEG} = true ]; then \ + apt-get -y install ffmpeg \ +;fi + +########################################################################### +# GNU Parallel: +########################################################################### + +USER root + +ARG INSTALL_GNU_PARALLEL=false + +RUN if [ ${INSTALL_GNU_PARALLEL} = true ]; then \ + apt-get -y install parallel \ +;fi + + ########################################################################### # Check PHP version: ########################################################################### -RUN php -v | head -n 1 | grep -q "PHP ${LARADOCK_PHP_VERSION}." +RUN set -xe; php -v | head -n 1 | grep -q "PHP ${LARADOCK_PHP_VERSION}." 
# #-------------------------------------------------------------------------- diff --git a/workspace/aliases.sh b/workspace/aliases.sh index 6cb13574..0bf50731 100644 --- a/workspace/aliases.sh +++ b/workspace/aliases.sh @@ -46,8 +46,8 @@ alias h="history" alias j="jobs" alias e='exit' alias c="clear" -alias cla="clear && ls -l" -alias cll="clear && ls -la" +alias cla="clear && ls -la" +alias cll="clear && ls -l" alias cls="clear && ls" alias code="cd /var/www" alias ea="vi ~/aliases.sh" @@ -107,6 +107,13 @@ alias gd="git --no-pager diff" alias git-revert="git reset --hard && git clean -df" alias gs="git status" alias whoops="git reset --hard && git clean -df" +alias glog="git log --oneline --decorate --graph" +alias gloga="git log --oneline --decorate --graph --all" +alias gsh="git show" +alias grb="git rebase -i" +alias gbr="git branch" +alias gc="git commit" +alias gck="git checkout" # Create a new directory and enter it function mkd() { diff --git a/workspace/auth.json b/workspace/auth.json new file mode 100644 index 00000000..03cde45f --- /dev/null +++ b/workspace/auth.json @@ -0,0 +1,8 @@ +{ + "http-basic": { + "repo.magento.com": { + "username": "", + "password": "" + } + } +} diff --git a/zookeeper/Dockerfile b/zookeeper/Dockerfile new file mode 100644 index 00000000..3fc8abd7 --- /dev/null +++ b/zookeeper/Dockerfile @@ -0,0 +1,10 @@ +FROM zookeeper:latest + +LABEL maintainer="Hyduan " + +VOLUME /data +VOLUME /datalog + +EXPOSE 2181 + +CMD ["zkServer.sh", "start-foreground"]
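
A quick smoke test for the new Redis LFU / active-defrag settings shipped in redis.conf above. This is only an illustrative sketch, not part of the patch: the redis-cli subcommands (CONFIG GET/SET, INFO memory, OBJECT FREQ) are standard Redis 4.0+ commands, but the service name "redis" and the key "foo" are assumptions based on this compose setup.

docker-compose up -d redis
# Read back the LFU tuning knobs described in the config comments:
docker-compose exec redis redis-cli CONFIG GET lfu-log-factor
docker-compose exec redis redis-cli CONFIG GET lfu-decay-time
# Active defrag can also be enabled at runtime, as point 3 of the config notes says:
docker-compose exec redis redis-cli CONFIG SET activedefrag yes
docker-compose exec redis redis-cli INFO memory | grep mem_fragmentation_ratio
# OBJECT FREQ only returns data when maxmemory-policy is an LFU policy:
docker-compose exec redis redis-cli OBJECT FREQ foo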
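Likewise, the new workspace build args (INSTALL_WP_CLI, INSTALL_XSL, INSTALL_CASSANDRA, INSTALL_TAINT, INSTALL_LARAVEL_INSTALLER, INSTALL_NPM_ANGULAR_CLI, COMPOSER_AUTH, ...) are only read at build time. A hedged sketch of the usual flow follows; the WORKSPACE_INSTALL_* variable names assume the conventional .env-to-build-arg mapping in docker-compose.yml, which is not part of this diff.

# Toggle the feature flag in .env (variable name assumed per laradock convention):
sed -i -- 's/WORKSPACE_INSTALL_WP_CLI=false/WORKSPACE_INSTALL_WP_CLI=true/' .env
# Rebuild so the Dockerfile ARG picks up the new value:
docker-compose build workspace
# Then verify inside the running container:
docker-compose up -d workspace
docker-compose exec workspace wp --info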