commit
77d5a7be8b
1
.gitattributes
vendored
Normal file
1
.gitattributes
vendored
Normal file
@ -0,0 +1 @@
|
||||
*.sh text eol=lf
|
@ -196,6 +196,7 @@ More [options](https://docs.docker.com/compose/reference/logs/)
|
||||
|
||||
|
||||
<br>
|
||||
|
||||
<a name="PHP"></a>
|
||||
|
||||
|
||||
@ -206,11 +207,14 @@ More [options](https://docs.docker.com/compose/reference/logs/)
|
||||
<a name="Install-PHP-Extensions"></a>
|
||||
## Install PHP Extensions
|
||||
|
||||
Before installing PHP extensions, you have to decide first whether you need `FPM` or `CLI`, because each of them has its own container; if you need it for both, you have to edit both containers.
|
||||
You can set extensions to install in the .env file's corresponding section (`PHP_FPM`, `WORKSPACE`, `PHP_WORKER`),
|
||||
just change the `false` to `true` at the desired extension's line.
|
||||
After this you have to rebuild the container with the `--no-cache` option.
|
||||
|
||||
```bash
|
||||
docker build --no-cache {container-name}
|
||||
```
|
||||
|
||||
The PHP-FPM extensions should be installed in `php-fpm/Dockerfile-XX`. *(replace XX with your default PHP version number)*.
|
||||
<br>
|
||||
The PHP-CLI extensions should be installed in `workspace/Dockerfile`.
|
||||
|
||||
|
||||
|
||||
@ -218,8 +222,10 @@ The PHP-CLI extensions should be installed in `workspace/Dockerfile`.
|
||||
|
||||
|
||||
<br>
|
||||
|
||||
<a name="Change-the-PHP-FPM-Version"></a>
|
||||
## Change the (PHP-FPM) Version
|
||||
|
||||
By default the latest stable PHP version is configured to run.
|
||||
|
||||
>The PHP-FPM is responsible for serving your application code, you don't have to change the PHP-CLI version if you are planning to run your application on different PHP-FPM version.
|
||||
@ -1335,12 +1341,26 @@ docker-compose up -d minio
|
||||
|
||||
5 - When configuring your other clients use the following details:
|
||||
```
|
||||
S3_HOST=http://minio
|
||||
S3_KEY=access
|
||||
S3_SECRET=secretkey
|
||||
S3_REGION=us-east-1
|
||||
S3_BUCKET=bucket
|
||||
AWS_URL=http://minio:9000
|
||||
AWS_ACCESS_KEY_ID=access
|
||||
AWS_SECRET_ACCESS_KEY=secretkey
|
||||
AWS_DEFAULT_REGION=us-east-1
|
||||
AWS_BUCKET=test
|
||||
AWS_PATH_STYLE=true
|
||||
```
|
||||
6 - In `filesystems.php` you should use the following details (s3):
|
||||
```
|
||||
's3' => [
|
||||
'driver' => 's3',
|
||||
'key' => env('AWS_ACCESS_KEY_ID'),
|
||||
'secret' => env('AWS_SECRET_ACCESS_KEY'),
|
||||
'region' => env('AWS_DEFAULT_REGION'),
|
||||
'bucket' => env('AWS_BUCKET'),
|
||||
'endpoint' => env('AWS_URL'),
|
||||
'use_path_style_endpoint' => env('AWS_PATH_STYLE', false)
|
||||
],
|
||||
```
|
||||
`AWS_PATH_STYLE` should be set to `true` only for local development purposes.
|
||||
|
||||
|
||||
|
||||
@ -1887,6 +1907,7 @@ To install NVM and NodeJS in the Workspace container
|
||||
|
||||
3 - Re-build the container `docker-compose build workspace`
|
||||
|
||||
A `.npmrc` file is included in the `workspace` folder if you need to utilise this globally. This is copied automatically into the root and laradock user's folders on build.
|
||||
|
||||
|
||||
<br>
|
||||
|
53
clickhouse/Dockerfile
Normal file
53
clickhouse/Dockerfile
Normal file
@ -0,0 +1,53 @@
|
||||
FROM ubuntu:20.04
|
||||
|
||||
ARG CLICKHOUSE_VERSION=20.9.4.76
|
||||
ARG CLICKHOUSE_GOSU_VERSION=1.10
|
||||
|
||||
RUN apt-get update \
|
||||
&& apt-get install --yes --no-install-recommends \
|
||||
apt-transport-https \
|
||||
dirmngr \
|
||||
gnupg \
|
||||
&& mkdir -p /etc/apt/sources.list.d \
|
||||
&& apt-key adv --keyserver keyserver.ubuntu.com --recv E0C56BD4 \
|
||||
&& echo "deb http://repo.yandex.ru/clickhouse/deb/stable/ main/" > /etc/apt/sources.list.d/clickhouse.list \
|
||||
&& apt-get update \
|
||||
&& env DEBIAN_FRONTEND=noninteractive \
|
||||
apt-get install --allow-unauthenticated --yes --no-install-recommends \
|
||||
clickhouse-common-static=$CLICKHOUSE_VERSION \
|
||||
clickhouse-client=$CLICKHOUSE_VERSION \
|
||||
clickhouse-server=$CLICKHOUSE_VERSION \
|
||||
locales \
|
||||
tzdata \
|
||||
wget \
|
||||
&& rm -rf \
|
||||
/var/lib/apt/lists/* \
|
||||
/var/cache/debconf \
|
||||
/tmp/* \
|
||||
&& apt-get clean
|
||||
|
||||
ADD https://github.com/tianon/gosu/releases/download/$CLICKHOUSE_GOSU_VERSION/gosu-amd64 /bin/gosu
|
||||
|
||||
RUN locale-gen en_US.UTF-8
|
||||
ENV LANG en_US.UTF-8
|
||||
ENV LANGUAGE en_US:en
|
||||
ENV LC_ALL en_US.UTF-8
|
||||
|
||||
RUN mkdir /docker-entrypoint-initdb.d
|
||||
|
||||
COPY docker_related_config.xml /etc/clickhouse-server/config.d/
|
||||
COPY config.xml /etc/clickhouse-server/config.xml
|
||||
COPY entrypoint.sh /entrypoint.sh
|
||||
|
||||
RUN chmod +x \
|
||||
/entrypoint.sh \
|
||||
/bin/gosu
|
||||
|
||||
EXPOSE 9000 8123 9009
|
||||
VOLUME /var/lib/clickhouse
|
||||
|
||||
ENV CLICKHOUSE_CONFIG /etc/clickhouse-server/config.xml
|
||||
ENV CLICKHOUSE_USER ${CLICKHOUSE_USER}
|
||||
ENV CLICKHOUSE_PASSWORD ${CLICKHOUSE_PASSWORD}
|
||||
|
||||
ENTRYPOINT ["/entrypoint.sh"]
|
510
clickhouse/config.xml
Normal file
510
clickhouse/config.xml
Normal file
@ -0,0 +1,510 @@
|
||||
<?xml version="1.0"?>
|
||||
<!--
|
||||
NOTE: User and query level settings are set up in "users.xml" file.
|
||||
-->
|
||||
<yandex>
|
||||
<!-- The list of hosts allowed to use in URL-related storage engines and table functions.
|
||||
If this section is not present in configuration, all hosts are allowed.
|
||||
-->
|
||||
<remote_url_allow_hosts>
|
||||
<!-- Host should be specified exactly as in URL. The name is checked before DNS resolution.
|
||||
Example: "yandex.ru", "yandex.ru." and "www.yandex.ru" are different hosts.
|
||||
If port is explicitly specified in URL, the host:port is checked as a whole.
|
||||
If host specified here without port, any port with this host allowed.
|
||||
"yandex.ru" -> "yandex.ru:443", "yandex.ru:80" etc. is allowed, but "yandex.ru:80" -> only "yandex.ru:80" is allowed.
|
||||
If the host is specified as IP address, it is checked as specified in URL. Example: "[2a02:6b8:a::a]".
|
||||
If there are redirects and support for redirects is enabled, every redirect (the Location field) is checked.
|
||||
-->
|
||||
|
||||
<!-- Regular expression can be specified. RE2 engine is used for regexps.
|
||||
Regexps are not aligned: don't forget to add ^ and $. Also don't forget to escape dot (.) metacharacter
|
||||
(forgetting to do so is a common source of error).
|
||||
-->
|
||||
</remote_url_allow_hosts>
|
||||
|
||||
<logger>
|
||||
<!-- Possible levels: https://github.com/pocoproject/poco/blob/develop/Foundation/include/Poco/Logger.h#L105 -->
|
||||
<level>trace</level>
|
||||
<log>/var/log/clickhouse-server/clickhouse-server.log</log>
|
||||
<errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
|
||||
<size>1000M</size>
|
||||
<count>10</count>
|
||||
<!-- <console>1</console> --> <!-- Default behavior is autodetection (log to console if not daemon mode and is tty) -->
|
||||
</logger>
|
||||
<!--display_name>production</display_name--> <!-- It is the name that will be shown in the client -->
|
||||
<http_port>8123</http_port>
|
||||
<tcp_port>9000</tcp_port>
|
||||
<!-- For HTTPS and SSL over native protocol. -->
|
||||
<!--
|
||||
<https_port>8443</https_port>
|
||||
<tcp_port_secure>9440</tcp_port_secure>
|
||||
-->
|
||||
|
||||
<!-- Used with https_port and tcp_port_secure. Full ssl options list: https://github.com/ClickHouse-Extras/poco/blob/master/NetSSL_OpenSSL/include/Poco/Net/SSLManager.h#L71 -->
|
||||
<openSSL>
|
||||
<server> <!-- Used for https server AND secure tcp port -->
|
||||
<!-- openssl req -subj "/CN=localhost" -new -newkey rsa:2048 -days 365 -nodes -x509 -keyout /etc/clickhouse-server/server.key -out /etc/clickhouse-server/server.crt -->
|
||||
<certificateFile>/etc/clickhouse-server/server.crt</certificateFile>
|
||||
<privateKeyFile>/etc/clickhouse-server/server.key</privateKeyFile>
|
||||
<!-- openssl dhparam -out /etc/clickhouse-server/dhparam.pem 4096 -->
|
||||
<dhParamsFile>/etc/clickhouse-server/dhparam.pem</dhParamsFile>
|
||||
<verificationMode>none</verificationMode>
|
||||
<loadDefaultCAFile>true</loadDefaultCAFile>
|
||||
<cacheSessions>true</cacheSessions>
|
||||
<disableProtocols>sslv2,sslv3</disableProtocols>
|
||||
<preferServerCiphers>true</preferServerCiphers>
|
||||
</server>
|
||||
|
||||
<client> <!-- Used for connecting to https dictionary source -->
|
||||
<loadDefaultCAFile>true</loadDefaultCAFile>
|
||||
<cacheSessions>true</cacheSessions>
|
||||
<disableProtocols>sslv2,sslv3</disableProtocols>
|
||||
<preferServerCiphers>true</preferServerCiphers>
|
||||
<!-- Use for self-signed: <verificationMode>none</verificationMode> -->
|
||||
<invalidCertificateHandler>
|
||||
<!-- Use for self-signed: <name>AcceptCertificateHandler</name> -->
|
||||
<name>RejectCertificateHandler</name>
|
||||
</invalidCertificateHandler>
|
||||
</client>
|
||||
</openSSL>
|
||||
|
||||
<!-- Default root page on http[s] server. For example load UI from https://tabix.io/ when opening http://localhost:8123 -->
|
||||
<!--
|
||||
<http_server_default_response><![CDATA[<html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>]]></http_server_default_response>
|
||||
-->
|
||||
|
||||
<!-- Port for communication between replicas. Used for data exchange. -->
|
||||
<interserver_http_port>9009</interserver_http_port>
|
||||
|
||||
<!-- Hostname that is used by other replicas to request this server.
|
||||
         If not specified, then it is determined analogously to the 'hostname -f' command.
|
||||
This setting could be used to switch replication to another network interface.
|
||||
-->
|
||||
<!--
|
||||
<interserver_http_host>example.yandex.ru</interserver_http_host>
|
||||
-->
|
||||
|
||||
<!-- Listen specified host. use :: (wildcard IPv6 address), if you want to accept connections both with IPv4 and IPv6 from everywhere. -->
|
||||
<!-- <listen_host>::</listen_host> -->
|
||||
<!-- Same for hosts with disabled ipv6: -->
|
||||
<!-- <listen_host>0.0.0.0</listen_host> -->
|
||||
|
||||
<!-- Default values - try listen localhost on ipv4 and ipv6: -->
|
||||
<!--
|
||||
<listen_host>::1</listen_host>
|
||||
<listen_host>127.0.0.1</listen_host>
|
||||
-->
|
||||
<!-- Don't exit if ipv6 or ipv4 unavailable, but listen_host with this protocol specified -->
|
||||
<!-- <listen_try>0</listen_try> -->
|
||||
|
||||
<!-- Allow listen on same address:port -->
|
||||
<!-- <listen_reuse_port>0</listen_reuse_port> -->
|
||||
|
||||
<!-- <listen_backlog>64</listen_backlog> -->
|
||||
|
||||
<max_connections>4096</max_connections>
|
||||
<keep_alive_timeout>3</keep_alive_timeout>
|
||||
|
||||
<!-- Maximum number of concurrent queries. -->
|
||||
<max_concurrent_queries>100</max_concurrent_queries>
|
||||
|
||||
<!-- Set limit on number of open files (default: maximum). This setting makes sense on Mac OS X because getrlimit() fails to retrieve
|
||||
correct maximum value. -->
|
||||
<!-- <max_open_files>262144</max_open_files> -->
|
||||
|
||||
<!-- Size of cache of uncompressed blocks of data, used in tables of MergeTree family.
|
||||
In bytes. Cache is single for server. Memory is allocated only on demand.
|
||||
Cache is used when 'use_uncompressed_cache' user setting turned on (off by default).
|
||||
Uncompressed cache is advantageous only for very short queries and in rare cases.
|
||||
-->
|
||||
<uncompressed_cache_size>8589934592</uncompressed_cache_size>
|
||||
|
||||
<!-- Approximate size of mark cache, used in tables of MergeTree family.
|
||||
In bytes. Cache is single for server. Memory is allocated only on demand.
|
||||
You should not lower this value.
|
||||
-->
|
||||
<mark_cache_size>5368709120</mark_cache_size>
|
||||
|
||||
|
||||
<!-- Path to data directory, with trailing slash. -->
|
||||
<path>/var/lib/clickhouse/</path>
|
||||
|
||||
<!-- Path to temporary data for processing hard queries. -->
|
||||
<tmp_path>/var/lib/clickhouse/tmp/</tmp_path>
|
||||
|
||||
<!-- Directory with user provided files that are accessible by 'file' table function. -->
|
||||
<user_files_path>/var/lib/clickhouse/user_files/</user_files_path>
|
||||
|
||||
<!-- Path to configuration file with users, access rights, profiles of settings, quotas. -->
|
||||
<users_config>users.xml</users_config>
|
||||
|
||||
<!-- Default profile of settings. -->
|
||||
<default_profile>default</default_profile>
|
||||
|
||||
<!-- System profile of settings. This settings are used by internal processes (Buffer storage, Distibuted DDL worker and so on). -->
|
||||
<!-- <system_profile>default</system_profile> -->
|
||||
|
||||
<!-- Default database. -->
|
||||
<default_database>default</default_database>
|
||||
|
||||
<!-- Server time zone could be set here.
|
||||
|
||||
Time zone is used when converting between String and DateTime types,
|
||||
when printing DateTime in text formats and parsing DateTime from text,
|
||||
it is used in date and time related functions, if specific time zone was not passed as an argument.
|
||||
|
||||
Time zone is specified as identifier from IANA time zone database, like UTC or Africa/Abidjan.
|
||||
If not specified, system time zone at server startup is used.
|
||||
|
||||
Please note, that server could display time zone alias instead of specified name.
|
||||
Example: W-SU is an alias for Europe/Moscow and Zulu is an alias for UTC.
|
||||
-->
|
||||
<!-- <timezone>Europe/Moscow</timezone> -->
|
||||
|
||||
<!-- You can specify umask here (see "man umask"). Server will apply it on startup.
|
||||
Number is always parsed as octal. Default umask is 027 (other users cannot read logs, data files, etc; group can only read).
|
||||
-->
|
||||
<!-- <umask>022</umask> -->
|
||||
|
||||
<!-- Perform mlockall after startup to lower first queries latency
|
||||
and to prevent clickhouse executable from being paged out under high IO load.
|
||||
Enabling this option is recommended but will lead to increased startup time for up to a few seconds.
|
||||
-->
|
||||
<mlock_executable>false</mlock_executable>
|
||||
|
||||
<!-- Configuration of clusters that could be used in Distributed tables.
|
||||
https://clickhouse.yandex/docs/en/table_engines/distributed/
|
||||
-->
|
||||
<remote_servers incl="clickhouse_remote_servers" >
|
||||
<!-- Test only shard config for testing distributed storage -->
|
||||
<test_shard_localhost>
|
||||
<shard>
|
||||
<replica>
|
||||
<host>localhost</host>
|
||||
<port>9000</port>
|
||||
</replica>
|
||||
</shard>
|
||||
</test_shard_localhost>
|
||||
<test_cluster_two_shards_localhost>
|
||||
<shard>
|
||||
<replica>
|
||||
<host>localhost</host>
|
||||
<port>9000</port>
|
||||
</replica>
|
||||
</shard>
|
||||
<shard>
|
||||
<replica>
|
||||
<host>localhost</host>
|
||||
<port>9000</port>
|
||||
</replica>
|
||||
</shard>
|
||||
</test_cluster_two_shards_localhost>
|
||||
<test_cluster_two_shards>
|
||||
<shard>
|
||||
<replica>
|
||||
<host>127.0.0.1</host>
|
||||
<port>9000</port>
|
||||
</replica>
|
||||
</shard>
|
||||
<shard>
|
||||
<replica>
|
||||
<host>127.0.0.2</host>
|
||||
<port>9000</port>
|
||||
</replica>
|
||||
</shard>
|
||||
</test_cluster_two_shards>
|
||||
<test_shard_localhost_secure>
|
||||
<shard>
|
||||
<replica>
|
||||
<host>localhost</host>
|
||||
<port>9440</port>
|
||||
<secure>1</secure>
|
||||
</replica>
|
||||
</shard>
|
||||
</test_shard_localhost_secure>
|
||||
<test_unavailable_shard>
|
||||
<shard>
|
||||
<replica>
|
||||
<host>localhost</host>
|
||||
<port>9000</port>
|
||||
</replica>
|
||||
</shard>
|
||||
<shard>
|
||||
<replica>
|
||||
<host>localhost</host>
|
||||
<port>1</port>
|
||||
</replica>
|
||||
</shard>
|
||||
</test_unavailable_shard>
|
||||
</remote_servers>
|
||||
|
||||
|
||||
<!-- If element has 'incl' attribute, then for it's value will be used corresponding substitution from another file.
|
||||
By default, path to file with substitutions is /etc/metrika.xml. It could be changed in config in 'include_from' element.
|
||||
Values for substitutions are specified in /yandex/name_of_substitution elements in that file.
|
||||
-->
|
||||
|
||||
<!-- ZooKeeper is used to store metadata about replicas, when using Replicated tables.
|
||||
Optional. If you don't use replicated tables, you could omit that.
|
||||
|
||||
See https://clickhouse.yandex/docs/en/table_engines/replication/
|
||||
-->
|
||||
|
||||
<zookeeper incl="zookeeper-servers" optional="true" />
|
||||
|
||||
<!-- Substitutions for parameters of replicated tables.
|
||||
Optional. If you don't use replicated tables, you could omit that.
|
||||
|
||||
See https://clickhouse.yandex/docs/en/table_engines/replication/#creating-replicated-tables
|
||||
-->
|
||||
<macros incl="macros" optional="true" />
|
||||
|
||||
|
||||
<!-- Reloading interval for embedded dictionaries, in seconds. Default: 3600. -->
|
||||
<builtin_dictionaries_reload_interval>3600</builtin_dictionaries_reload_interval>
|
||||
|
||||
|
||||
<!-- Maximum session timeout, in seconds. Default: 3600. -->
|
||||
<max_session_timeout>3600</max_session_timeout>
|
||||
|
||||
<!-- Default session timeout, in seconds. Default: 60. -->
|
||||
<default_session_timeout>60</default_session_timeout>
|
||||
|
||||
<!-- Sending data to Graphite for monitoring. Several sections can be defined. -->
|
||||
<!--
|
||||
interval - send every X second
|
||||
root_path - prefix for keys
|
||||
hostname_in_path - append hostname to root_path (default = true)
|
||||
metrics - send data from table system.metrics
|
||||
events - send data from table system.events
|
||||
asynchronous_metrics - send data from table system.asynchronous_metrics
|
||||
-->
|
||||
<!--
|
||||
<graphite>
|
||||
<host>localhost</host>
|
||||
<port>42000</port>
|
||||
<timeout>0.1</timeout>
|
||||
<interval>60</interval>
|
||||
<root_path>one_min</root_path>
|
||||
<hostname_in_path>true</hostname_in_path>
|
||||
|
||||
<metrics>true</metrics>
|
||||
<events>true</events>
|
||||
<events_cumulative>false</events_cumulative>
|
||||
<asynchronous_metrics>true</asynchronous_metrics>
|
||||
</graphite>
|
||||
<graphite>
|
||||
<host>localhost</host>
|
||||
<port>42000</port>
|
||||
<timeout>0.1</timeout>
|
||||
<interval>1</interval>
|
||||
<root_path>one_sec</root_path>
|
||||
|
||||
<metrics>true</metrics>
|
||||
<events>true</events>
|
||||
<events_cumulative>false</events_cumulative>
|
||||
<asynchronous_metrics>false</asynchronous_metrics>
|
||||
</graphite>
|
||||
-->
|
||||
|
||||
    <!-- Serve endpoint for Prometheus monitoring. -->
|
||||
<!--
|
||||
        endpoint - metrics path (relative to root, starting with "/")
|
||||
port - port to setup server. If not defined or 0 than http_port used
|
||||
metrics - send data from table system.metrics
|
||||
events - send data from table system.events
|
||||
asynchronous_metrics - send data from table system.asynchronous_metrics
|
||||
-->
|
||||
<!--
|
||||
<prometheus>
|
||||
<endpoint>/metrics</endpoint>
|
||||
<port>9363</port>
|
||||
|
||||
<metrics>true</metrics>
|
||||
<events>true</events>
|
||||
<asynchronous_metrics>true</asynchronous_metrics>
|
||||
</prometheus>
|
||||
-->
|
||||
|
||||
<!-- Query log. Used only for queries with setting log_queries = 1. -->
|
||||
<query_log>
|
||||
<!-- What table to insert data. If table is not exist, it will be created.
|
||||
When query log structure is changed after system update,
|
||||
then old table will be renamed and new table will be created automatically.
|
||||
-->
|
||||
<database>system</database>
|
||||
<table>query_log</table>
|
||||
<!--
|
||||
PARTITION BY expr https://clickhouse.yandex/docs/en/table_engines/custom_partitioning_key/
|
||||
Example:
|
||||
event_date
|
||||
toMonday(event_date)
|
||||
toYYYYMM(event_date)
|
||||
toStartOfHour(event_time)
|
||||
-->
|
||||
<partition_by>toYYYYMM(event_date)</partition_by>
|
||||
<!-- Interval of flushing data. -->
|
||||
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
|
||||
</query_log>
|
||||
|
||||
<!-- Trace log. Stores stack traces collected by query profilers.
|
||||
See query_profiler_real_time_period_ns and query_profiler_cpu_time_period_ns settings. -->
|
||||
<trace_log>
|
||||
<database>system</database>
|
||||
<table>trace_log</table>
|
||||
|
||||
<partition_by>toYYYYMM(event_date)</partition_by>
|
||||
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
|
||||
</trace_log>
|
||||
|
||||
<!-- Query thread log. Has information about all threads participated in query execution.
|
||||
Used only for queries with setting log_query_threads = 1. -->
|
||||
<query_thread_log>
|
||||
<database>system</database>
|
||||
<table>query_thread_log</table>
|
||||
<partition_by>toYYYYMM(event_date)</partition_by>
|
||||
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
|
||||
</query_thread_log>
|
||||
|
||||
<!-- Uncomment if use part log.
|
||||
Part log contains information about all actions with parts in MergeTree tables (creation, deletion, merges, downloads).
|
||||
<part_log>
|
||||
<database>system</database>
|
||||
<table>part_log</table>
|
||||
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
|
||||
</part_log>
|
||||
-->
|
||||
|
||||
<!-- Uncomment to write text log into table.
|
||||
Text log contains all information from usual server log but stores it in structured and efficient way.
|
||||
<text_log>
|
||||
<database>system</database>
|
||||
<table>text_log</table>
|
||||
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
|
||||
</text_log>
|
||||
-->
|
||||
|
||||
<!-- Uncomment to write metric log into table.
|
||||
Metric log contains rows with current values of ProfileEvents, CurrentMetrics collected with "collect_interval_milliseconds" interval.
|
||||
<metric_log>
|
||||
<database>system</database>
|
||||
<table>metric_log</table>
|
||||
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
|
||||
<collect_interval_milliseconds>1000</collect_interval_milliseconds>
|
||||
</metric_log>
|
||||
-->
|
||||
|
||||
<!-- Parameters for embedded dictionaries, used in Yandex.Metrica.
|
||||
See https://clickhouse.yandex/docs/en/dicts/internal_dicts/
|
||||
-->
|
||||
|
||||
<!-- Path to file with region hierarchy. -->
|
||||
<!-- <path_to_regions_hierarchy_file>/opt/geo/regions_hierarchy.txt</path_to_regions_hierarchy_file> -->
|
||||
|
||||
<!-- Path to directory with files containing names of regions -->
|
||||
<!-- <path_to_regions_names_files>/opt/geo/</path_to_regions_names_files> -->
|
||||
|
||||
|
||||
<!-- Configuration of external dictionaries. See:
|
||||
https://clickhouse.yandex/docs/en/dicts/external_dicts/
|
||||
-->
|
||||
<dictionaries_config>*_dictionary.xml</dictionaries_config>
|
||||
|
||||
<!-- Uncomment if you want data to be compressed 30-100% better.
|
||||
Don't do that if you just started using ClickHouse.
|
||||
-->
|
||||
<compression incl="clickhouse_compression">
|
||||
<!--
|
||||
<!- - Set of variants. Checked in order. Last matching case wins. If nothing matches, lz4 will be used. - ->
|
||||
<case>
|
||||
|
||||
<!- - Conditions. All must be satisfied. Some conditions may be omitted. - ->
|
||||
<min_part_size>10000000000</min_part_size> <!- - Min part size in bytes. - ->
|
||||
<min_part_size_ratio>0.01</min_part_size_ratio> <!- - Min size of part relative to whole table size. - ->
|
||||
|
||||
<!- - What compression method to use. - ->
|
||||
<method>zstd</method>
|
||||
</case>
|
||||
-->
|
||||
</compression>
|
||||
|
||||
<!-- Allow to execute distributed DDL queries (CREATE, DROP, ALTER, RENAME) on cluster.
|
||||
Works only if ZooKeeper is enabled. Comment it if such functionality isn't required. -->
|
||||
<distributed_ddl>
|
||||
<!-- Path in ZooKeeper to queue with DDL queries -->
|
||||
<path>/clickhouse/task_queue/ddl</path>
|
||||
|
||||
<!-- Settings from this profile will be used to execute DDL queries -->
|
||||
<!-- <profile>default</profile> -->
|
||||
</distributed_ddl>
|
||||
|
||||
<!-- Settings to fine tune MergeTree tables. See documentation in source code, in MergeTreeSettings.h -->
|
||||
<!--
|
||||
<merge_tree>
|
||||
<max_suspicious_broken_parts>5</max_suspicious_broken_parts>
|
||||
</merge_tree>
|
||||
-->
|
||||
|
||||
<!-- Protection from accidental DROP.
|
||||
If size of a MergeTree table is greater than max_table_size_to_drop (in bytes) than table could not be dropped with any DROP query.
|
||||
If you want do delete one table and don't want to change clickhouse-server config, you could create special file <clickhouse-path>/flags/force_drop_table and make DROP once.
|
||||
By default max_table_size_to_drop is 50GB; max_table_size_to_drop=0 allows to DROP any tables.
|
||||
The same for max_partition_size_to_drop.
|
||||
Uncomment to disable protection.
|
||||
-->
|
||||
<!-- <max_table_size_to_drop>0</max_table_size_to_drop> -->
|
||||
<!-- <max_partition_size_to_drop>0</max_partition_size_to_drop> -->
|
||||
|
||||
<!-- Example of parameters for GraphiteMergeTree table engine -->
|
||||
<graphite_rollup_example>
|
||||
<pattern>
|
||||
<regexp>click_cost</regexp>
|
||||
<function>any</function>
|
||||
<retention>
|
||||
<age>0</age>
|
||||
<precision>3600</precision>
|
||||
</retention>
|
||||
<retention>
|
||||
<age>86400</age>
|
||||
<precision>60</precision>
|
||||
</retention>
|
||||
</pattern>
|
||||
<default>
|
||||
<function>max</function>
|
||||
<retention>
|
||||
<age>0</age>
|
||||
<precision>60</precision>
|
||||
</retention>
|
||||
<retention>
|
||||
<age>3600</age>
|
||||
<precision>300</precision>
|
||||
</retention>
|
||||
<retention>
|
||||
<age>86400</age>
|
||||
<precision>3600</precision>
|
||||
</retention>
|
||||
</default>
|
||||
</graphite_rollup_example>
|
||||
|
||||
<!-- Directory in <clickhouse-path> containing schema files for various input formats.
|
||||
The directory will be created if it doesn't exist.
|
||||
-->
|
||||
<format_schema_path>/var/lib/clickhouse/format_schemas/</format_schema_path>
|
||||
|
||||
|
||||
<!-- Uncomment to use query masking rules.
|
||||
name - name for the rule (optional)
|
||||
regexp - RE2 compatible regular expression (mandatory)
|
||||
replace - substitution string for sensitive data (optional, by default - six asterisks)
|
||||
<query_masking_rules>
|
||||
<rule>
|
||||
<name>hide SSN</name>
|
||||
<regexp>\b\d{3}-\d{2}-\d{4}\b</regexp>
|
||||
<replace>000-00-0000</replace>
|
||||
</rule>
|
||||
</query_masking_rules>
|
||||
-->
|
||||
|
||||
<!-- Uncomment to disable ClickHouse internal DNS caching. -->
|
||||
<!-- <disable_internal_dns_cache>1</disable_internal_dns_cache> -->
|
||||
</yandex>
|
0
clickhouse/docker-entrypoint-initdb.d/init-db.sh
Normal file
0
clickhouse/docker-entrypoint-initdb.d/init-db.sh
Normal file
12
clickhouse/docker_related_config.xml
Normal file
12
clickhouse/docker_related_config.xml
Normal file
@ -0,0 +1,12 @@
|
||||
<yandex>
|
||||
<!-- Listen wildcard address to allow accepting connections from other containers and host network. -->
|
||||
<listen_host>::</listen_host>
|
||||
<listen_host>0.0.0.0</listen_host>
|
||||
<listen_try>1</listen_try>
|
||||
|
||||
<!--
|
||||
<logger>
|
||||
<console>1</console>
|
||||
</logger>
|
||||
-->
|
||||
</yandex>
|
103
clickhouse/entrypoint.sh
Normal file
103
clickhouse/entrypoint.sh
Normal file
@ -0,0 +1,103 @@
|
||||
#!/bin/bash
# Entrypoint for the ClickHouse container.
# - Prepares the data/log/tmp directories and their ownership.
# - If /docker-entrypoint-initdb.d contains files, starts a temporary
#   server and runs *.sh / *.sql / *.sql.gz init scripts against it.
# - Finally execs clickhouse-server (or any user-supplied command).
#
# Env vars: CLICKHOUSE_CONFIG, CLICKHOUSE_USER, CLICKHOUSE_PASSWORD,
#           CLICKHOUSE_DO_NOT_CHOWN.

CLICKHOUSE_CONFIG="${CLICKHOUSE_CONFIG:-/etc/clickhouse-server/config.xml}"

# When running as root, drop privileges to the 'clickhouse' user via gosu;
# otherwise run as the current (already unprivileged) user. Using an array
# keeps the expansion split-safe ("${gosu[@]}" expands to nothing when empty).
if [ "$UID" -eq 0 ]; then
    USER="$(id -u clickhouse)"
    GROUP="$(id -g clickhouse)"
    gosu=(gosu "$USER:$GROUP")
else
    USER="$(id -u)"
    GROUP="$(id -g)"
    gosu=()
fi

# http_port is needed to check whether clickhouse-server is ready for connections.
HTTP_PORT="$(clickhouse extract-from-config --config-file "$CLICKHOUSE_CONFIG" --key=http_port)"

# Resolve ClickHouse directory locations from the server config.
# Each lookup is best-effort: a missing key yields an empty value, which the
# loop below skips.
DATA_DIR="$(clickhouse extract-from-config --config-file "$CLICKHOUSE_CONFIG" --key=path || true)"
TMP_DIR="$(clickhouse extract-from-config --config-file "$CLICKHOUSE_CONFIG" --key=tmp_path || true)"
USER_PATH="$(clickhouse extract-from-config --config-file "$CLICKHOUSE_CONFIG" --key=user_files_path || true)"
LOG_PATH="$(clickhouse extract-from-config --config-file "$CLICKHOUSE_CONFIG" --key=logger.log || true)"
LOG_DIR="$(dirname "$LOG_PATH" || true)"
ERROR_LOG_PATH="$(clickhouse extract-from-config --config-file "$CLICKHOUSE_CONFIG" --key=logger.errorlog || true)"
ERROR_LOG_DIR="$(dirname "$ERROR_LOG_PATH" || true)"
FORMAT_SCHEMA_PATH="$(clickhouse extract-from-config --config-file "$CLICKHOUSE_CONFIG" --key=format_schema_path || true)"
CLICKHOUSE_USER="${CLICKHOUSE_USER:-default}"

# Ensure every configured directory exists and is owned by the server user.
for dir in "$DATA_DIR" \
           "$ERROR_LOG_DIR" \
           "$LOG_DIR" \
           "$TMP_DIR" \
           "$USER_PATH" \
           "$FORMAT_SCHEMA_PATH"
do
    # Skip entries the config does not define.
    [ -z "$dir" ] && continue
    if ! mkdir -p "$dir"; then
        echo "Couldn't create necessary directory: $dir" >&2
        exit 1
    fi

    if [ "$UID" -eq 0 ] && [ "$CLICKHOUSE_DO_NOT_CHOWN" != "1" ]; then
        # Running as root: fix ownership for the unprivileged server user.
        chown -R "$USER:$GROUP" "$dir"
    elif [ "$(stat -c %u "$dir")" != "$USER" ]; then
        echo "Necessary directory '$dir' isn't owned by user with id '$USER'" >&2
        exit 1
    fi
done

if [ -n "$(ls /docker-entrypoint-initdb.d/)" ]; then
    # Start a temporary server instance to run the init scripts against.
    "${gosu[@]}" /usr/bin/clickhouse-server --config-file="$CLICKHOUSE_CONFIG" &
    pid="$!"

    # Wait until the server answers HTTP pings via http_port
    # (max 12 retries with 1 second delay between them).
    if ! wget --spider --quiet --tries=12 --waitretry=1 --retry-connrefused "http://localhost:$HTTP_PORT/ping" ; then
        echo >&2 'ClickHouse init process failed.'
        exit 1
    fi

    # Build the client command as an array so credentials containing special
    # characters survive intact. (The previous printf -v '%q' + unquoted
    # expansion left literal backslash escapes inside the password, because
    # word-splitting does not undo %q quoting.)
    clickhouseclient=(clickhouse-client --multiquery -u "$CLICKHOUSE_USER")
    if [ -n "$CLICKHOUSE_PASSWORD" ]; then
        clickhouseclient+=(--password "$CLICKHOUSE_PASSWORD")
    fi

    echo
    for f in /docker-entrypoint-initdb.d/*; do
        case "$f" in
            *.sh)
                if [ -x "$f" ]; then
                    echo "$0: running $f"
                    "$f"
                else
                    echo "$0: sourcing $f"
                    . "$f"
                fi
                ;;
            *.sql)    echo "$0: running $f"; "${clickhouseclient[@]}" < "$f"; echo ;;
            *.sql.gz) echo "$0: running $f"; gunzip -c "$f" | "${clickhouseclient[@]}"; echo ;;
            *)        echo "$0: ignoring $f" ;;
        esac
        echo
    done

    # Stop the temporary server and propagate failure if shutdown fails.
    if ! kill -s TERM "$pid" || ! wait "$pid"; then
        echo >&2 'Finishing of ClickHouse init process failed.'
        exit 1
    fi
fi

# If no args were passed to `docker run`, or the first argument starts with
# `--`, the user is passing clickhouse-server arguments.
if [[ $# -lt 1 ]] || [[ "$1" == "--"* ]]; then
    exec "${gosu[@]}" /usr/bin/clickhouse-server --config-file="$CLICKHOUSE_CONFIG" "$@"
fi

# Otherwise run the user-supplied command, e.g. a `bash` shell to explore
# this image.
exec "$@"
|
138
clickhouse/users.xml
Normal file
138
clickhouse/users.xml
Normal file
@ -0,0 +1,138 @@
|
||||
<?xml version="1.0"?>
|
||||
<yandex>
|
||||
<!-- Profiles of settings. -->
|
||||
<profiles>
|
||||
<!-- Default settings. -->
|
||||
<default>
|
||||
<!-- Maximum memory usage for processing single query, in bytes. -->
|
||||
<max_memory_usage>10000000000</max_memory_usage>
|
||||
|
||||
<!-- Use cache of uncompressed blocks of data. Meaningful only for processing many of very short queries. -->
|
||||
<use_uncompressed_cache>0</use_uncompressed_cache>
|
||||
|
||||
<!-- How to choose between replicas during distributed query processing.
|
||||
random - choose random replica from set of replicas with minimum number of errors
|
||||
nearest_hostname - from set of replicas with minimum number of errors, choose replica
|
||||
with minimum number of different symbols between replica's hostname and local hostname
|
||||
(Hamming distance).
|
||||
in_order - first live replica is chosen in specified order.
|
||||
first_or_random - if first replica one has higher number of errors, pick a random one from replicas with minimum number of errors.
|
||||
-->
|
||||
<load_balancing>random</load_balancing>
|
||||
</default>
|
||||
|
||||
<!-- Profile that allows only read queries. -->
|
||||
<readonly>
|
||||
<readonly>1</readonly>
|
||||
</readonly>
|
||||
</profiles>
|
||||
|
||||
<!-- Users and ACL. -->
|
||||
<users>
|
||||
<!-- If user name was not specified, 'default' user is used. -->
|
||||
<default>
|
||||
<!-- Password could be specified in plaintext or in SHA256 (in hex format).
|
||||
|
||||
If you want to specify password in plaintext (not recommended), place it in 'password' element.
|
||||
Example: <password>qwerty</password>.
|
||||
Password could be empty.
|
||||
|
||||
If you want to specify SHA256, place it in 'password_sha256_hex' element.
|
||||
Example: <password_sha256_hex>65e84be33532fb784c48129675f9eff3a682b27168c0ea744b2cf58ee02337c5</password_sha256_hex>
|
||||
Restrictions of SHA256: impossibility to connect to ClickHouse using MySQL JS client (as of July 2019).
|
||||
|
||||
If you want to specify double SHA1, place it in 'password_double_sha1_hex' element.
|
||||
Example: <password_double_sha1_hex>e395796d6546b1b65db9d665cd43f0e858dd4303</password_double_sha1_hex>
|
||||
|
||||
How to generate decent password:
|
||||
Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | sha256sum | tr -d '-'
|
||||
In first line will be password and in second - corresponding SHA256.
|
||||
|
||||
How to generate double SHA1:
|
||||
Execute: PASSWORD=$(base64 < /dev/urandom | head -c8); echo "$PASSWORD"; echo -n "$PASSWORD" | openssl dgst -sha1 -binary | openssl dgst -sha1
|
||||
In first line will be password and in second - corresponding double SHA1.
|
||||
-->
|
||||
<password>secret</password>
|
||||
|
||||
<!-- List of networks with open access.
|
||||
|
||||
To open access from everywhere, specify:
|
||||
<ip>::/0</ip>
|
||||
|
||||
To open access only from localhost, specify:
|
||||
<ip>::1</ip>
|
||||
<ip>127.0.0.1</ip>
|
||||
|
||||
Each element of list has one of the following forms:
|
||||
<ip> IP-address or network mask. Examples: 213.180.204.3 or 10.0.0.1/8 or 10.0.0.1/255.255.255.0
|
||||
2a02:6b8::3 or 2a02:6b8::3/64 or 2a02:6b8::3/ffff:ffff:ffff:ffff::.
|
||||
<host> Hostname. Example: server01.yandex.ru.
|
||||
To check access, DNS query is performed, and all received addresses compared to peer address.
|
||||
<host_regexp> Regular expression for host names. Example, ^server\d\d-\d\d-\d\.yandex\.ru$
|
||||
To check access, DNS PTR query is performed for peer address and then regexp is applied.
|
||||
Then, for result of PTR query, another DNS query is performed and all received addresses compared to peer address.
|
||||
Strongly recommended that regexp is ends with $
|
||||
All results of DNS requests are cached till server restart.
|
||||
-->
|
||||
<networks incl="networks" replace="replace">
|
||||
<ip>::/0</ip>
|
||||
</networks>
|
||||
|
||||
<!-- Settings profile for user. -->
|
||||
<profile>default</profile>
|
||||
|
||||
<!-- Quota for user. -->
|
||||
<quota>default</quota>
|
||||
|
||||
<!-- For testing the table filters -->
|
||||
<databases>
|
||||
<test>
|
||||
<!-- Simple expression filter -->
|
||||
<filtered_table1>
|
||||
<filter>a = 1</filter>
|
||||
</filtered_table1>
|
||||
|
||||
<!-- Complex expression filter -->
|
||||
<filtered_table2>
|
||||
<filter>a + b < 1 or c - d > 5</filter>
|
||||
</filtered_table2>
|
||||
|
||||
<!-- Filter with ALIAS column -->
|
||||
<filtered_table3>
|
||||
<filter>c = 1</filter>
|
||||
</filtered_table3>
|
||||
</test>
|
||||
</databases>
|
||||
</default>
|
||||
|
||||
<!-- Example of user with readonly access. -->
|
||||
<!-- <readonly>
|
||||
<password></password>
|
||||
<networks incl="networks" replace="replace">
|
||||
<ip>::1</ip>
|
||||
<ip>127.0.0.1</ip>
|
||||
</networks>
|
||||
<profile>readonly</profile>
|
||||
<quota>default</quota>
|
||||
</readonly> -->
|
||||
</users>
|
||||
|
||||
<!-- Quotas. -->
|
||||
<quotas>
|
||||
<!-- Name of quota. -->
|
||||
<default>
|
||||
<!-- Limits for time interval. You could specify many intervals with different limits. -->
|
||||
<interval>
|
||||
<!-- Length of interval. -->
|
||||
<duration>3600</duration>
|
||||
|
||||
<!-- No limits. Just calculate resource usage for time interval. -->
|
||||
<queries>0</queries>
|
||||
<errors>0</errors>
|
||||
<result_rows>0</result_rows>
|
||||
<read_rows>0</read_rows>
|
||||
<execution_time>0</execution_time>
|
||||
</interval>
|
||||
</default>
|
||||
</quotas>
|
||||
</yandex>
|
10
docker-compose.neo4j.yml
Normal file
10
docker-compose.neo4j.yml
Normal file
@ -0,0 +1,10 @@
|
||||
neo4j:
|
||||
ports:
|
||||
- '7401:7474'
|
||||
- '7602:7687'
|
||||
environment:
|
||||
- NEO4J_AUTH=none
|
||||
volumes:
|
||||
- '/root/neo4j/data:/data'
|
||||
- '/root/neo4j/logs:/logs'
|
||||
image: 'neo4j:latest'
|
@ -1,4 +1,4 @@
|
||||
version: '3'
|
||||
version: '3.5'
|
||||
|
||||
services:
|
||||
|
||||
|
@ -1,4 +1,4 @@
|
||||
version: '3'
|
||||
version: '3.5'
|
||||
|
||||
networks:
|
||||
frontend:
|
||||
@ -53,6 +53,8 @@ volumes:
|
||||
driver: ${VOLUMES_DRIVER}
|
||||
docker-in-docker:
|
||||
driver: ${VOLUMES_DRIVER}
|
||||
react:
|
||||
driver: ${VOLUMES_DRIVER}
|
||||
|
||||
services:
|
||||
|
||||
@ -91,6 +93,10 @@ services:
|
||||
- NVM_NODEJS_ORG_MIRROR=${WORKSPACE_NVM_NODEJS_ORG_MIRROR}
|
||||
- INSTALL_NODE=${WORKSPACE_INSTALL_NODE}
|
||||
- NPM_REGISTRY=${WORKSPACE_NPM_REGISTRY}
|
||||
- NPM_FETCH_RETRIES=${WORKSPACE_NPM_FETCH_RETRIES}
|
||||
- NPM_FETCH_RETRY_FACTOR=${WORKSPACE_NPM_FETCH_RETRY_FACTOR}
|
||||
- NPM_FETCH_RETRY_MINTIMEOUT=${WORKSPACE_NPM_FETCH_RETRY_MINTIMEOUT}
|
||||
- NPM_FETCH_RETRY_MAXTIMEOUT=${WORKSPACE_NPM_FETCH_RETRY_MAXTIMEOUT}
|
||||
- INSTALL_PNPM=${WORKSPACE_INSTALL_PNPM}
|
||||
- INSTALL_YARN=${WORKSPACE_INSTALL_YARN}
|
||||
- INSTALL_NPM_GULP=${WORKSPACE_INSTALL_NPM_GULP}
|
||||
@ -254,6 +260,9 @@ services:
|
||||
- PUID=${PHP_FPM_PUID}
|
||||
- PGID=${PHP_FPM_PGID}
|
||||
- LOCALE=${PHP_FPM_DEFAULT_LOCALE}
|
||||
- PHP_FPM_NEW_RELIC=${PHP_FPM_NEW_RELIC}
|
||||
- PHP_FPM_NEW_RELIC_KEY=${PHP_FPM_NEW_RELIC_KEY}
|
||||
- PHP_FPM_NEW_RELIC_APP_NAME=${PHP_FPM_NEW_RELIC_APP_NAME}
|
||||
- http_proxy
|
||||
- https_proxy
|
||||
- no_proxy
|
||||
@ -568,14 +577,15 @@ services:
|
||||
|
||||
### Neo4j ################################################
|
||||
neo4j:
|
||||
build: ./neo4j
|
||||
ports:
|
||||
- "7474:7474"
|
||||
- "1337:1337"
|
||||
- '7401:7474'
|
||||
- '7402:7687'
|
||||
environment:
|
||||
- NEO4J_AUTH=default:secret
|
||||
- NEO4J_AUTH=none
|
||||
volumes:
|
||||
- ${DATA_PATH_HOST}/neo4j:/var/lib/neo4j/data
|
||||
- ${DATA_PATH_HOST}/neo4j/data:/data
|
||||
- ${DATA_PATH_HOST}/neo4j/logs:/logs
|
||||
image: 'neo4j:latest'
|
||||
networks:
|
||||
- backend
|
||||
|
||||
@ -600,7 +610,38 @@ services:
|
||||
networks:
|
||||
- backend
|
||||
|
||||
### Redis ################################################
|
||||
### ClickHouse #############################################
|
||||
clickhouse:
|
||||
build:
|
||||
context: ./clickhouse
|
||||
args:
|
||||
- CLICKHOUSE_VERSION=${CLICKHOUSE_VERSION}
|
||||
- CLICKHOUSE_GOSU_VERSION=${CLICKHOUSE_GOSU_VERSION}
|
||||
environment:
|
||||
- CLICKHOUSE_USER=${CLICKHOUSE_USER}
|
||||
- CLICKHOUSE_PASSWORD=${CLICKHOUSE_PASSWORD}
|
||||
volumes:
|
||||
- ${DATA_PATH_HOST}/clickhouse:/var/lib/clickhouse
|
||||
- ${CLICKHOUSE_CUSTOM_CONFIG}:/etc/clickhouse-server/config.xml
|
||||
- ${CLICKHOUSE_USERS_CUSTOM_CONFIG}:/etc/clickhouse-server/users.xml
|
||||
- ${CLICKHOUSE_HOST_LOG_PATH}:/var/log/clickhouse
|
||||
- ${CLICKHOUSE_ENTRYPOINT_INITDB}:/docker-entrypoint-initdb.d
|
||||
links:
|
||||
- workspace
|
||||
ports:
|
||||
- "${CLICKHOUSE_HTTP_PORT}:8123"
|
||||
- "${CLICKHOUSE_CLIENT_PORT}:9000"
|
||||
- "${CLICKHOUSE_NATIVE_PORT}:9009"
|
||||
ulimits:
|
||||
nproc: 65535
|
||||
nofile:
|
||||
soft: 262144
|
||||
hard: 262144
|
||||
networks:
|
||||
# - frontend
|
||||
- backend
|
||||
|
||||
### Redis ################################################
|
||||
redis:
|
||||
build: ./redis
|
||||
volumes:
|
||||
@ -1817,3 +1858,20 @@ services:
|
||||
- ${DATA_PATH_HOST}/tomcat/logs:/usr/local/tomcat/logs
|
||||
# restart: always
|
||||
|
||||
### react #####################################################
|
||||
react:
|
||||
build:
|
||||
context: ./react
|
||||
ports:
|
||||
- "3000:3000"
|
||||
container_name: react
|
||||
stdin_open: true
|
||||
environment:
|
||||
- CHOKIDAR_USEPOLLING=true
|
||||
networks:
|
||||
- frontend
|
||||
- backend
|
||||
volumes:
|
||||
- ./react:/usr/src/app/react
|
||||
- /usr/src/app/react/node_modules
|
||||
|
||||
|
26
env-example
26
env-example
@ -76,6 +76,8 @@ COMPOSE_CONVERT_WINDOWS_PATHS=1
|
||||
CHANGE_SOURCE=false
|
||||
# Set CHANGE_SOURCE and UBUNTU_SOURCE option if you want to change the Ubuntu system sources.list file.
|
||||
UBUNTU_SOURCE=aliyun
|
||||
# Set ORACLE INSTANT_CLIENT_MIRROR option if you want to use Intranet improve download, you can download files first
|
||||
ORACLE_INSTANT_CLIENT_MIRROR=https://github.com/diogomascarenha/oracle-instantclient/raw/master/
|
||||
|
||||
### Docker Sync ###########################################
|
||||
|
||||
@ -104,6 +106,10 @@ WORKSPACE_NVM_NODEJS_ORG_MIRROR=
|
||||
WORKSPACE_INSTALL_NODE=true
|
||||
WORKSPACE_NODE_VERSION=node
|
||||
WORKSPACE_NPM_REGISTRY=
|
||||
WORKSPACE_NPM_FETCH_RETRIES=2
|
||||
WORKSPACE_NPM_FETCH_RETRY_FACTOR=10
|
||||
WORKSPACE_NPM_FETCH_RETRY_MINTIMEOUT=10000
|
||||
WORKSPACE_NPM_FETCH_RETRY_MAXTIMEOUT=60000
|
||||
WORKSPACE_INSTALL_PNPM=false
|
||||
WORKSPACE_INSTALL_YARN=true
|
||||
WORKSPACE_YARN_VERSION=latest
|
||||
@ -252,6 +258,12 @@ PHP_FPM_DEFAULT_LOCALE=POSIX
|
||||
PHP_FPM_PUID=1000
|
||||
PHP_FPM_PGID=1000
|
||||
|
||||
### PHP_FPM_NEW_RELIC #####################################
|
||||
|
||||
PHP_FPM_NEW_RELIC=false
|
||||
PHP_FPM_NEW_RELIC_KEY=0000
|
||||
PHP_FPM_NEW_RELIC_APP_NAME=app_name
|
||||
|
||||
### PHP_WORKER ############################################
|
||||
|
||||
PHP_WORKER_INSTALL_BZ2=false
|
||||
@ -329,6 +341,20 @@ MYSQL_PORT=3306
|
||||
MYSQL_ROOT_PASSWORD=root
|
||||
MYSQL_ENTRYPOINT_INITDB=./mysql/docker-entrypoint-initdb.d
|
||||
|
||||
### CLICKHOUSE #################################################
|
||||
|
||||
CLICKHOUSE_VERSION=20.9.4.76
|
||||
CLICKHOUSE_GOSU_VERSION=1.10
|
||||
CLICKHOUSE_CUSTOM_CONFIG=./clickhouse/config.xml
|
||||
CLICKHOUSE_USERS_CUSTOM_CONFIG=./clickhouse/users.xml
|
||||
CLICKHOUSE_USER=default
|
||||
CLICKHOUSE_PASSWORD=HAHA
|
||||
CLICKHOUSE_HTTP_PORT=8123
|
||||
CLICKHOUSE_CLIENT_PORT=9000
|
||||
CLICKHOUSE_NATIVE_PORT=9009
|
||||
CLICKHOUSE_ENTRYPOINT_INITDB=./clickhouse/docker-entrypoint-initdb.d
|
||||
CLICKHOUSE_HOST_LOG_PATH=./logs/clickhouse
|
||||
|
||||
### REDIS #################################################
|
||||
|
||||
REDIS_PORT=6379
|
||||
|
@ -57,7 +57,11 @@ RUN if [ ${INSTALL_BZ2} = true ]; then \
|
||||
ARG INSTALL_GD=false
|
||||
RUN if [ ${INSTALL_GD} = true ]; then \
|
||||
apk add --update --no-cache freetype-dev libjpeg-turbo-dev jpeg-dev libpng-dev; \
|
||||
docker-php-ext-configure gd --with-freetype-dir=/usr/lib/ --with-jpeg-dir=/usr/lib/ --with-png-dir=/usr/lib/ && \
|
||||
if [ ${LARADOCK_PHP_VERSION} = "7.4" ]; then \
|
||||
docker-php-ext-configure gd --with-freetype --with-jpeg; \
|
||||
else \
|
||||
docker-php-ext-configure gd --with-freetype-dir=/usr/lib/ --with-jpeg-dir=/usr/lib/ --with-png-dir=/usr/lib/; \
|
||||
fi && \
|
||||
docker-php-ext-install gd \
|
||||
;fi
|
||||
|
||||
@ -102,7 +106,7 @@ ARG INSTALL_ZIP_ARCHIVE=false
|
||||
RUN set -eux; \
|
||||
if [ ${INSTALL_ZIP_ARCHIVE} = true ]; then \
|
||||
apk --update add libzip-dev && \
|
||||
if [ ${LARADOCK_PHP_VERSION} = "7.3" ] || [ ${LARADOCK_PHP_VERSION} = "7.4" ]; then \
|
||||
if [ ${LARADOCK_PHP_VERSION} = "7.3" ] || [ ${LARADOCK_PHP_VERSION} = "7.4" ] || [ $(php -r "echo PHP_MAJOR_VERSION;") = "8" ]; then \
|
||||
docker-php-ext-configure zip; \
|
||||
else \
|
||||
docker-php-ext-configure zip --with-libzip; \
|
||||
|
0
neo4j/.sentinel
Normal file
0
neo4j/.sentinel
Normal file
@ -1,7 +1,51 @@
|
||||
FROM tpires/neo4j
|
||||
FROM openjdk:8-jre-slim
|
||||
|
||||
LABEL maintainer="Mahmoud Zalt <mahmoud@zalt.me>"
|
||||
ENV NEO4J_SHA256=1c8b6ac0ffd346f0707fe1af713ef74f1c6ce1ea6feb5e9a0bd170e7a8a34a10 \
|
||||
NEO4J_TARBALL=neo4j-community-3.5.17-unix.tar.gz \
|
||||
NEO4J_EDITION=community \
|
||||
NEO4J_HOME="/var/lib/neo4j" \
|
||||
TINI_VERSION="v0.18.0" \
|
||||
TINI_SHA256="12d20136605531b09a2c2dac02ccee85e1b874eb322ef6baf7561cd93f93c855"
|
||||
ARG NEO4J_URI=https://dist.neo4j.org/neo4j-community-3.5.17-unix.tar.gz
|
||||
|
||||
VOLUME /var/lib/neo4j/data
|
||||
RUN addgroup --system neo4j && adduser --system --no-create-home --home "${NEO4J_HOME}" --ingroup neo4j neo4j
|
||||
|
||||
EXPOSE 7474 1337
|
||||
COPY ./local-package/* /tmp/
|
||||
|
||||
RUN apt update \
|
||||
&& apt install -y curl wget gosu jq \
|
||||
&& curl -L --fail --silent --show-error "https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini" > /sbin/tini \
|
||||
&& echo "${TINI_SHA256} /sbin/tini" | sha256sum -c --strict --quiet \
|
||||
&& chmod +x /sbin/tini \
|
||||
&& curl --fail --silent --show-error --location --remote-name ${NEO4J_URI} \
|
||||
&& echo "${NEO4J_SHA256} ${NEO4J_TARBALL}" | sha256sum -c --strict --quiet \
|
||||
&& tar --extract --file ${NEO4J_TARBALL} --directory /var/lib \
|
||||
&& mv /var/lib/neo4j-* "${NEO4J_HOME}" \
|
||||
&& rm ${NEO4J_TARBALL} \
|
||||
&& mv "${NEO4J_HOME}"/data /data \
|
||||
&& mv "${NEO4J_HOME}"/logs /logs \
|
||||
&& chown -R neo4j:neo4j /data \
|
||||
&& chmod -R 777 /data \
|
||||
&& chown -R neo4j:neo4j /logs \
|
||||
&& chmod -R 777 /logs \
|
||||
&& chown -R neo4j:neo4j "${NEO4J_HOME}" \
|
||||
&& chmod -R 777 "${NEO4J_HOME}" \
|
||||
&& ln -s /data "${NEO4J_HOME}"/data \
|
||||
&& ln -s /logs "${NEO4J_HOME}"/logs \
|
||||
&& mv /tmp/neo4jlabs-plugins.json /neo4jlabs-plugins.json \
|
||||
&& rm -rf /tmp/* \
|
||||
&& rm -rf /var/lib/apt/lists/* \
|
||||
&& apt-get -y purge --auto-remove curl
|
||||
|
||||
ENV PATH "${NEO4J_HOME}"/bin:$PATH
|
||||
|
||||
WORKDIR "${NEO4J_HOME}"
|
||||
|
||||
VOLUME /data /logs
|
||||
|
||||
COPY docker-entrypoint.sh /docker-entrypoint.sh
|
||||
|
||||
EXPOSE 7474 7473 7687
|
||||
|
||||
ENTRYPOINT ["/sbin/tini", "-g", "--", "/docker-entrypoint.sh"]
|
||||
CMD ["neo4j"]
|
||||
|
484
neo4j/docker-entrypoint.sh
Executable file
484
neo4j/docker-entrypoint.sh
Executable file
@ -0,0 +1,484 @@
|
||||
#!/bin/bash -eu
|
||||
|
||||
cmd="$1"
|
||||
|
||||
function running_as_root
|
||||
{
|
||||
test "$(id -u)" = "0"
|
||||
}
|
||||
|
||||
function secure_mode_enabled
|
||||
{
|
||||
test "${SECURE_FILE_PERMISSIONS:=no}" = "yes"
|
||||
}
|
||||
|
||||
function containsElement
|
||||
{
|
||||
local e match="$1"
|
||||
shift
|
||||
for e; do [[ "$e" == "$match" ]] && return 0; done
|
||||
return 1
|
||||
}
|
||||
|
||||
function is_readable
|
||||
{
|
||||
# this code is fairly ugly but works no matter who this script is running as.
|
||||
# It would be nice if the writability tests could use this logic somehow.
|
||||
local _file=${1}
|
||||
perm=$(stat -c %a "${_file}")
|
||||
|
||||
# everyone permission
|
||||
if [[ ${perm:2:1} -ge 4 ]]; then
|
||||
return 0
|
||||
fi
|
||||
# owner permissions
|
||||
if [[ ${perm:0:1} -ge 4 ]]; then
|
||||
if [[ "$(stat -c %U ${_file})" = "${userid}" ]] || [[ "$(stat -c %u ${_file})" = "${userid}" ]]; then
|
||||
return 0
|
||||
fi
|
||||
fi
|
||||
# group permissions
|
||||
if [[ ${perm:1:1} -ge 4 ]]; then
|
||||
if containsElement "$(stat -c %g ${_file})" "${groups[@]}" || containsElement "$(stat -c %G ${_file})" "${groups[@]}" ; then
|
||||
return 0
|
||||
fi
|
||||
fi
|
||||
return 1
|
||||
}
|
||||
|
||||
function is_writable
|
||||
{
|
||||
# It would be nice if this and the is_readable function could combine somehow
|
||||
local _file=${1}
|
||||
perm=$(stat -c %a "${_file}")
|
||||
|
||||
# everyone permission
|
||||
if containsElement ${perm:2:1} 2 3 6 7; then
|
||||
return 0
|
||||
fi
|
||||
# owner permissions
|
||||
if containsElement ${perm:0:1} 2 3 6 7; then
|
||||
if [[ "$(stat -c %U ${_file})" = "${userid}" ]] || [[ "$(stat -c %u ${_file})" = "${userid}" ]]; then
|
||||
return 0
|
||||
fi
|
||||
fi
|
||||
# group permissions
|
||||
if containsElement ${perm:1:1} 2 3 6 7; then
|
||||
if containsElement "$(stat -c %g ${_file})" "${groups[@]}" || containsElement "$(stat -c %G ${_file})" "${groups[@]}" ; then
|
||||
return 0
|
||||
fi
|
||||
fi
|
||||
return 1
|
||||
}
|
||||
|
||||
|
||||
function print_permissions_advice_and_fail
|
||||
{
|
||||
_directory=${1}
|
||||
echo >&2 "
|
||||
Folder ${_directory} is not accessible for user: ${userid} or group ${groupid} or groups ${groups[@]}, this is commonly a file permissions issue on the mounted folder.
|
||||
|
||||
Hints to solve the issue:
|
||||
1) Make sure the folder exists before mounting it. Docker will create the folder using root permissions before starting the Neo4j container. The root permissions disallow Neo4j from writing to the mounted folder.
|
||||
2) Pass the folder owner's user ID and group ID to docker run, so that docker runs as that user.
|
||||
If the folder is owned by the current user, this can be done by adding this flag to your docker run command:
|
||||
--user=\$(id -u):\$(id -g)
|
||||
"
|
||||
exit 1
|
||||
}
|
||||
|
||||
function check_mounted_folder_readable
|
||||
{
|
||||
local _directory=${1}
|
||||
if ! is_readable "${_directory}"; then
|
||||
print_permissions_advice_and_fail "${_directory}"
|
||||
fi
|
||||
}
|
||||
|
||||
function check_mounted_folder_with_chown
|
||||
{
|
||||
# The /data and /log directory are a bit different because they are very likely to be mounted by the user but not
|
||||
# necessarily writable.
|
||||
# This depends on whether a user ID is passed to the container and which folders are mounted.
|
||||
#
|
||||
# No user ID passed to container:
|
||||
# 1) No folders are mounted.
|
||||
# The /data and /log folder are owned by neo4j by default, so should be writable already.
|
||||
# 2) Both /log and /data are mounted.
|
||||
# This means on start up, /data and /logs are owned by an unknown user and we should chown them to neo4j for
|
||||
# backwards compatibility.
|
||||
#
|
||||
# User ID passed to container:
|
||||
# 1) Both /data and /logs are mounted
|
||||
# The /data and /logs folders are owned by an unknown user but we *should* have rw permission to them.
|
||||
# That should be verified and error (helpfully) if not.
|
||||
# 2) User mounts /data or /logs *but not both*
|
||||
# The unmounted folder is still owned by neo4j, which should already be writable. The mounted folder should
|
||||
# have rw permissions through user id. This should be verified.
|
||||
# 3) No folders are mounted.
|
||||
# The /data and /log folder are owned by neo4j by default, and these are already writable by the user.
|
||||
# (This is a very unlikely use case).
|
||||
|
||||
local mountFolder=${1}
|
||||
if running_as_root; then
|
||||
if ! is_writable "${mountFolder}" && ! secure_mode_enabled; then
|
||||
# warn that we're about to chown the folder and then chown it
|
||||
echo "Warning: Folder mounted to \"${mountFolder}\" is not writable from inside container. Changing folder owner to ${userid}."
|
||||
chown -R "${userid}":"${groupid}" "${mountFolder}"
|
||||
fi
|
||||
else
|
||||
if [[ ! -w "${mountFolder}" ]] && [[ "$(stat -c %U ${mountFolder})" != "neo4j" ]]; then
|
||||
print_permissions_advice_and_fail "${mountFolder}"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
function load_plugin_from_github
|
||||
{
|
||||
# Load a plugin at runtime. The provided github repository must have a versions.json on the master branch with the
|
||||
# correct format.
|
||||
local _plugin_name="${1}" #e.g. apoc, graph-algorithms, graph-ql
|
||||
|
||||
local _plugins_dir="${NEO4J_HOME}/plugins"
|
||||
if [ -d /plugins ]; then
|
||||
local _plugins_dir="/plugins"
|
||||
fi
|
||||
local _versions_json_url="$(jq --raw-output "with_entries( select(.key==\"${_plugin_name}\") ) | to_entries[] | .value.versions" /neo4jlabs-plugins.json )"
|
||||
# Using the same name for the plugin irrespective of version ensures we don't end up with different versions of the same plugin
|
||||
local _destination="${_plugins_dir}/${_plugin_name}.jar"
|
||||
local _neo4j_version="$(neo4j --version | cut -d' ' -f2)"
|
||||
|
||||
# Now we call out to github to get the versions.json for this plugin and we parse that to find the url for the correct plugin jar for our neo4j version
|
||||
echo "Fetching versions.json for Plugin '${_plugin_name}' from ${_versions_json_url}"
|
||||
local _versions_json="$(wget -q --timeout 300 --tries 30 -O - "${_versions_json_url}")"
|
||||
local _plugin_jar_url="$(echo "${_versions_json}" | jq --raw-output ".[] | select(.neo4j==\"${_neo4j_version}\") | .jar")"
|
||||
if [[ -z "${_plugin_jar_url}" ]]; then
|
||||
echo >&2 "Error: No jar URL found for version '${_neo4j_version}' in versions.json from '${_versions_json_url}'"
|
||||
echo >&2 "${_versions_json}"
|
||||
exit 1
|
||||
fi
|
||||
echo "Installing Plugin '${_plugin_name}' from ${_plugin_jar_url} to ${_destination} "
|
||||
wget -q --timeout 300 --tries 30 --output-document="${_destination}" "${_plugin_jar_url}"
|
||||
|
||||
if ! is_readable "${_destination}"; then
|
||||
echo >&2 "Plugin at '${_destination}' is not readable"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
function apply_plugin_default_configuration
|
||||
{
|
||||
# Set the correct Load a plugin at runtime. The provided github repository must have a versions.json on the master branch with the
|
||||
# correct format.
|
||||
local _plugin_name="${1}" #e.g. apoc, graph-algorithms, graph-ql
|
||||
local _reference_conf="${2}" # used to determine if we can override properties
|
||||
local _neo4j_conf="${NEO4J_HOME}/conf/neo4j.conf"
|
||||
|
||||
local _property _value
|
||||
echo "Applying default values for plugin ${_plugin_name} to neo4j.conf"
|
||||
for _entry in $(jq --compact-output --raw-output "with_entries( select(.key==\"${_plugin_name}\") ) | to_entries[] | .value.properties | to_entries[]" /neo4jlabs-plugins.json); do
|
||||
_property="$(jq --raw-output '.key' <<< "${_entry}")"
|
||||
_value="$(jq --raw-output '.value' <<< "${_entry}")"
|
||||
|
||||
# the first grep strips out comments
|
||||
if grep -o "^[^#]*" "${_reference_conf}" | grep -q --fixed-strings "${_property}=" ; then
|
||||
# property is already set in the user provided config. In this case we don't override what has been set explicitly by the user.
|
||||
echo "Skipping ${_property} for plugin ${_plugin_name} because it is already set"
|
||||
else
|
||||
if grep -o "^[^#]*" "${_neo4j_conf}" | grep -q --fixed-strings "${_property}=" ; then
|
||||
sed --in-place "s/${_property}=/&${_value},/" "${_neo4j_conf}"
|
||||
else
|
||||
echo "${_property}=${_value}" >> "${_neo4j_conf}"
|
||||
fi
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
function install_neo4j_labs_plugins
|
||||
{
|
||||
# We store a copy of the config before we modify it for the plugins to allow us to see if there are user-set values in the input config that we shouldn't override
|
||||
local _old_config="$(mktemp)"
|
||||
cp "${NEO4J_HOME}"/conf/neo4j.conf "${_old_config}"
|
||||
for plugin_name in $(echo "${NEO4JLABS_PLUGINS}" | jq --raw-output '.[]'); do
|
||||
load_plugin_from_github "${plugin_name}"
|
||||
apply_plugin_default_configuration "${plugin_name}" "${_old_config}"
|
||||
done
|
||||
rm "${_old_config}"
|
||||
}
|
||||
|
||||
# If we're running as root, then run as the neo4j user. Otherwise
|
||||
# docker is running with --user and we simply use that user. Note
|
||||
# that su-exec, despite its name, does not replicate the functionality
|
||||
# of exec, so we need to use both
|
||||
if running_as_root; then
|
||||
userid="neo4j"
|
||||
groupid="neo4j"
|
||||
groups=($(id -G neo4j))
|
||||
exec_cmd="exec gosu neo4j:neo4j"
|
||||
else
|
||||
userid="$(id -u)"
|
||||
groupid="$(id -g)"
|
||||
groups=($(id -G))
|
||||
exec_cmd="exec"
|
||||
fi
|
||||
readonly userid
|
||||
readonly groupid
|
||||
readonly groups
|
||||
readonly exec_cmd
|
||||
|
||||
|
||||
# Need to chown the home directory - but a user might have mounted a
|
||||
# volume here (notably a conf volume). So take care not to chown
|
||||
# volumes (stuff not owned by neo4j)
|
||||
if running_as_root; then
|
||||
# Non-recursive chown for the base directory
|
||||
chown "${userid}":"${groupid}" "${NEO4J_HOME}"
|
||||
chmod 700 "${NEO4J_HOME}"
|
||||
find "${NEO4J_HOME}" -mindepth 1 -maxdepth 1 -user root -type d -exec chown -R ${userid}:${groupid} {} \;
|
||||
find "${NEO4J_HOME}" -mindepth 1 -maxdepth 1 -type d -exec chmod -R 700 {} \;
|
||||
fi
|
||||
|
||||
# Only prompt for license agreement if command contains "neo4j" in it
|
||||
if [[ "${cmd}" == *"neo4j"* ]]; then
|
||||
if [ "${NEO4J_EDITION}" == "enterprise" ]; then
|
||||
if [ "${NEO4J_ACCEPT_LICENSE_AGREEMENT:=no}" != "yes" ]; then
|
||||
echo >&2 "
|
||||
In order to use Neo4j Enterprise Edition you must accept the license agreement.
|
||||
|
||||
(c) Neo4j Sweden AB. 2019. All Rights Reserved.
|
||||
Use of this Software without a proper commercial license with Neo4j,
|
||||
Inc. or its affiliates is prohibited.
|
||||
|
||||
Email inquiries can be directed to: licensing@neo4j.com
|
||||
|
||||
More information is also available at: https://neo4j.com/licensing/
|
||||
|
||||
|
||||
To accept the license agreement set the environment variable
|
||||
NEO4J_ACCEPT_LICENSE_AGREEMENT=yes
|
||||
|
||||
To do this you can use the following docker argument:
|
||||
|
||||
--env=NEO4J_ACCEPT_LICENSE_AGREEMENT=yes
|
||||
"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
# Env variable naming convention:
|
||||
# - prefix NEO4J_
|
||||
# - double underscore char '__' instead of single underscore '_' char in the setting name
|
||||
# - underscore char '_' instead of dot '.' char in the setting name
|
||||
# Example:
|
||||
# NEO4J_dbms_tx__log_rotation_retention__policy env variable to set
|
||||
# dbms.tx_log.rotation.retention_policy setting
|
||||
|
||||
# Backward compatibility - map old hardcoded env variables into new naming convention (if they aren't set already)
|
||||
# Set some to default values if unset
|
||||
: ${NEO4J_dbms_tx__log_rotation_retention__policy:=${NEO4J_dbms_txLog_rotation_retentionPolicy:-"100M size"}}
|
||||
: ${NEO4J_wrapper_java_additional:=${NEO4J_UDC_SOURCE:-"-Dneo4j.ext.udc.source=docker"}}
|
||||
: ${NEO4J_dbms_unmanaged__extension__classes:=${NEO4J_dbms_unmanagedExtensionClasses:-}}
|
||||
: ${NEO4J_dbms_allow__format__migration:=${NEO4J_dbms_allowFormatMigration:-}}
|
||||
: ${NEO4J_dbms_connectors_default__advertised__address:=${NEO4J_dbms_connectors_defaultAdvertisedAddress:-}}
|
||||
|
||||
if [ "${NEO4J_EDITION}" == "enterprise" ];
|
||||
then
|
||||
: ${NEO4J_causal__clustering_expected__core__cluster__size:=${NEO4J_causalClustering_expectedCoreClusterSize:-}}
|
||||
: ${NEO4J_causal__clustering_initial__discovery__members:=${NEO4J_causalClustering_initialDiscoveryMembers:-}}
|
||||
: ${NEO4J_causal__clustering_discovery__advertised__address:=${NEO4J_causalClustering_discoveryAdvertisedAddress:-"$(hostname):5000"}}
|
||||
: ${NEO4J_causal__clustering_transaction__advertised__address:=${NEO4J_causalClustering_transactionAdvertisedAddress:-"$(hostname):6000"}}
|
||||
: ${NEO4J_causal__clustering_raft__advertised__address:=${NEO4J_causalClustering_raftAdvertisedAddress:-"$(hostname):7000"}}
|
||||
# Custom settings for dockerized neo4j
|
||||
: ${NEO4J_causal__clustering_discovery__advertised__address:=$(hostname):5000}
|
||||
: ${NEO4J_causal__clustering_transaction__advertised__address:=$(hostname):6000}
|
||||
: ${NEO4J_causal__clustering_raft__advertised__address:=$(hostname):7000}
|
||||
fi
|
||||
|
||||
# unset old hardcoded unsupported env variables
|
||||
unset NEO4J_dbms_txLog_rotation_retentionPolicy NEO4J_UDC_SOURCE \
|
||||
NEO4J_dbms_unmanagedExtensionClasses NEO4J_dbms_allowFormatMigration \
|
||||
NEO4J_dbms_connectors_defaultAdvertisedAddress NEO4J_ha_serverId \
|
||||
NEO4J_ha_initialHosts NEO4J_causalClustering_expectedCoreClusterSize \
|
||||
NEO4J_causalClustering_initialDiscoveryMembers \
|
||||
NEO4J_causalClustering_discoveryListenAddress \
|
||||
NEO4J_causalClustering_discoveryAdvertisedAddress \
|
||||
NEO4J_causalClustering_transactionListenAddress \
|
||||
NEO4J_causalClustering_transactionAdvertisedAddress \
|
||||
NEO4J_causalClustering_raftListenAddress \
|
||||
NEO4J_causalClustering_raftAdvertisedAddress
|
||||
|
||||
if [ -d /conf ]; then
|
||||
if secure_mode_enabled; then
|
||||
check_mounted_folder_readable "/conf"
|
||||
fi
|
||||
find /conf -type f -exec cp {} "${NEO4J_HOME}"/conf \;
|
||||
fi
|
||||
|
||||
if [ -d /ssl ]; then
|
||||
if secure_mode_enabled; then
|
||||
check_mounted_folder_readable "/ssl"
|
||||
fi
|
||||
: ${NEO4J_dbms_directories_certificates:="/ssl"}
|
||||
fi
|
||||
|
||||
if [ -d /plugins ]; then
|
||||
if secure_mode_enabled; then
|
||||
if [[ ! -z "${NEO4JLABS_PLUGINS:-}" ]]; then
|
||||
# We need write permissions
|
||||
check_mounted_folder_with_chown "/plugins"
|
||||
fi
|
||||
check_mounted_folder_readable "/plugins"
|
||||
fi
|
||||
: ${NEO4J_dbms_directories_plugins:="/plugins"}
|
||||
fi
|
||||
|
||||
if [ -d /import ]; then
|
||||
if secure_mode_enabled; then
|
||||
check_mounted_folder_readable "/import"
|
||||
fi
|
||||
: ${NEO4J_dbms_directories_import:="/import"}
|
||||
fi
|
||||
|
||||
if [ -d /metrics ]; then
|
||||
if secure_mode_enabled; then
|
||||
check_mounted_folder_readable "/metrics"
|
||||
fi
|
||||
: ${NEO4J_dbms_directories_metrics:="/metrics"}
|
||||
fi
|
||||
|
||||
if [ -d /logs ]; then
|
||||
check_mounted_folder_with_chown "/logs"
|
||||
: ${NEO4J_dbms_directories_logs:="/logs"}
|
||||
fi
|
||||
|
||||
if [ -d /data ]; then
|
||||
check_mounted_folder_with_chown "/data"
|
||||
if [ -d /data/databases ]; then
|
||||
check_mounted_folder_with_chown "/data/databases"
|
||||
fi
|
||||
if [ -d /data/dbms ]; then
|
||||
check_mounted_folder_with_chown "/data/dbms"
|
||||
fi
|
||||
fi
|
||||
|
||||
|
||||
# set the neo4j initial password only if you run the database server
|
||||
if [ "${cmd}" == "neo4j" ]; then
|
||||
if [ "${NEO4J_AUTH:-}" == "none" ]; then
|
||||
NEO4J_dbms_security_auth__enabled=false
|
||||
elif [[ "${NEO4J_AUTH:-}" == neo4j/* ]]; then
|
||||
password="${NEO4J_AUTH#neo4j/}"
|
||||
if [ "${password}" == "neo4j" ]; then
|
||||
echo >&2 "Invalid value for password. It cannot be 'neo4j', which is the default."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if running_as_root; then
|
||||
# running set-initial-password as root will create subfolders to /data as root, causing startup fail when neo4j can't read or write the /data/dbms folder
|
||||
# creating the folder first will avoid that
|
||||
mkdir -p /data/dbms
|
||||
chown "${userid}":"${groupid}" /data/dbms
|
||||
fi
|
||||
# Will exit with error if users already exist (and print a message explaining that)
|
||||
# we probably don't want the message though, since it throws an error message on restarting the container.
|
||||
neo4j-admin set-initial-password "${password}" 2>/dev/null || true
|
||||
elif [ -n "${NEO4J_AUTH:-}" ]; then
|
||||
echo >&2 "Invalid value for NEO4J_AUTH: '${NEO4J_AUTH}'"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
declare -A COMMUNITY
|
||||
declare -A ENTERPRISE
|
||||
|
||||
COMMUNITY=(
|
||||
[dbms.tx_log.rotation.retention_policy]="100M size"
|
||||
[dbms.memory.pagecache.size]="512M"
|
||||
[dbms.connectors.default_listen_address]="0.0.0.0"
|
||||
[dbms.connector.https.listen_address]="0.0.0.0:7473"
|
||||
[dbms.connector.http.listen_address]="0.0.0.0:7474"
|
||||
[dbms.connector.bolt.listen_address]="0.0.0.0:7687"
|
||||
)
|
||||
|
||||
ENTERPRISE=(
|
||||
[causal_clustering.transaction_listen_address]="0.0.0.0:6000"
|
||||
[causal_clustering.raft_listen_address]="0.0.0.0:7000"
|
||||
[causal_clustering.discovery_listen_address]="0.0.0.0:5000"
|
||||
)
|
||||
|
||||
for conf in ${!COMMUNITY[@]} ; do
|
||||
|
||||
if ! grep -q "^$conf" "${NEO4J_HOME}"/conf/neo4j.conf
|
||||
then
|
||||
echo -e "\n"$conf=${COMMUNITY[$conf]} >> "${NEO4J_HOME}"/conf/neo4j.conf
|
||||
fi
|
||||
done
|
||||
|
||||
for conf in ${!ENTERPRISE[@]} ; do
|
||||
|
||||
if [ "${NEO4J_EDITION}" == "enterprise" ];
|
||||
then
|
||||
if ! grep -q "^$conf" "${NEO4J_HOME}"/conf/neo4j.conf
|
||||
then
|
||||
echo -e "\n"$conf=${ENTERPRISE[$conf]} >> "${NEO4J_HOME}"/conf/neo4j.conf
|
||||
fi
|
||||
fi
|
||||
done
|
||||
|
||||
#The udc.source=tarball should be replaced by udc.source=docker in both dbms.jvm.additional and wrapper.java.additional
|
||||
#Using sed to replace only this part will allow the custom configs to be added after, separated by a ,.
|
||||
if grep -q "udc.source=tarball" "${NEO4J_HOME}"/conf/neo4j.conf; then
|
||||
sed -i -e 's/udc.source=tarball/udc.source=docker/g' "${NEO4J_HOME}"/conf/neo4j.conf
|
||||
fi
|
||||
#The udc.source should always be set to docker by default and we have to allow also custom configs to be added after that.
|
||||
#In this case, this piece of code helps to add the default value and a , to support custom configs after.
|
||||
if ! grep -q "dbms.jvm.additional=-Dunsupported.dbms.udc.source=docker" "${NEO4J_HOME}"/conf/neo4j.conf; then
|
||||
sed -i -e 's/dbms.jvm.additional=/dbms.jvm.additional=-Dunsupported.dbms.udc.source=docker,/g' "${NEO4J_HOME}"/conf/neo4j.conf
|
||||
fi
|
||||
|
||||
# list env variables with prefix NEO4J_ and create settings from them
|
||||
unset NEO4J_AUTH NEO4J_SHA256 NEO4J_TARBALL
|
||||
for i in $( set | grep ^NEO4J_ | awk -F'=' '{print $1}' | sort -rn ); do
|
||||
setting=$(echo ${i} | sed 's|^NEO4J_||' | sed 's|_|.|g' | sed 's|\.\.|_|g')
|
||||
value=$(echo ${!i})
|
||||
# Don't allow settings with no value or settings that start with a number (neo4j converts settings to env variables and you cannot have an env variable that starts with a number)
|
||||
if [[ -n ${value} ]]; then
|
||||
if [[ ! "${setting}" =~ ^[0-9]+.*$ ]]; then
|
||||
if grep -q -F "${setting}=" "${NEO4J_HOME}"/conf/neo4j.conf; then
|
||||
# Remove any lines containing the setting already
|
||||
sed --in-place "/^${setting}=.*/d" "${NEO4J_HOME}"/conf/neo4j.conf
|
||||
fi
|
||||
# Then always append setting to file
|
||||
echo "${setting}=${value}" >> "${NEO4J_HOME}"/conf/neo4j.conf
|
||||
else
|
||||
echo >&2 "WARNING: ${setting} not written to conf file because settings that start with a number are not permitted"
|
||||
fi
|
||||
fi
|
||||
done
|
||||
|
||||
|
||||
if [[ ! -z "${NEO4JLABS_PLUGINS:-}" ]]; then
|
||||
# NEO4JLABS_PLUGINS should be a json array of plugins like '["graph-algorithms", "apoc", "streams", "graphql"]'
|
||||
install_neo4j_labs_plugins
|
||||
fi
|
||||
|
||||
[ -f "${EXTENSION_SCRIPT:-}" ] && . ${EXTENSION_SCRIPT}
|
||||
|
||||
if [ "${cmd}" == "dump-config" ]; then
|
||||
if ! is_writable "/conf"; then
|
||||
print_permissions_advice_and_fail "/conf"
|
||||
fi
|
||||
cp --recursive "${NEO4J_HOME}"/conf/* /conf
|
||||
echo "Config Dumped"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Use su-exec to drop privileges to neo4j user
|
||||
# Note that su-exec, despite its name, does not replicate the
|
||||
# functionality of exec, so we need to use both
|
||||
if [ "${cmd}" == "neo4j" ]; then
|
||||
${exec_cmd} neo4j console
|
||||
else
|
||||
${exec_cmd} "$@"
|
||||
fi
|
0
neo4j/local-package/.sentinel
Normal file
0
neo4j/local-package/.sentinel
Normal file
37
neo4j/local-package/neo4jlabs-plugins.json
Normal file
37
neo4j/local-package/neo4jlabs-plugins.json
Normal file
@ -0,0 +1,37 @@
|
||||
{
|
||||
"apoc": {
|
||||
"versions": "https://neo4j-contrib.github.io/neo4j-apoc-procedures/versions.json",
|
||||
"properties": {
|
||||
"dbms.security.procedures.unrestricted": "apoc.*"
|
||||
}
|
||||
},
|
||||
"streams": {
|
||||
"versions": "https://neo4j-contrib.github.io/neo4j-streams/versions.json",
|
||||
"properties": {}
|
||||
},
|
||||
"graphql": {
|
||||
"versions": "https://neo4j-graphql.github.io/neo4j-graphql/versions.json",
|
||||
"properties": {
|
||||
"dbms.unmanaged_extension_classes": "org.neo4j.graphql=/graphql",
|
||||
"dbms.security.procedures.unrestricted": "graphql.*"
|
||||
}
|
||||
},
|
||||
"graph-algorithms": {
|
||||
"versions": "https://neo4j-contrib.github.io/neo4j-graph-algorithms/versions.json",
|
||||
"properties": {
|
||||
"dbms.security.procedures.unrestricted":"algo.*"
|
||||
}
|
||||
},
|
||||
"n10s": {
|
||||
"versions": "https://neo4j-labs.github.io/neosemantics/versions.json",
|
||||
"properties": {
|
||||
"dbms.security.procedures.unrestricted":"semantics.*"
|
||||
}
|
||||
},
|
||||
"_testing": {
|
||||
"versions": "http://host.testcontainers.internal:3000/versions.json",
|
||||
"properties": {
|
||||
"dbms.security.procedures.unrestricted": "com.neo4j.docker.plugins.*"
|
||||
}
|
||||
}
|
||||
}
|
@ -4,6 +4,7 @@ if [ ! -f /etc/nginx/ssl/default.crt ]; then
|
||||
openssl genrsa -out "/etc/nginx/ssl/default.key" 2048
|
||||
openssl req -new -key "/etc/nginx/ssl/default.key" -out "/etc/nginx/ssl/default.csr" -subj "/CN=default/O=default/C=UK"
|
||||
openssl x509 -req -days 365 -in "/etc/nginx/ssl/default.csr" -signkey "/etc/nginx/ssl/default.key" -out "/etc/nginx/ssl/default.crt"
|
||||
chmod 644 /etc/nginx/ssl/default.key
|
||||
fi
|
||||
|
||||
# Start crond in background
|
||||
|
@ -51,7 +51,7 @@ RUN set -xe; \
|
||||
#
|
||||
# next lines are here becase there is no auto build on dockerhub see https://github.com/laradock/laradock/pull/1903#issuecomment-463142846
|
||||
libzip-dev zip unzip && \
|
||||
if [ ${LARADOCK_PHP_VERSION} = "7.3" ] || [ ${LARADOCK_PHP_VERSION} = "7.4" ]; then \
|
||||
if [ ${LARADOCK_PHP_VERSION} = "7.3" ] || [ ${LARADOCK_PHP_VERSION} = "7.4" ] || [ $(php -r "echo PHP_MAJOR_VERSION;") = "8" ]; then \
|
||||
docker-php-ext-configure zip; \
|
||||
else \
|
||||
docker-php-ext-configure zip --with-libzip; \
|
||||
@ -193,7 +193,7 @@ RUN if [ ${INSTALL_XDEBUG} = true ]; then \
|
||||
if [ $(php -r "echo PHP_MINOR_VERSION;") = "0" ]; then \
|
||||
pecl install xdebug-2.9.0; \
|
||||
else \
|
||||
pecl install xdebug; \
|
||||
pecl install xdebug-2.9.8; \
|
||||
fi \
|
||||
fi && \
|
||||
docker-php-ext-enable xdebug \
|
||||
@ -312,9 +312,10 @@ RUN if [ ${INSTALL_MONGO} = true ]; then \
|
||||
if [ $(php -r "echo PHP_MAJOR_VERSION;") = "5" ]; then \
|
||||
pecl install mongo && \
|
||||
docker-php-ext-enable mongo \
|
||||
;fi && \
|
||||
pecl install mongodb && \
|
||||
docker-php-ext-enable mongodb \
|
||||
;else \
|
||||
pecl install mongodb && \
|
||||
docker-php-ext-enable mongodb \
|
||||
;fi \
|
||||
;fi
|
||||
|
||||
###########################################################################
|
||||
@ -489,6 +490,7 @@ RUN set -xe; \
|
||||
###########################################################################
|
||||
|
||||
ARG INSTALL_OCI8=false
|
||||
ARG ORACLE_INSTANT_CLIENT_MIRROR=https://github.com/diogomascarenha/oracle-instantclient/raw/master/
|
||||
|
||||
ENV LD_LIBRARY_PATH="/opt/oracle/instantclient_12_1"
|
||||
ENV OCI_HOME="/opt/oracle/instantclient_12_1"
|
||||
@ -502,8 +504,8 @@ RUN if [ ${INSTALL_OCI8} = true ]; then \
|
||||
# Install Oracle Instantclient
|
||||
&& mkdir /opt/oracle \
|
||||
&& cd /opt/oracle \
|
||||
&& wget https://github.com/diogomascarenha/oracle-instantclient/raw/master/instantclient-basic-linux.x64-12.1.0.2.0.zip \
|
||||
&& wget https://github.com/diogomascarenha/oracle-instantclient/raw/master/instantclient-sdk-linux.x64-12.1.0.2.0.zip \
|
||||
&& wget ${ORACLE_INSTANT_CLIENT_MIRROR}instantclient-basic-linux.x64-12.1.0.2.0.zip \
|
||||
&& wget ${ORACLE_INSTANT_CLIENT_MIRROR}instantclient-sdk-linux.x64-12.1.0.2.0.zip \
|
||||
&& unzip /opt/oracle/instantclient-basic-linux.x64-12.1.0.2.0.zip -d /opt/oracle \
|
||||
&& unzip /opt/oracle/instantclient-sdk-linux.x64-12.1.0.2.0.zip -d /opt/oracle \
|
||||
&& ln -s /opt/oracle/instantclient_12_1/libclntsh.so.12.1 /opt/oracle/instantclient_12_1/libclntsh.so \
|
||||
@ -519,7 +521,7 @@ RUN if [ ${INSTALL_OCI8} = true ]; then \
|
||||
if [ $(php -r "echo PHP_MAJOR_VERSION;") = "5" ]; then \
|
||||
echo 'instantclient,/opt/oracle/instantclient_12_1/' | pecl install oci8-2.0.10; \
|
||||
else \
|
||||
echo 'instantclient,/opt/oracle/instantclient_12_1/' | pecl install oci8; \
|
||||
echo 'instantclient,/opt/oracle/instantclient_12_1/' | pecl install oci8-2.2.0; \
|
||||
fi \
|
||||
&& docker-php-ext-configure pdo_oci --with-pdo-oci=instantclient,/opt/oracle/instantclient_12_1,12.1 \
|
||||
&& docker-php-ext-configure pdo_dblib --with-libdir=/lib/x86_64-linux-gnu \
|
||||
@ -945,6 +947,26 @@ RUN if [ ${INSTALL_XMLRPC} = true ]; then \
|
||||
docker-php-ext-install xmlrpc \
|
||||
;fi
|
||||
|
||||
###########################################################################
|
||||
# New Relic for PHP:
|
||||
###########################################################################
|
||||
ARG NEW_RELIC=${NEW_RELIC}
|
||||
ARG NEW_RELIC_KEY=${NEW_RELIC_KEY}
|
||||
ARG NEW_RELIC_APP_NAME=${NEW_RELIC_APP_NAME}
|
||||
|
||||
RUN if [ ${NEW_RELIC} = true ]; then \
|
||||
curl -L http://download.newrelic.com/php_agent/archive/9.9.0.260/newrelic-php5-9.9.0.260-linux.tar.gz | tar -C /tmp -zx && \
|
||||
export NR_INSTALL_USE_CP_NOT_LN=1 && \
|
||||
export NR_INSTALL_SILENT=1 && \
|
||||
/tmp/newrelic-php5-*/newrelic-install install && \
|
||||
rm -rf /tmp/newrelic-php5-* /tmp/nrinstall* && \
|
||||
sed -i \
|
||||
-e 's/"REPLACE_WITH_REAL_KEY"/"${NEW_RELIC_KEY}"/' \
|
||||
-e 's/newrelic.appname = "PHP Application"/newrelic.appname = "${NEW_RELIC_APP_NAME}"/' \
|
||||
-e 's/;newrelic.daemon.app_connect_timeout =.*/newrelic.daemon.app_connect_timeout=15s/' \
|
||||
-e 's/;newrelic.daemon.start_timeout =.*/newrelic.daemon.start_timeout=5s/' \
|
||||
/usr/local/etc/php/conf.d/newrelic.ini \
|
||||
;fi
|
||||
###########################################################################
|
||||
# Downgrade Openssl:
|
||||
###########################################################################
|
||||
|
@ -147,7 +147,7 @@ ARG INSTALL_ZIP_ARCHIVE=false
|
||||
RUN set -eux; \
|
||||
if [ ${INSTALL_ZIP_ARCHIVE} = true ]; then \
|
||||
apk --update add libzip-dev && \
|
||||
if [ ${LARADOCK_PHP_VERSION} = "7.3" ] || [ ${LARADOCK_PHP_VERSION} = "7.4" ]; then \
|
||||
if [ ${LARADOCK_PHP_VERSION} = "7.3" ] || [ ${LARADOCK_PHP_VERSION} = "7.4" ] || [ $(php -r "echo PHP_MAJOR_VERSION;") = "8" ]; then \
|
||||
docker-php-ext-configure zip; \
|
||||
else \
|
||||
docker-php-ext-configure zip --with-libzip; \
|
||||
|
23
react/.gitignore
vendored
Normal file
23
react/.gitignore
vendored
Normal file
@ -0,0 +1,23 @@
|
||||
# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
|
||||
|
||||
# dependencies
|
||||
/node_modules
|
||||
/.pnp
|
||||
.pnp.js
|
||||
|
||||
# testing
|
||||
/coverage
|
||||
|
||||
# production
|
||||
/build
|
||||
|
||||
# misc
|
||||
.DS_Store
|
||||
.env.local
|
||||
.env.development.local
|
||||
.env.test.local
|
||||
.env.production.local
|
||||
|
||||
npm-debug.log*
|
||||
yarn-debug.log*
|
||||
yarn-error.log*
|
11
react/Dockerfile
Normal file
11
react/Dockerfile
Normal file
@ -0,0 +1,11 @@
|
||||
FROM node:10
|
||||
|
||||
WORKDIR /usr/src/app/react
|
||||
|
||||
COPY package*.json ./
|
||||
|
||||
RUN npm install node-sass && npm install
|
||||
|
||||
EXPOSE 3000
|
||||
|
||||
CMD ["npm", "start"]
|
68
react/README.md
Normal file
68
react/README.md
Normal file
@ -0,0 +1,68 @@
|
||||
This project was bootstrapped with [react-redux-boilerplate](https://github.com/Marinashafiq/react-redux-boilerplate).
|
||||
|
||||
## Available Scripts
|
||||
|
||||
In the project directory, you can run:
|
||||
|
||||
### `npm start`
|
||||
|
||||
Runs the app in the development mode.<br />
|
||||
Open [http://localhost:3000](http://localhost:3000) to view it in the browser.
|
||||
|
||||
The page will reload if you make edits.<br />
|
||||
You will also see any lint errors in the console.
|
||||
|
||||
### `npm test`
|
||||
|
||||
Launches the test runner in the interactive watch mode.<br />
|
||||
See the section about [running tests](https://facebook.github.io/create-react-app/docs/running-tests) for more information.
|
||||
|
||||
### `npm run build`
|
||||
|
||||
Builds the app for production to the `build` folder.<br />
|
||||
It correctly bundles React in production mode and optimizes the build for the best performance.
|
||||
|
||||
The build is minified and the filenames include the hashes.<br />
|
||||
Your app is ready to be deployed!
|
||||
|
||||
See the section about [deployment](https://facebook.github.io/create-react-app/docs/deployment) for more information.
|
||||
|
||||
### `npm run eject`
|
||||
|
||||
**Note: this is a one-way operation. Once you `eject`, you can’t go back!**
|
||||
|
||||
If you aren’t satisfied with the build tool and configuration choices, you can `eject` at any time. This command will remove the single build dependency from your project.
|
||||
|
||||
Instead, it will copy all the configuration files and the transitive dependencies (webpack, Babel, ESLint, etc) right into your project so you have full control over them. All of the commands except `eject` will still work, but they will point to the copied scripts so you can tweak them. At this point you’re on your own.
|
||||
|
||||
You don’t have to ever use `eject`. The curated feature set is suitable for small and middle deployments, and you shouldn’t feel obligated to use this feature. However we understand that this tool wouldn’t be useful if you couldn’t customize it when you are ready for it.
|
||||
|
||||
## Learn More
|
||||
|
||||
You can learn more in the [Create React App documentation](https://facebook.github.io/create-react-app/docs/getting-started).
|
||||
|
||||
To learn React, check out the [React documentation](https://reactjs.org/).
|
||||
|
||||
### Code Splitting
|
||||
|
||||
This section has moved here: https://facebook.github.io/create-react-app/docs/code-splitting
|
||||
|
||||
### Analyzing the Bundle Size
|
||||
|
||||
This section has moved here: https://facebook.github.io/create-react-app/docs/analyzing-the-bundle-size
|
||||
|
||||
### Making a Progressive Web App
|
||||
|
||||
This section has moved here: https://facebook.github.io/create-react-app/docs/making-a-progressive-web-app
|
||||
|
||||
### Advanced Configuration
|
||||
|
||||
This section has moved here: https://facebook.github.io/create-react-app/docs/advanced-configuration
|
||||
|
||||
### Deployment
|
||||
|
||||
This section has moved here: https://facebook.github.io/create-react-app/docs/deployment
|
||||
|
||||
### `npm run build` fails to minify
|
||||
|
||||
This section has moved here: https://facebook.github.io/create-react-app/docs/troubleshooting#npm-run-build-fails-to-minify
|
15979
react/package-lock.json
generated
Normal file
15979
react/package-lock.json
generated
Normal file
File diff suppressed because it is too large
Load Diff
48
react/package.json
Normal file
48
react/package.json
Normal file
@ -0,0 +1,48 @@
|
||||
{
|
||||
"name": "react-redux-boilerplate",
|
||||
"version": "0.1.0",
|
||||
"private": true,
|
||||
"dependencies": {
|
||||
"@material-ui/core": "^4.11.0",
|
||||
"@material-ui/icons": "^4.9.1",
|
||||
"@material-ui/lab": "^4.0.0-alpha.56",
|
||||
"@testing-library/jest-dom": "^4.2.4",
|
||||
"@testing-library/react": "^9.5.0",
|
||||
"@testing-library/user-event": "^7.2.1",
|
||||
"axios": "^0.19.2",
|
||||
"bootstrap": "^4.5.3",
|
||||
"jss-rtl": "^0.3.0",
|
||||
"node-sass": "^4.14.1",
|
||||
"prop-types": "^15.7.2",
|
||||
"react": "^17.0.1",
|
||||
"react-bootstrap": "^1.4.0",
|
||||
"react-dom": "^17.0.1",
|
||||
"react-intl": "^4.7.6",
|
||||
"react-redux": "^7.2.2",
|
||||
"react-router-dom": "^5.2.0",
|
||||
"react-scripts": "3.4.1",
|
||||
"redux": "^4.0.5",
|
||||
"redux-saga": "^1.1.3"
|
||||
},
|
||||
"scripts": {
|
||||
"start": "react-scripts start",
|
||||
"build": "react-scripts build",
|
||||
"test": "react-scripts test",
|
||||
"eject": "react-scripts eject"
|
||||
},
|
||||
"eslintConfig": {
|
||||
"extends": "react-app"
|
||||
},
|
||||
"browserslist": {
|
||||
"production": [
|
||||
">0.2%",
|
||||
"not dead",
|
||||
"not op_mini all"
|
||||
],
|
||||
"development": [
|
||||
"last 1 chrome version",
|
||||
"last 1 firefox version",
|
||||
"last 1 safari version"
|
||||
]
|
||||
}
|
||||
}
|
BIN
react/public/favicon.ico
Normal file
BIN
react/public/favicon.ico
Normal file
Binary file not shown.
After Width: | Height: | Size: 3.1 KiB |
43
react/public/index.html
Normal file
43
react/public/index.html
Normal file
@ -0,0 +1,43 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="utf-8" />
|
||||
<link rel="icon" href="%PUBLIC_URL%/favicon.ico" />
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1" />
|
||||
<meta name="theme-color" content="#000000" />
|
||||
<meta
|
||||
name="description"
|
||||
content="Web site created using create-react-app"
|
||||
/>
|
||||
<link rel="apple-touch-icon" href="%PUBLIC_URL%/logo192.png" />
|
||||
<!--
|
||||
manifest.json provides metadata used when your web app is installed on a
|
||||
user's mobile device or desktop. See https://developers.google.com/web/fundamentals/web-app-manifest/
|
||||
-->
|
||||
<link rel="manifest" href="%PUBLIC_URL%/manifest.json" />
|
||||
<!--
|
||||
Notice the use of %PUBLIC_URL% in the tags above.
|
||||
It will be replaced with the URL of the `public` folder during the build.
|
||||
Only files inside the `public` folder can be referenced from the HTML.
|
||||
|
||||
Unlike "/favicon.ico" or "favicon.ico", "%PUBLIC_URL%/favicon.ico" will
|
||||
work correctly both with client-side routing and a non-root public URL.
|
||||
Learn how to configure a non-root public URL by running `npm run build`.
|
||||
-->
|
||||
<title>React App</title>
|
||||
</head>
|
||||
<body>
|
||||
<noscript>You need to enable JavaScript to run this app.</noscript>
|
||||
<div id="root"></div>
|
||||
<!--
|
||||
This HTML file is a template.
|
||||
If you open it directly in the browser, you will see an empty page.
|
||||
|
||||
You can add webfonts, meta tags, or analytics to this file.
|
||||
The build step will place the bundled scripts into the <body> tag.
|
||||
|
||||
To begin the development, run `npm start` or `yarn start`.
|
||||
To create a production bundle, use `npm run build` or `yarn build`.
|
||||
-->
|
||||
</body>
|
||||
</html>
|
BIN
react/public/logo192.png
Normal file
BIN
react/public/logo192.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 5.2 KiB |
BIN
react/public/logo512.png
Normal file
BIN
react/public/logo512.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 9.4 KiB |
25
react/public/manifest.json
Normal file
25
react/public/manifest.json
Normal file
@ -0,0 +1,25 @@
|
||||
{
|
||||
"short_name": "React App",
|
||||
"name": "Create React App Sample",
|
||||
"icons": [
|
||||
{
|
||||
"src": "favicon.ico",
|
||||
"sizes": "64x64 32x32 24x24 16x16",
|
||||
"type": "image/x-icon"
|
||||
},
|
||||
{
|
||||
"src": "logo192.png",
|
||||
"type": "image/png",
|
||||
"sizes": "192x192"
|
||||
},
|
||||
{
|
||||
"src": "logo512.png",
|
||||
"type": "image/png",
|
||||
"sizes": "512x512"
|
||||
}
|
||||
],
|
||||
"start_url": ".",
|
||||
"display": "standalone",
|
||||
"theme_color": "#000000",
|
||||
"background_color": "#ffffff"
|
||||
}
|
3
react/public/robots.txt
Normal file
3
react/public/robots.txt
Normal file
@ -0,0 +1,3 @@
|
||||
# https://www.robotstxt.org/robotstxt.html
|
||||
User-agent: *
|
||||
Disallow:
|
39
react/src/Theme.js
Normal file
39
react/src/Theme.js
Normal file
@ -0,0 +1,39 @@
|
||||
import React, { useState, useEffect } from "react";
|
||||
import { createMuiTheme } from "@material-ui/core/styles";
|
||||
import { ThemeProvider } from "@material-ui/styles";
|
||||
import rtl from "jss-rtl";
|
||||
import { create } from "jss";
|
||||
import { StylesProvider, jssPreset } from "@material-ui/styles";
|
||||
import { useSelector } from "react-redux";
|
||||
import App from "./containers/App";
|
||||
|
||||
function ThemeApp() {
|
||||
const jss = create({ plugins: [...jssPreset().plugins, rtl()] });
|
||||
const lang = useSelector(state => state.lang);
|
||||
const [direction, setDirection] = useState(lang === "en" ? "ltr" : "rtl");
|
||||
|
||||
useEffect(() => {
|
||||
setDirection(lang === "en" ? "ltr" : "rtl");
|
||||
}, [lang]);
|
||||
|
||||
const theme = createMuiTheme({
|
||||
direction: direction,
|
||||
palette: {
|
||||
primary: {
|
||||
main: "#1976d2"
|
||||
},
|
||||
secondary: {
|
||||
main: "#ac4556"
|
||||
}
|
||||
}
|
||||
});
|
||||
return (
|
||||
<StylesProvider jss={jss}>
|
||||
<ThemeProvider theme={theme}>
|
||||
<App />
|
||||
</ThemeProvider>
|
||||
</StylesProvider>
|
||||
);
|
||||
}
|
||||
|
||||
export default ThemeApp;
|
13
react/src/assets/Local/ar.js
Normal file
13
react/src/assets/Local/ar.js
Normal file
@ -0,0 +1,13 @@
|
||||
export default {
|
||||
ar: {
|
||||
hello: "مرحبا",
|
||||
langBtn : "English",
|
||||
home: {
|
||||
content:
|
||||
'لوريم إيبسوم(Lorem Ipsum) هو ببساطة نص شكلي (بمعنى أن الغاية هي الشكل وليس المحتوى) ويُستخدم في صناعات المطابع ودور النشر. كان لوريم إيبسوم ولايزال المعيار للنص الشكلي منذ القرن الخامس عشر عندما قامت مطبعة مجهولة برص مجموعة من الأحرف بشكل عشوائي أخذتها من نص، لتكوّن كتيّب بمثابة دليل أو مرجع شكلي لهذه الأحرف. خمسة قرون من الزمن لم تقضي على هذا النص، بل انه حتى صار مستخدماً وبشكله الأصلي في الطباعة والتنضيد الإلكتروني. انتشر بشكل كبير في ستينيّات هذا القرن مع إصدار رقائق "ليتراسيت" (Letraset) البلاستيكية تحوي مقاطع من هذا النص، وعاد لينتشر مرة أخرى مؤخراَ مع ظهور برامج النشر الإلكتروني مثل "ألدوس بايج مايكر" (Aldus PageMaker) والتي حوت أيضاً على نسخ من نص لوريم إيبسوم.'
|
||||
},
|
||||
snackbar: {
|
||||
'success' : 'تم بنجاح'
|
||||
}
|
||||
}
|
||||
};
|
13
react/src/assets/Local/en.js
Normal file
13
react/src/assets/Local/en.js
Normal file
@ -0,0 +1,13 @@
|
||||
export default {
|
||||
en: {
|
||||
hello : 'Hello',
|
||||
langBtn : "عربى",
|
||||
home: {
|
||||
content:
|
||||
"What is Lorem Ipsum?Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book. It has survived not only five centuries, but also the leap into electronic typesetting, remaining essentially unchanged. It was popularised in the 1960s with the release of Letraset sheets containing Lorem Ipsum passages, and more recently with desktop publishing software like Aldus PageMaker including versions of Lorem Ipsum."
|
||||
},
|
||||
snackbar: {
|
||||
'success' : 'Done successfully'
|
||||
}
|
||||
}
|
||||
}
|
9
react/src/assets/Local/messages.js
Normal file
9
react/src/assets/Local/messages.js
Normal file
@ -0,0 +1,9 @@
|
||||
import en from './en';
|
||||
import ar from './ar';
|
||||
|
||||
const messages = {
|
||||
...ar,
|
||||
...en
|
||||
}
|
||||
|
||||
export default messages;
|
BIN
react/src/assets/fonts/Roboto/Roboto-Black.ttf
Normal file
BIN
react/src/assets/fonts/Roboto/Roboto-Black.ttf
Normal file
Binary file not shown.
BIN
react/src/assets/fonts/Roboto/Roboto-BlackItalic.ttf
Normal file
BIN
react/src/assets/fonts/Roboto/Roboto-BlackItalic.ttf
Normal file
Binary file not shown.
BIN
react/src/assets/fonts/Roboto/Roboto-Bold.ttf
Normal file
BIN
react/src/assets/fonts/Roboto/Roboto-Bold.ttf
Normal file
Binary file not shown.
BIN
react/src/assets/fonts/Roboto/Roboto-BoldItalic.ttf
Normal file
BIN
react/src/assets/fonts/Roboto/Roboto-BoldItalic.ttf
Normal file
Binary file not shown.
BIN
react/src/assets/fonts/Roboto/Roboto-Italic.ttf
Normal file
BIN
react/src/assets/fonts/Roboto/Roboto-Italic.ttf
Normal file
Binary file not shown.
BIN
react/src/assets/fonts/Roboto/Roboto-Light.ttf
Normal file
BIN
react/src/assets/fonts/Roboto/Roboto-Light.ttf
Normal file
Binary file not shown.
BIN
react/src/assets/fonts/Roboto/Roboto-LightItalic.ttf
Normal file
BIN
react/src/assets/fonts/Roboto/Roboto-LightItalic.ttf
Normal file
Binary file not shown.
BIN
react/src/assets/fonts/Roboto/Roboto-Medium.ttf
Normal file
BIN
react/src/assets/fonts/Roboto/Roboto-Medium.ttf
Normal file
Binary file not shown.
BIN
react/src/assets/fonts/Roboto/Roboto-MediumItalic.ttf
Normal file
BIN
react/src/assets/fonts/Roboto/Roboto-MediumItalic.ttf
Normal file
Binary file not shown.
BIN
react/src/assets/fonts/Roboto/Roboto-Regular.ttf
Normal file
BIN
react/src/assets/fonts/Roboto/Roboto-Regular.ttf
Normal file
Binary file not shown.
BIN
react/src/assets/fonts/Roboto/Roboto-Thin.ttf
Normal file
BIN
react/src/assets/fonts/Roboto/Roboto-Thin.ttf
Normal file
Binary file not shown.
BIN
react/src/assets/fonts/Roboto/Roboto-ThinItalic.ttf
Normal file
BIN
react/src/assets/fonts/Roboto/Roboto-ThinItalic.ttf
Normal file
Binary file not shown.
BIN
react/src/assets/images/reactjs.jpg
Normal file
BIN
react/src/assets/images/reactjs.jpg
Normal file
Binary file not shown.
After Width: | Height: | Size: 134 KiB |
10
react/src/components/Controls/Button/Button.js
Normal file
10
react/src/components/Controls/Button/Button.js
Normal file
@ -0,0 +1,10 @@
|
||||
import React from "react";
|
||||
import Button from '@material-ui/core/Button';
|
||||
|
||||
export const Btn = ({text , handleClick}) => {
|
||||
return (
|
||||
<Button variant="contained" color="primary" onClick={handleClick}>
|
||||
{text}
|
||||
</Button>
|
||||
);
|
||||
};
|
32
react/src/components/Controls/InputField/InputField.js
Normal file
32
react/src/components/Controls/InputField/InputField.js
Normal file
@ -0,0 +1,32 @@
|
||||
import React from "react";
|
||||
import { TextField } from "@material-ui/core";
|
||||
|
||||
export const InputField = ({
|
||||
name,
|
||||
label,
|
||||
value,
|
||||
error,
|
||||
handleChange,
|
||||
helperText,
|
||||
isMultiline,
|
||||
isRequired
|
||||
}) => {
|
||||
|
||||
return (
|
||||
<TextField
|
||||
className="my-3"
|
||||
name={name}
|
||||
type="text"
|
||||
label={isRequired ? label+"*" : label}
|
||||
inputProps={{ maxLength: isMultiline ? 500 : 50 }}
|
||||
variant="outlined"
|
||||
fullWidth
|
||||
value={value}
|
||||
error={error}
|
||||
helperText={error && helperText}
|
||||
onChange={handleChange}
|
||||
multiline={isMultiline}
|
||||
rows={isMultiline ? 3 : 1}
|
||||
/>
|
||||
);
|
||||
};
|
14
react/src/components/Loader/Loader.js
Normal file
14
react/src/components/Loader/Loader.js
Normal file
@ -0,0 +1,14 @@
|
||||
import React from "react";
|
||||
import "./Loader.scss";
|
||||
|
||||
const Loader = () => {
|
||||
return (
|
||||
<div className="spinnerContainer d-flex justify-content-center align-items-center h-100">
|
||||
<div className="spinner-border text-primary" role="status">
|
||||
<span className="sr-only">Loading...</span>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
};
|
||||
|
||||
export default Loader;
|
31
react/src/components/Loader/Loader.scss
Normal file
31
react/src/components/Loader/Loader.scss
Normal file
@ -0,0 +1,31 @@
|
||||
.spinnerContainer {
|
||||
height: 100vh;
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
.loading-indicator:before {
|
||||
content: "";
|
||||
background: #000000cc;
|
||||
position: fixed;
|
||||
width: 100%;
|
||||
height: 100%;
|
||||
top: 0;
|
||||
left: 0;
|
||||
z-index: 1000;
|
||||
}
|
||||
|
||||
.loading-indicator:after {
|
||||
content: "\f1ce";
|
||||
font-family: FontAwesome;
|
||||
position: fixed;
|
||||
width: 100%;
|
||||
top: 50%;
|
||||
left: 0;
|
||||
z-index: 1001;
|
||||
color: white;
|
||||
text-align: center;
|
||||
font-weight: 100;
|
||||
font-size: 4rem;
|
||||
-webkit-animation: fa-spin 1s infinite linear;
|
||||
animation: fa-spin 1s infinite linear;
|
||||
}
|
33
react/src/components/Navbar/Navbar.js
Normal file
33
react/src/components/Navbar/Navbar.js
Normal file
@ -0,0 +1,33 @@
|
||||
import React from "react";
|
||||
import messages from "./../../assets/Local/messages";
|
||||
import { useSelector, useDispatch } from "react-redux";
|
||||
import { setCurrentLang } from "../../store/Lang/LangAction";
|
||||
import { Link } from "react-router-dom";
|
||||
import { Btn } from "../Controls/Button/Button";
|
||||
|
||||
export default function Navbar() {
|
||||
const lang = useSelector(state => state.lang);
|
||||
const dispatch = useDispatch();
|
||||
const message = messages[lang];
|
||||
const switchLanguage = lang => {
|
||||
dispatch(setCurrentLang(lang === "ar" ? "en" : "ar"));
|
||||
};
|
||||
|
||||
return (
|
||||
<>
|
||||
<nav className="navbar navbar-dark bg-dark">
|
||||
<a className="navbar-brand">{message.hello}</a>
|
||||
<div className="d-flex align-items-center">
|
||||
{/* This private route won't be accessible if no token in lcoal storage */}
|
||||
<Link to="/" className="text-white mx-3">
|
||||
Private Route
|
||||
</Link>
|
||||
<Btn
|
||||
handleClick={() => switchLanguage(lang)}
|
||||
text={message.langBtn}
|
||||
/>
|
||||
</div>
|
||||
</nav>
|
||||
</>
|
||||
);
|
||||
}
|
12
react/src/components/NotFound/NotFound.js
Normal file
12
react/src/components/NotFound/NotFound.js
Normal file
@ -0,0 +1,12 @@
|
||||
import React from "react";
|
||||
|
||||
const NotFound = () => {
|
||||
return (
|
||||
<React.Fragment>
|
||||
<div className="text-center">
|
||||
<h1 className="my-5 pt-5">Sorry we can’t find this page</h1>
|
||||
</div>
|
||||
</React.Fragment>
|
||||
);
|
||||
};
|
||||
export default NotFound;
|
33
react/src/components/Snackbar/Snackbar.js
Normal file
33
react/src/components/Snackbar/Snackbar.js
Normal file
@ -0,0 +1,33 @@
|
||||
import React from "react";
|
||||
import Snackbar from "@material-ui/core/Snackbar";
|
||||
import MuiAlert from "@material-ui/lab/Alert";
|
||||
import { useSelector, useDispatch } from "react-redux";
|
||||
import { hideSnackbarAction } from "../../store/Snackbar/SnackbarAction";
|
||||
|
||||
function Alert(props) {
|
||||
return <MuiAlert elevation={6} variant="filled" {...props} />;
|
||||
}
|
||||
|
||||
export function MaterialSnackbar(props) {
|
||||
const { isOpen, message, type } = useSelector(state => state.snackbar);
|
||||
const dispatch = useDispatch();
|
||||
const handleClose = (event, reason) => {
|
||||
if (reason === "clickaway") {
|
||||
return;
|
||||
}
|
||||
dispatch(hideSnackbarAction());
|
||||
};
|
||||
return (
|
||||
<Snackbar
|
||||
open={isOpen}
|
||||
autoHideDuration={4000}
|
||||
anchorOrigin={{ vertical: "bottom", horizontal: "center" }}
|
||||
key={`bottom,center`}
|
||||
onClose={() => handleClose}
|
||||
>
|
||||
<Alert onClose={handleClose} severity={type} className="medium_font">
|
||||
{message}
|
||||
</Alert>
|
||||
</Snackbar>
|
||||
);
|
||||
}
|
40
react/src/containers/App.js
Normal file
40
react/src/containers/App.js
Normal file
@ -0,0 +1,40 @@
|
||||
import React from "react";
|
||||
import Navbar from "../components/Navbar/Navbar";
|
||||
import { Router } from "react-router-dom";
|
||||
import history from "../routes/History";
|
||||
import Routes from "../routes/Routes";
|
||||
import { IntlProvider } from "react-intl";
|
||||
import messages from "../assets/Local/messages";
|
||||
import { MaterialSnackbar } from "../components/Snackbar/Snackbar";
|
||||
import Loader from "../components/Loader/Loader";
|
||||
import "./App.scss";
|
||||
import { connect } from "react-redux";
|
||||
|
||||
class App extends React.Component {
|
||||
// App contains routes and also wrapped with snackbar and intl for localization
|
||||
render() {
|
||||
const { lang , loading } = this.props;
|
||||
return (
|
||||
<IntlProvider locale={lang} messages={messages[lang]}>
|
||||
<div
|
||||
className={lang === "ar" ? "rtl" : "ltr"}
|
||||
dir={lang === "ar" ? "rtl" : "ltr"}
|
||||
>
|
||||
{loading ? <Loader /> : null}
|
||||
<Router history={history}>
|
||||
<MaterialSnackbar />
|
||||
<Navbar />
|
||||
{Routes}
|
||||
</Router>
|
||||
</div>
|
||||
</IntlProvider>
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
const mapStateToProps = ({ lang, loading }) => ({
|
||||
lang,
|
||||
loading
|
||||
});
|
||||
|
||||
export default connect(mapStateToProps, null)(App);
|
1
react/src/containers/App.scss
Normal file
1
react/src/containers/App.scss
Normal file
@ -0,0 +1 @@
|
||||
@import '../scss/base.scss';
|
23
react/src/containers/Home/Home.js
Normal file
23
react/src/containers/Home/Home.js
Normal file
@ -0,0 +1,23 @@
|
||||
import React from 'react';
|
||||
import messages from "./../../assets/Local/messages";
|
||||
import { connect } from 'react-redux';
|
||||
|
||||
class Home extends React.Component {
|
||||
render(){
|
||||
const { lang } = this.props;
|
||||
const message = messages[lang]
|
||||
return(
|
||||
<div className="container my-5">
|
||||
<p>{message.home.content}</p>
|
||||
</div>
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
const mapStateToProps = (state) => {
|
||||
return {
|
||||
lang : state.lang
|
||||
}
|
||||
}
|
||||
|
||||
export default connect(mapStateToProps,null)(Home);
|
23
react/src/containers/Login/Login.js
Normal file
23
react/src/containers/Login/Login.js
Normal file
@ -0,0 +1,23 @@
|
||||
import React from 'react';
|
||||
import {Btn} from '../../components/Controls/Button/Button';
|
||||
import History from '../../routes/History';
|
||||
class Login extends React.Component {
|
||||
|
||||
// this method is only to trigger route guards , remove and use your own logic
|
||||
handleLogin = () => {
|
||||
localStorage.setItem('token','token');
|
||||
History.push('/')
|
||||
}
|
||||
|
||||
render(){
|
||||
return(
|
||||
<div className="container my-5">
|
||||
<h1>Login Page</h1>
|
||||
<Btn text='Login' handleClick={this.handleLogin}/>
|
||||
</div>
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
export default Login;
|
12
react/src/index.js
Normal file
12
react/src/index.js
Normal file
@ -0,0 +1,12 @@
|
||||
import React from "react";
|
||||
import ReactDOM from "react-dom";
|
||||
import { Provider } from "react-redux";
|
||||
import store from "./store";
|
||||
import ThemeApp from "./Theme";
|
||||
|
||||
ReactDOM.render(
|
||||
<Provider store={store}>
|
||||
<ThemeApp />
|
||||
</Provider>,
|
||||
document.querySelector('#root')
|
||||
);
|
19
react/src/network/apis/index.js
Normal file
19
react/src/network/apis/index.js
Normal file
@ -0,0 +1,19 @@
|
||||
import axios from "axios";
import { requestHandler, successHandler, errorHandler } from "../interceptors";
import { BASE_URL } from "../../utils/Constants";

// Pre-configured axios instance shared by all API modules.
// Set the real BASE_URL in utils/Constants.
export const axiosInstance = axios.create({
  baseURL: BASE_URL,
  headers: { "Content-Type": "application/json" }
});

// Route every request/response through the shared interceptors so the
// global loader (and any other cross-cutting logic) stays in sync.
axiosInstance.interceptors.request.use(request => requestHandler(request));
axiosInstance.interceptors.response.use(
  response => successHandler(response),
  error => errorHandler(error)
);
|
35
react/src/network/interceptors/index.js
Normal file
35
react/src/network/interceptors/index.js
Normal file
@ -0,0 +1,35 @@
|
||||
import store from "../../store";
|
||||
import { loader } from "../../store/Loader/LoaderAction";
|
||||
import Auth from "../../utils/Auth";
|
||||
|
||||
// A call opts out of the global interceptor behavior by passing
// `handlerEnabled: false` in its config; anything else opts in.
export const isHandlerEnabled = (config = {}) => {
  const explicitlyDisabled =
    config.hasOwnProperty("handlerEnabled") && !config.handlerEnabled;
  return !explicitlyDisabled;
};
|
||||
|
||||
// Request interceptor: turn the global loader on before the call goes out.
export const requestHandler = request => {
  if (!isHandlerEnabled(request)) {
    return request;
  }
  store.dispatch(loader(true));
  return request;
};
|
||||
|
||||
// Response interceptor (success path): hide the global loader.
export const successHandler = response => {
  if (!isHandlerEnabled(response)) {
    return response;
  }
  store.dispatch(loader(false));
  return response;
};
|
||||
|
||||
// Response interceptor (failure path): hide the global loader, then
// re-throw so callers can handle the failure themselves.
export const errorHandler = error => {
  if (isHandlerEnabled(error.config)) {
    store.dispatch(loader(false));
    // You can decide what you need to do to handle errors.
    // Example for logging out an unauthorized user:
    // error.response.status === 401 && Auth.signOut();
  }
  // Reject with the ORIGINAL error object. The previous
  // `Promise.reject({ ...error })` spread the error into a plain object,
  // which drops non-enumerable fields (message, stack) and breaks
  // `err instanceof Error` checks downstream.
  return Promise.reject(error);
};
|
2
react/src/routes/History.js
Normal file
2
react/src/routes/History.js
Normal file
@ -0,0 +1,2 @@
|
||||
import { createBrowserHistory } from "history";

// Single shared history instance so navigation can be triggered outside
// React components (e.g. from Login's handler or auth helpers).
const history = createBrowserHistory();
export default history;
|
22
react/src/routes/Routes.js
Normal file
22
react/src/routes/Routes.js
Normal file
@ -0,0 +1,22 @@
|
||||
import React, { Suspense } from "react";
|
||||
import { Router, Switch } from "react-router-dom";
|
||||
import history from "./History";
|
||||
import * as LazyComponent from "../utils/LazyLoaded";
|
||||
import Loader from "../components/Loader/Loader";
|
||||
import PrivateRoute from "../utils/PrivateRoute";
|
||||
|
||||
const Routes = (
|
||||
<Suspense fallback={<Loader />}>
|
||||
<Router history={history}>
|
||||
<Switch>
|
||||
{/* For private routes */}
|
||||
<PrivateRoute component={LazyComponent.Home} path="/" exact />
|
||||
{/* Public routes that doesn't need any auth */}
|
||||
<LazyComponent.Login path="/login" exact />
|
||||
<LazyComponent.NotFound path="**" title="This page doesn't exist..." exact />
|
||||
</Switch>
|
||||
</Router>
|
||||
</Suspense>
|
||||
);
|
||||
|
||||
export default Routes;
|
1
react/src/scss/_general.scss
Normal file
1
react/src/scss/_general.scss
Normal file
@ -0,0 +1 @@
|
||||
// Add you general and shared styles here
|
4
react/src/scss/_rtl.scss
Normal file
4
react/src/scss/_rtl.scss
Normal file
@ -0,0 +1,4 @@
|
||||
// Right-to-left (Arabic) layout tweaks, applied via the .rtl wrapper in App.js.
.rtl {
  font-family: Arial, Helvetica, sans-serif;
  text-align: right;
}
|
2
react/src/scss/_variables.scss
Normal file
2
react/src/scss/_variables.scss
Normal file
@ -0,0 +1,2 @@
|
||||
// Brand palette shared across all partials.
$primaryColor: rgb(50, 61, 165);
$secondaryColor: rgba(62, 62, 62, 1);
|
4
react/src/scss/base.scss
Normal file
4
react/src/scss/base.scss
Normal file
@ -0,0 +1,4 @@
|
||||
// Aggregated global styles: vendor CSS first, then project partials.
@import 'bootstrap/scss/bootstrap';
@import './variables';
@import './rtl';
@import './general';
|
11
react/src/store/Feature1/FeatureAction.js
Normal file
11
react/src/store/Feature1/FeatureAction.js
Normal file
@ -0,0 +1,11 @@
|
||||
import * as types from "./FeatureTypes";

// Template actions — rename per feature.

// Kicks off the fetch (the saga watches GET_DATA_REQUEST via takeLatest).
export const actionRequest = () => ({
  type: types.GET_DATA_REQUEST
});

// Delivers fetched data to the reducer. This must dispatch
// GET_DATA_RECEIVE: the previous GET_DATA_REQUEST type re-triggered the
// watcher saga in a loop and the reducer (which handles GET_DATA_RECEIVE)
// never saw the payload.
export const actionReceive = payload => ({
  type: types.GET_DATA_RECEIVE,
  payload
});
|
11
react/src/store/Feature1/FeatureApis.js
Normal file
11
react/src/store/Feature1/FeatureApis.js
Normal file
@ -0,0 +1,11 @@
|
||||
import { axiosInstance } from '../../network/apis';

// Opt this module's calls out of the global loader interceptor.
const handlerEnabled = false;

// Template request — point `ENDPOINT` at a real path and rename.
const apiExampleRequest = async () =>
  await axiosInstance.get(`ENDPOINT`, { handlerEnabled });

export default {
  apiExampleRequest
};
|
16
react/src/store/Feature1/FeatureReducer.js
Normal file
16
react/src/store/Feature1/FeatureReducer.js
Normal file
@ -0,0 +1,16 @@
|
||||
import * as types from "./FeatureTypes";

const INITIAL_STATE = {};

// Template reducer: shallow-merges each received payload into state.
export default (state = INITIAL_STATE, action) => {
  if (action.type === types.GET_DATA_RECEIVE) {
    return { ...state, ...action.payload };
  }
  return state;
};
|
21
react/src/store/Feature1/FeatureSagas.js
Normal file
21
react/src/store/Feature1/FeatureSagas.js
Normal file
@ -0,0 +1,21 @@
|
||||
import { call, put, takeLatest } from "redux-saga/effects";
import API from "./FeatureApis";
import * as ACTIONS from "./FeatureAction";
import { dispatchSnackbarError } from "../../utils/Shared";
import * as TYPES from "./FeatureTypes";

// Template worker saga: fetch data and hand it to the reducer,
// surfacing backend errors through the global snackbar.
export function* sagasRequestExample() {
  try {
    const response = yield call(API.apiExampleRequest);
    yield put(ACTIONS.actionReceive(response.data));
  } catch (err) {
    dispatchSnackbarError(err.response.data);
  }
}

// Watcher: with takeLatest, only the most recent pending request survives.
export function* FeatureSaga1() {
  yield takeLatest(TYPES.GET_DATA_REQUEST, sagasRequestExample);
}
|
3
react/src/store/Feature1/FeatureTypes.js
Normal file
3
react/src/store/Feature1/FeatureTypes.js
Normal file
@ -0,0 +1,3 @@
|
||||
// Template action types — rename per feature.
export const GET_DATA_REQUEST = 'GET_DATA_REQUEST';
export const GET_DATA_RECEIVE = 'GET_DATA_RECEIVE';
|
10
react/src/store/Lang/LangAction.js
Normal file
10
react/src/store/Lang/LangAction.js
Normal file
@ -0,0 +1,10 @@
|
||||
import * as types from './LangTypes';

// Persist the chosen language so it survives reloads, then update the store.
export const setCurrentLang = payload => {
  localStorage.setItem('lang', payload);
  return { type: types.SET_LANG, payload };
};

// Read-only query action; carries no payload.
export const getCurrentLang = () => ({ type: types.GET_LANG });
|
14
react/src/store/Lang/LangReducer.js
Normal file
14
react/src/store/Lang/LangReducer.js
Normal file
@ -0,0 +1,14 @@
|
||||
import * as types from "./LangTypes";

// Default to the persisted language, falling back to English.
const INITIAL_STATE = localStorage.getItem("lang") || "en";

export default function locale(state = INITIAL_STATE, action) {
  switch (action.type) {
    case types.SET_LANG:
      return action.payload;
    case types.GET_LANG:
      // getCurrentLang() dispatches no payload (see LangAction), so the
      // previous `return action.payload` wiped the state to undefined.
      // A read-only query must leave the state unchanged.
      return state;
    default:
      return state;
  }
}
|
2
react/src/store/Lang/LangTypes.js
Normal file
2
react/src/store/Lang/LangTypes.js
Normal file
@ -0,0 +1,2 @@
|
||||
// Language switch action types.
export const SET_LANG = 'SET_LANG';
export const GET_LANG = 'GET_LANG';
|
13
react/src/store/Loader/LoaderAction.js
Normal file
13
react/src/store/Loader/LoaderAction.js
Normal file
@ -0,0 +1,13 @@
|
||||
import * as types from "./LoaderTypes";

// Build a SHOW_LOADER or HIDE_LOADER action from the desired visibility.
// Both variants carry the flag in `data`; only the type differs.
export const loader = isLoading => ({
  type: isLoading ? types.SHOW_LOADER : types.HIDE_LOADER,
  data: isLoading
});
|
14
react/src/store/Loader/LoaderReducer.js
Normal file
14
react/src/store/Loader/LoaderReducer.js
Normal file
@ -0,0 +1,14 @@
|
||||
import * as types from "./LoaderTypes";

// Global "request in flight" flag, driven by the axios interceptors.
const INITIAL_STATE = false;

export default (state = INITIAL_STATE, action) => {
  switch (action.type) {
    // Both actions carry the desired visibility in `data`.
    case types.SHOW_LOADER:
    case types.HIDE_LOADER:
      return action.data;
    default:
      return state;
  }
};
|
2
react/src/store/Loader/LoaderTypes.js
Normal file
2
react/src/store/Loader/LoaderTypes.js
Normal file
@ -0,0 +1,2 @@
|
||||
// Global loader visibility action types.
export const SHOW_LOADER = 'SHOW_LOADER';
export const HIDE_LOADER = 'HIDE_LOADER';
|
15
react/src/store/Snackbar/SnackbarAction.js
Normal file
15
react/src/store/Snackbar/SnackbarAction.js
Normal file
@ -0,0 +1,15 @@
|
||||
import * as types from './SnackbarTypes';

// NOTE(review): "snacknarType" is a typo for "snackbarType", but the
// reducer reads `action.snacknarType`, so the name is load-bearing —
// rename both files together if you clean it up.
export const showSnackbarAction = (message, snacknarType) => ({
  type: types.SHOW_SNACKBAR,
  message,
  snacknarType
});

export const hideSnackbarAction = () => ({
  type: types.HIDE_SNACKBAR
});
|
21
react/src/store/Snackbar/SnackbarReducer.js
Normal file
21
react/src/store/Snackbar/SnackbarReducer.js
Normal file
@ -0,0 +1,21 @@
|
||||
import * as types from "./SnackbarTypes";

// State behind the globally mounted MaterialSnackbar:
// { isOpen, message, type (severity) }.
export default (state = {}, action) => {
  switch (action.type) {
    case types.SHOW_SNACKBAR:
      return {
        ...state,
        isOpen: true,
        message: action.message,
        // "snacknarType" matches the (typo'd) field set in SnackbarAction.
        type: action.snacknarType
      };
    case types.HIDE_SNACKBAR:
      return { ...state, isOpen: false };
    default:
      return state;
  }
};
|
||||
|
2
react/src/store/Snackbar/SnackbarTypes.js
Normal file
2
react/src/store/Snackbar/SnackbarTypes.js
Normal file
@ -0,0 +1,2 @@
|
||||
// Global snackbar action types.
export const SHOW_SNACKBAR = 'SHOW_SNACKBAR';
export const HIDE_SNACKBAR = 'HIDE_SNACKBAR';
|
17
react/src/store/index.js
Normal file
17
react/src/store/index.js
Normal file
@ -0,0 +1,17 @@
|
||||
import { createStore, applyMiddleware, compose } from "redux";
import reducers from "./reducers";
import createSagaMiddleware from "redux-saga";
import { watchSagas } from "./sagas";

const saga = createSagaMiddleware();

// Prefer the Redux DevTools compose when the browser extension is installed.
const composeEnhancers =
  typeof window === "object" && window.__REDUX_DEVTOOLS_EXTENSION_COMPOSE__
    ? window.__REDUX_DEVTOOLS_EXTENSION_COMPOSE__({})
    : compose;

const store = createStore(reducers, composeEnhancers(applyMiddleware(saga)));

// Start the root saga after the store exists.
saga.run(watchSagas);

export default store;
|
12
react/src/store/reducers/index.js
Normal file
12
react/src/store/reducers/index.js
Normal file
@ -0,0 +1,12 @@
|
||||
import { combineReducers } from "redux";
import lang from "../Lang/LangReducer";
import loader from "../Loader/LoaderReducer";
import snackbar from "../Snackbar/SnackbarReducer";
import Feature1 from "../Feature1/FeatureReducer";

// Root reducer — resulting state shape: { lang, loader, snackbar, Feature1 }.
export default combineReducers({ lang, loader, snackbar, Feature1 });
|
9
react/src/store/sagas/index.js
Normal file
9
react/src/store/sagas/index.js
Normal file
@ -0,0 +1,9 @@
|
||||
import { FeatureSaga1 } from '../Feature1/FeatureSagas';
import { fork, all } from "redux-saga/effects";

// Root saga: register every feature watcher here.
export function* watchSagas() {
  yield all([FeatureSaga1()]);
  // Equivalent alternative: yield all([fork(FeatureSaga1)]);
}
|
10
react/src/utils/Auth.js
Normal file
10
react/src/utils/Auth.js
Normal file
@ -0,0 +1,10 @@
|
||||
// Minimal token-based auth helper backed by localStorage.
const Auth = {
  // Drop the session token (used by the 401 handling example).
  signOut() {
    localStorage.removeItem("token");
  },
  // Truthy when a token exists — returns the raw token string or null.
  isAuth() {
    return localStorage.getItem("token");
  }
};
export default Auth;
|
1
react/src/utils/Constants.js
Normal file
1
react/src/utils/Constants.js
Normal file
@ -0,0 +1 @@
|
||||
// Placeholder — replace with the real API base URL before use.
export const BASE_URL = 'BASE_URL';
|
5
react/src/utils/LazyLoaded.js
Normal file
5
react/src/utils/LazyLoaded.js
Normal file
@ -0,0 +1,5 @@
|
||||
import React from "react";

// Code-split page components; each chunk loads on first render
// (Routes.js shows a <Loader/> via Suspense while it downloads).
export const Home = React.lazy(() => import('../containers/Home/Home'));
export const Login = React.lazy(() => import('../containers/Login/Login'));
export const NotFound = React.lazy(() => import('../components/NotFound/NotFound'));
|
18
react/src/utils/PrivateRoute.js
Normal file
18
react/src/utils/PrivateRoute.js
Normal file
@ -0,0 +1,18 @@
|
||||
import React from "react";
|
||||
import { Route, Redirect } from "react-router-dom";
|
||||
import Auth from "../utils/Auth";
|
||||
|
||||
const PrivateRoute = ({ component: Component, ...rest }) => {
|
||||
return (
|
||||
// Show the component only when the user is logged in
|
||||
// Otherwise, redirect the user to /signin page
|
||||
<Route
|
||||
{...rest}
|
||||
render={props =>
|
||||
Auth.isAuth() ? <Component {...props} /> : <Redirect to="/login" />
|
||||
}
|
||||
/>
|
||||
);
|
||||
};
|
||||
|
||||
export default PrivateRoute;
|
18
react/src/utils/Shared.js
Normal file
18
react/src/utils/Shared.js
Normal file
@ -0,0 +1,18 @@
|
||||
import store from "../store";
import { showSnackbarAction } from "../store/Snackbar/SnackbarAction";
import messages from "../assets/Local/messages";

// Surface a backend error body through the global snackbar.
// NOTE(review): assumes the API error shape is { error: { message } } —
// confirm against the backend contract before reusing elsewhere.
export function dispatchSnackbarError(data) {
  if (data) {
    store.dispatch(showSnackbarAction(data.error.message, "error"));
  }
}

// Show a localized success toast; `message` is a key into
// messages[lang].snackbar in the locale files.
export function dispatchSnackbarSuccess(message) {
  const lang = store.getState().lang;
  store.dispatch(
    showSnackbarAction(messages[lang].snackbar[message], "success")
  );
}
|
0
workspace/.npmrc
Normal file
0
workspace/.npmrc
Normal file
@ -198,6 +198,14 @@ COPY ./crontab /etc/cron.d
|
||||
|
||||
RUN chmod -R 644 /etc/cron.d
|
||||
|
||||
###########################################################################
|
||||
# Update Repositories
|
||||
###########################################################################
|
||||
|
||||
USER root
|
||||
|
||||
RUN apt-get update -yqq
|
||||
|
||||
###########################################################################
|
||||
# Drush:
|
||||
###########################################################################
|
||||
@ -350,9 +358,26 @@ USER root
|
||||
ARG INSTALL_XDEBUG=false
|
||||
|
||||
RUN if [ ${INSTALL_XDEBUG} = true ]; then \
|
||||
# Load the xdebug extension only with phpunit commands
|
||||
apt-get install -y php${LARADOCK_PHP_VERSION}-xdebug && \
|
||||
sed -i 's/^;//g' /etc/php/${LARADOCK_PHP_VERSION}/cli/conf.d/20-xdebug.ini \
|
||||
# Install the xdebug extension
|
||||
if [ $(php -r "echo PHP_MAJOR_VERSION;") = "5" ]; then \
|
||||
pecl install xdebug-2.5.5; \
|
||||
else \
|
||||
if [ $(php -r "echo PHP_MAJOR_VERSION;") = "7" ] && [ $(php -r "echo PHP_MINOR_VERSION;") = "0" ]; then \
|
||||
pecl install xdebug-2.9.0; \
|
||||
else \
|
||||
if [ $(php -r "echo PHP_MAJOR_VERSION;") = "7" ] && [ $(php -r "echo PHP_MINOR_VERSION;") = "1" ]; then \
|
||||
pecl install xdebug-2.9.8; \
|
||||
else \
|
||||
if [ $(php -r "echo PHP_MAJOR_VERSION;") = "7" ]; then \
|
||||
pecl install xdebug-2.9.8; \
|
||||
else \
|
||||
#pecl install xdebug; \
|
||||
echo "xDebug 3 required, not supported."; \
|
||||
fi \
|
||||
fi \
|
||||
fi \
|
||||
fi && \
|
||||
echo "zend_extension=xdebug.so" >> /etc/php/${LARADOCK_PHP_VERSION}/cli/conf.d/20-xdebug.ini \
|
||||
;fi
|
||||
|
||||
# ADD for REMOTE debugging
|
||||
@ -444,10 +469,11 @@ RUN if [ ${INSTALL_MONGO} = true ]; then \
|
||||
pecl install mongo && \
|
||||
echo "extension=mongo.so" >> /etc/php/${LARADOCK_PHP_VERSION}/mods-available/mongo.ini && \
|
||||
ln -s /etc/php/${LARADOCK_PHP_VERSION}/mods-available/mongo.ini /etc/php/${LARADOCK_PHP_VERSION}/cli/conf.d/30-mongo.ini \
|
||||
;fi && \
|
||||
pecl install mongodb && \
|
||||
echo "extension=mongodb.so" >> /etc/php/${LARADOCK_PHP_VERSION}/mods-available/mongodb.ini && \
|
||||
ln -s /etc/php/${LARADOCK_PHP_VERSION}/mods-available/mongodb.ini /etc/php/${LARADOCK_PHP_VERSION}/cli/conf.d/30-mongodb.ini \
|
||||
;else \
|
||||
pecl install mongodb && \
|
||||
echo "extension=mongodb.so" >> /etc/php/${LARADOCK_PHP_VERSION}/mods-available/mongodb.ini && \
|
||||
ln -s /etc/php/${LARADOCK_PHP_VERSION}/mods-available/mongodb.ini /etc/php/${LARADOCK_PHP_VERSION}/cli/conf.d/30-mongodb.ini \
|
||||
;fi \
|
||||
;fi
|
||||
|
||||
###########################################################################
|
||||
@ -573,9 +599,15 @@ RUN if [ ${INSTALL_LIBPNG} = true ]; then \
|
||||
ARG INSTALL_INOTIFY=false
|
||||
|
||||
RUN if [ ${INSTALL_INOTIFY} = true ]; then \
|
||||
pecl -q install inotify && \
|
||||
echo "extension=inotify.so" >> /etc/php/${LARADOCK_PHP_VERSION}/mods-available/inotify.ini && \
|
||||
ln -s /etc/php/${LARADOCK_PHP_VERSION}/mods-available/inotify.ini /etc/php/${LARADOCK_PHP_VERSION}/cli/conf.d/20-inotify.ini \
|
||||
if [ $(php -r "echo PHP_MAJOR_VERSION;") != "5" ]; then \
|
||||
pecl -q install inotify-0.1.6 && \
|
||||
echo "extension=inotify.so" >> /etc/php/${LARADOCK_PHP_VERSION}/mods-available/inotify.ini && \
|
||||
ln -s /etc/php/${LARADOCK_PHP_VERSION}/mods-available/inotify.ini /etc/php/${LARADOCK_PHP_VERSION}/cli/conf.d/20-inotify.ini \
|
||||
else \
|
||||
pecl -q install inotify && \
|
||||
echo "extension=inotify.so" >> /etc/php/${LARADOCK_PHP_VERSION}/mods-available/inotify.ini && \
|
||||
ln -s /etc/php/${LARADOCK_PHP_VERSION}/mods-available/inotify.ini /etc/php/${LARADOCK_PHP_VERSION}/cli/conf.d/20-inotify.ini \
|
||||
;fi \
|
||||
;fi
|
||||
|
||||
###########################################################################
|
||||
@ -666,6 +698,14 @@ ARG INSTALL_NPM_VUE_CLI=false
|
||||
ARG INSTALL_NPM_ANGULAR_CLI=false
|
||||
ARG NPM_REGISTRY
|
||||
ENV NPM_REGISTRY ${NPM_REGISTRY}
|
||||
ARG NPM_FETCH_RETRIES
|
||||
ENV NPM_FETCH_RETRIES ${NPM_FETCH_RETRIES}
|
||||
ARG NPM_FETCH_RETRY_FACTOR
|
||||
ENV NPM_FETCH_RETRY_FACTOR ${NPM_FETCH_RETRY_FACTOR}
|
||||
ARG NPM_FETCH_RETRY_MINTIMEOUT
|
||||
ENV NPM_FETCH_RETRY_MINTIMEOUT ${NPM_FETCH_RETRY_MINTIMEOUT}
|
||||
ARG NPM_FETCH_RETRY_MAXTIMEOUT
|
||||
ENV NPM_FETCH_RETRY_MAXTIMEOUT ${NPM_FETCH_RETRY_MAXTIMEOUT}
|
||||
ENV NVM_DIR /home/laradock/.nvm
|
||||
ARG NVM_NODEJS_ORG_MIRROR
|
||||
ENV NVM_NODEJS_ORG_MIRROR ${NVM_NODEJS_ORG_MIRROR}
|
||||
@ -678,6 +718,10 @@ RUN if [ ${INSTALL_NODE} = true ]; then \
|
||||
&& nvm install ${NODE_VERSION} \
|
||||
&& nvm use ${NODE_VERSION} \
|
||||
&& nvm alias ${NODE_VERSION} \
|
||||
&& npm config set fetch-retries ${NPM_FETCH_RETRIES} \
|
||||
&& npm config set fetch-retry-factor ${NPM_FETCH_RETRY_FACTOR} \
|
||||
&& npm config set fetch-retry-mintimeout ${NPM_FETCH_RETRY_MINTIMEOUT} \
|
||||
&& npm config set fetch-retry-maxtimeout ${NPM_FETCH_RETRY_MAXTIMEOUT} \
|
||||
&& if [ ${NPM_REGISTRY} ]; then \
|
||||
npm config set registry ${NPM_REGISTRY} \
|
||||
;fi \
|
||||
@ -734,6 +778,10 @@ RUN if [ ${NPM_REGISTRY} ]; then \
|
||||
. ~/.bashrc && npm config set registry ${NPM_REGISTRY} \
|
||||
;fi
|
||||
|
||||
# Mount .npmrc into home folder
|
||||
COPY ./.npmrc /root/.npmrc
|
||||
COPY ./.npmrc /home/laradock/.npmrc
|
||||
|
||||
|
||||
###########################################################################
|
||||
# PNPM:
|
||||
@ -829,6 +877,7 @@ RUN set -xe; \
|
||||
|
||||
USER root
|
||||
ARG INSTALL_OCI8=false
|
||||
ARG ORACLE_INSTANT_CLIENT_MIRROR=https://github.com/diogomascarenha/oracle-instantclient/raw/master/
|
||||
|
||||
ENV LD_LIBRARY_PATH="/opt/oracle/instantclient_12_1"
|
||||
ENV OCI_HOME="/opt/oracle/instantclient_12_1"
|
||||
@ -842,8 +891,8 @@ RUN if [ ${INSTALL_OCI8} = true ]; then \
|
||||
# Install Oracle Instantclient
|
||||
&& mkdir /opt/oracle \
|
||||
&& cd /opt/oracle \
|
||||
&& wget https://github.com/diogomascarenha/oracle-instantclient/raw/master/instantclient-basic-linux.x64-12.1.0.2.0.zip \
|
||||
&& wget https://github.com/diogomascarenha/oracle-instantclient/raw/master/instantclient-sdk-linux.x64-12.1.0.2.0.zip \
|
||||
&& wget ${ORACLE_INSTANT_CLIENT_MIRROR}instantclient-basic-linux.x64-12.1.0.2.0.zip \
|
||||
&& wget ${ORACLE_INSTANT_CLIENT_MIRROR}instantclient-sdk-linux.x64-12.1.0.2.0.zip \
|
||||
&& unzip /opt/oracle/instantclient-basic-linux.x64-12.1.0.2.0.zip -d /opt/oracle \
|
||||
&& unzip /opt/oracle/instantclient-sdk-linux.x64-12.1.0.2.0.zip -d /opt/oracle \
|
||||
&& ln -s /opt/oracle/instantclient_12_1/libclntsh.so.12.1 /opt/oracle/instantclient_12_1/libclntsh.so \
|
||||
@ -858,7 +907,7 @@ RUN if [ ${INSTALL_OCI8} = true ]; then \
|
||||
if [ $(php -r "echo PHP_MAJOR_VERSION;") = "5" ]; then \
|
||||
echo 'instantclient,/opt/oracle/instantclient_12_1/' | pecl install oci8-2.0.10; \
|
||||
else \
|
||||
echo 'instantclient,/opt/oracle/instantclient_12_1/' | pecl install oci8; \
|
||||
echo 'instantclient,/opt/oracle/instantclient_12_1/' | pecl install oci8-2.2.0; \
|
||||
fi \
|
||||
&& echo "extension=oci8.so" >> /etc/php/${LARADOCK_PHP_VERSION}/cli/php.ini \
|
||||
&& php -m | grep -q 'oci8' \
|
||||
|
@ -87,7 +87,6 @@ alias npm-global="npm list -g --depth 0"
|
||||
alias ra="reload"
|
||||
alias reload="source ~/.aliases && echo \"$COL_GREEN ==> Aliases Reloaded... $COL_RESET \n \""
|
||||
alias run="npm run"
|
||||
alias tree="xtree"
|
||||
|
||||
# Xvfb
|
||||
alias xvfb="Xvfb -ac :0 -screen 0 1024x768x16 &"
|
||||
|
Loading…
Reference in New Issue
Block a user