Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Submit feedback
Contribute to GitLab
Sign in
Toggle navigation
P
piplus-backend-v5-server-dockers
Project
Project
Details
Activity
Releases
Cycle Analytics
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Issues
0
Issues
0
List
Board
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Charts
Wiki
Wiki
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Charts
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
piplus-backend-v5
piplus-backend-v5-server-dockers
Commits
22154a64
Commit
22154a64
authored
Jan 23, 2017
by
focus
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
增加秒杀模块
parent
44942159
Changes
7
Hide whitespace changes
Inline
Side-by-side
Showing
7 changed files
with
300 additions
and
18 deletions
+300
-18
Dockerfile
piplus-backend-v5-es-docker/Dockerfile
+37
-0
README.md
piplus-backend-v5-es-docker/README.md
+0
-0
java.policy
piplus-backend-v5-es-docker/java.policy
+18
-0
log4j2.properties
piplus-backend-v5-es-docker/log4j2.properties
+74
-0
start
piplus-backend-v5-es-docker/start
+132
-0
schema.xml
piplus-backend-v5-mycat-docker/conf-prod/schema.xml
+17
-0
server.xml
piplus-backend-v5-mycat-docker/conf-prod/server.xml
+22
-18
No files found.
piplus-backend-v5-es-docker/Dockerfile
0 → 100644
View file @
22154a64
# Elasticsearch 5.1.2 on Alpine with OpenJDK 8 JRE and the x-pack plugin.
FROM openjdk:8u111-jre-alpine

# MAINTAINER is deprecated; LABEL is the supported replacement.
LABEL maintainer="focus@hudongpai.com"

# bash is needed by the ES helper scripts; -U refreshes the package index first.
RUN apk -U add bash

ENV ES_VERSION=5.1.2

# Fetch the ES distribution, unpack it, and remove the tarball in the same
# layer chain so the image does not carry the archive.
ADD http://terran-oss-dev.oss-cn-hangzhou.aliyuncs.com/terran-devops/lib/elasticsearch-$ES_VERSION.tar.gz /tmp/es.tgz
RUN cd /usr/share && \
    tar xf /tmp/es.tgz && \
    rm /tmp/es.tgz

# 9200 = HTTP REST API, 9300 = node-to-node transport.
EXPOSE 9200 9300

ENV ES_HOME=/usr/share/elasticsearch-$ES_VERSION \
    DEFAULT_ES_USER=elasticsearch \
    DISCOVER_TRANSPORT_IP=eth0 \
    DISCOVER_HTTP_IP=eth0 \
    ES_JAVA_OPTS="-Xms1g -Xmx1g"

# Elasticsearch refuses to run as root, so create an unprivileged system user.
RUN adduser -S -s /bin/sh $DEFAULT_ES_USER

VOLUME ["/data","/conf"]

WORKDIR $ES_HOME

COPY java.policy /usr/lib/jvm/java-1.8-openjdk/jre/lib/security/
COPY start /start
COPY log4j2.properties $ES_HOME/config/
RUN chmod +x /start

# install x-pack
RUN bin/elasticsearch-plugin install http://terran-oss-dev.oss-cn-hangzhou.aliyuncs.com/terran-devops/lib/x-pack-5.1.2.zip

CMD ["/start"]
piplus-backend-v5-es-docker/README.md
0 → 100644
View file @
22154a64
piplus-backend-v5-es-docker/java.policy
0 → 100644
View file @
22154a64
// Extra JVM security-policy grants required by Elasticsearch 5.x with x-pack.
grant {
    // JMX Java Management eXtensions
    permission javax.management.MBeanTrustPermission "register";
    permission javax.management.MBeanServerPermission "createMBeanServer";
    permission javax.management.MBeanPermission "-#-[-]", "queryNames";
    // x-pack
    permission java.lang.RuntimePermission "accessClassInPackage.com.sun.activation.registries";
    permission java.lang.RuntimePermission "getClassLoader";
    permission java.lang.RuntimePermission "setContextClassLoader";
    permission java.lang.RuntimePermission "setFactory";
    permission java.security.SecurityPermission "createPolicy.JavaPolicy";
    permission java.security.SecurityPermission "getPolicy";
    permission java.security.SecurityPermission "putProviderProperty.BC";
    permission java.security.SecurityPermission "setPolicy";
    // FIX: the policy-file grammar requires a comma between the permission
    // target and its actions; the two entries below were missing it.
    permission java.util.PropertyPermission "*", "read,write";
    permission java.util.PropertyPermission "sun.nio.ch.bugLevel", "write";
    permission javax.net.ssl.SSLPermission "setHostnameVerifier";
};
piplus-backend-v5-es-docker/log4j2.properties
0 → 100644
View file @
22154a64
# Log4j2 configuration for Elasticsearch 5.x running in a container.
# All active appender refs point at the console so logs reach the container's
# stdout; the file-based rolling appenders are defined but their refs are
# deliberately left commented out.
status = error

# log action execution errors for easier debugging
logger.action.name = org.elasticsearch.action
logger.action.level = debug

# Console appender: primary output for a containerized deployment.
appender.console.type = Console
appender.console.name = console
appender.console.layout.type = PatternLayout
appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%m%n

# Main rolling-file appender (defined but not referenced by rootLogger below).
appender.rolling.type = RollingFile
appender.rolling.name = rolling
appender.rolling.fileName = ${sys:es.logs}.log
appender.rolling.layout.type = PatternLayout
appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.10000m%n
appender.rolling.filePattern = ${sys:es.logs}-%d{yyyy-MM-dd}.log
appender.rolling.policies.type = Policies
appender.rolling.policies.time.type = TimeBasedTriggeringPolicy
appender.rolling.policies.time.interval = 1
appender.rolling.policies.time.modulate = true

rootLogger.level = info
rootLogger.appenderRef.console.ref = console
#rootLogger.appenderRef.rolling.ref = rolling

# Deprecation log: size-based rollover, max 4 archived files of 1GB each.
appender.deprecation_rolling.type = RollingFile
appender.deprecation_rolling.name = deprecation_rolling
appender.deprecation_rolling.fileName = ${sys:es.logs}_deprecation.log
appender.deprecation_rolling.layout.type = PatternLayout
appender.deprecation_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%.10000m%n
appender.deprecation_rolling.filePattern = ${sys:es.logs}_deprecation-%i.log.gz
appender.deprecation_rolling.policies.type = Policies
appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy
appender.deprecation_rolling.policies.size.size = 1GB
appender.deprecation_rolling.strategy.type = DefaultRolloverStrategy
appender.deprecation_rolling.strategy.max = 4

logger.deprecation.name = org.elasticsearch.deprecation
logger.deprecation.level = warn
#logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling
logger.deprecation.additivity = false

# Search slowlog: appender defined, but the logger's ref points at the
# console so slowlog entries also reach stdout.
appender.index_search_slowlog_rolling.type = RollingFile
appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling
appender.index_search_slowlog_rolling.fileName = ${sys:es.logs}_index_search_slowlog.log
appender.index_search_slowlog_rolling.layout.type = PatternLayout
appender.index_search_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.10000m%n
appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs}_index_search_slowlog-%d{yyyy-MM-dd}.log
appender.index_search_slowlog_rolling.policies.type = Policies
appender.index_search_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy
appender.index_search_slowlog_rolling.policies.time.interval = 1
appender.index_search_slowlog_rolling.policies.time.modulate = true

logger.index_search_slowlog_rolling.name = index.search.slowlog
logger.index_search_slowlog_rolling.level = trace
logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = console
logger.index_search_slowlog_rolling.additivity = false

# Indexing slowlog: same pattern as the search slowlog above.
appender.index_indexing_slowlog_rolling.type = RollingFile
appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling
appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs}_index_indexing_slowlog.log
appender.index_indexing_slowlog_rolling.layout.type = PatternLayout
appender.index_indexing_slowlog_rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %marker%.10000m%n
appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs}_index_indexing_slowlog-%d{yyyy-MM-dd}.log
appender.index_indexing_slowlog_rolling.policies.type = Policies
appender.index_indexing_slowlog_rolling.policies.time.type = TimeBasedTriggeringPolicy
appender.index_indexing_slowlog_rolling.policies.time.interval = 1
appender.index_indexing_slowlog_rolling.policies.time.modulate = true

logger.index_indexing_slowlog.name = index.indexing.slowlog.index
logger.index_indexing_slowlog.level = trace
logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = console
logger.index_indexing_slowlog.additivity = false
piplus-backend-v5-es-docker/start
0 → 100644
View file @
22154a64
#!/bin/sh
# Container entrypoint for Elasticsearch: verifies kernel settings, assembles
# the ES option string ($OPTS) from environment variables, then launches ES,
# dropping root privileges when necessary.

# Abort early if vm.max_map_count is below the ES 5.x minimum.
pre_checks () {
    mmc=$(sysctl vm.max_map_count | sed 's/.*= //')
    # FIX: use POSIX [ ] rather than the bash-only [[ ]] — this script runs
    # under /bin/sh.
    if [ "$mmc" -lt 262144 ]; then
        echo "
ERROR: As of 5.0.0 Elasticsearch requires increasing mmap counts.
Refer to https://www.elastic.co/guide/en/elasticsearch/reference/current/vm-max-map-count.html
"
        exit 1
    fi
}

# Resolve the global (non-/32) IPv4 address of interface $1 and append it to
# OPTS as the $2 (transport|http) bind host.
discoverIpFromLink () {
    dev=$1
    mode=$2
    ip=`ipaddr show dev $dev scope global | awk '$1 == "inet" { if (!match($2,"/32")) { gsub("/.*","",$2) ; print $2 } }'`
    echo "Discovered $mode address $ip for $dev"
    OPTS="$OPTS -E $mode.host=$ip"
}

# Append cluster/discovery settings to OPTS based on CLUSTER, CLUSTER_FROM,
# NODE_NAME, MULTICAST, UNICAST_HOSTS, PUBLISH_AS and MIN_MASTERS.
setup_clustering () {
    if [ -n "$CLUSTER" ]; then
        OPTS="$OPTS -E cluster.name=$CLUSTER"
        if [ -n "$CLUSTER_FROM" ]; then
            # One-time rename of the data directory when the cluster was renamed.
            if [ -d /data/$CLUSTER_FROM -a ! -d /data/$CLUSTER ]; then
                echo "Performing cluster data migration from $CLUSTER_FROM to $CLUSTER"
                mv /data/$CLUSTER_FROM /data/$CLUSTER
            fi
        fi
    fi
    if [ -n "$NODE_NAME" ]; then
        OPTS="$OPTS -E node.name=$NODE_NAME"
    fi
    if [ -n "$MULTICAST" ]; then
        OPTS="$OPTS -E discovery.zen.ping.multicast.enabled=$MULTICAST"
    fi
    if [ -n "$UNICAST_HOSTS" ]; then
        OPTS="$OPTS -E discovery.zen.ping.unicast.hosts=$UNICAST_HOSTS"
    fi
    if [ -n "$PUBLISH_AS" ]; then
        # PUBLISH_AS is host[:port]; the port defaults to 9300.
        OPTS="$OPTS -E transport.publish_host=$(echo $PUBLISH_AS | awk -F: '{print $1}')"
        OPTS="$OPTS -E transport.publish_port=$(echo $PUBLISH_AS | awk -F: '{if ($2) print $2; else print 9300}')"
    fi
    if [ -n "$MIN_MASTERS" ]; then
        OPTS="$OPTS -E discovery.zen.minimum_master_nodes=$MIN_MASTERS"
    fi
}

# Install each plugin named in the comma-separated PLUGINS list; otherwise
# just make sure the plugins directory exists.
install_plugins () {
    if [ -n "$PLUGINS" ]; then
        for p in $(echo $PLUGINS | awk -v RS=, '{print}')
        do
            echo "Installing the plugin $p"
            $ES_HOME/bin/elasticsearch-plugin install $p
        done
    else
        mkdir -p $ES_HOME/plugins
    fi
}

# Map TYPE (MASTER|GATEWAY|DATA|NON_MASTER) onto node.master/node.data flags.
setup_personality () {
    if [ -n "$TYPE" ]; then
        case $TYPE in
            MASTER)
                OPTS="$OPTS -E node.master=true -E node.data=false"
                ;;
            GATEWAY)
                OPTS="$OPTS -E node.master=false -E node.data=false"
                ;;
            DATA|NON_MASTER)
                OPTS="$OPTS -E node.master=false -E node.data=true"
                ;;
            *)
                echo "Unknown node type. Please use MASTER|GATEWAY|DATA|NON_MASTER"
                exit 1
        esac
    fi
}

pre_checks

# Optional environment overrides supplied via the /conf volume.
if [ -f /conf/env ]; then
    . /conf/env
fi

# Seed /conf with the default configs on first run.
if [ ! -e /conf/elasticsearch.* ]; then
    cp $ES_HOME/config/elasticsearch.yml /conf
fi
if [ ! -e /conf/log4j2.properties ]; then
    cp $ES_HOME/config/log4j2.properties /conf
fi

OPTS="$OPTS \
    -E path.conf=/conf \
    -E path.data=/data \
    -E path.logs=/data \
    -E transport.tcp.port=9300 \
    -E http.port=9200"

discoverIpFromLink $DISCOVER_TRANSPORT_IP transport
discoverIpFromLink $DISCOVER_HTTP_IP http
setup_personality
setup_clustering
install_plugins

mkdir -p /conf/scripts

echo "Starting Elasticsearch with the options $OPTS"
CMD="$ES_HOME/bin/elasticsearch $OPTS"
if [ `id -u` = 0 ]; then
    # FIX: this branch runs when uid IS 0; the old message claimed the
    # opposite. ES won't run as root, so drop to $DEFAULT_ES_USER.
    echo "Running as root; dropping privileges to $DEFAULT_ES_USER..."
    chown -R $DEFAULT_ES_USER /data /conf
    su -c "$CMD" $DEFAULT_ES_USER
else
    $CMD
fi
piplus-backend-v5-mycat-docker/conf-prod/schema.xml
View file @
22154a64
...
@@ -139,6 +139,14 @@
...
@@ -139,6 +139,14 @@
</schema>
</schema>
<schema
name=
"promotion_service"
checkSQLschema=
"false"
sqlMaxLimit=
"100"
>
<table
name=
"pi_seckill_activity"
primaryKey=
"id"
dataNode=
"promotion_service_$1-8"
autoIncrement=
"false"
rule=
"mod-ent-id"
/>
<table
name=
"pi_seckill_condition"
primaryKey=
"id"
dataNode=
"promotion_service_$1-8"
autoIncrement=
"false"
rule=
"mod-ent-id"
/>
<table
name=
"pi_seckill_order"
primaryKey=
"id"
dataNode=
"promotion_service_$1-8"
autoIncrement=
"false"
rule=
"mod-ent-id"
/>
<table
name=
"pi_seckill_order_operate_log"
primaryKey=
"id"
dataNode=
"promotion_service_$1-8"
autoIncrement=
"false"
rule=
"mod-ent-id"
/>
<table
name=
"pi_seckill_order_payment"
primaryKey=
"id"
dataNode=
"promotion_service_$1-8"
autoIncrement=
"false"
rule=
"mod-ent-id"
/>
</schema>
<schema
name=
"auth_service"
checkSQLschema=
"false"
sqlMaxLimit=
"100"
dataNode=
"auth_service"
></schema>
<schema
name=
"auth_service"
checkSQLschema=
"false"
sqlMaxLimit=
"100"
dataNode=
"auth_service"
></schema>
<schema
name=
"plat_service"
checkSQLschema=
"false"
sqlMaxLimit=
"100"
dataNode=
"plat_service"
></schema>
<schema
name=
"plat_service"
checkSQLschema=
"false"
sqlMaxLimit=
"100"
dataNode=
"plat_service"
></schema>
...
@@ -199,6 +207,15 @@
...
@@ -199,6 +207,15 @@
<dataNode
name=
"points_mall_service_7"
dataHost=
"rds2"
database=
"points_mall_service_7"
/>
<dataNode
name=
"points_mall_service_7"
dataHost=
"rds2"
database=
"points_mall_service_7"
/>
<dataNode
name=
"points_mall_service_8"
dataHost=
"rds2"
database=
"points_mall_service_8"
/>
<dataNode
name=
"points_mall_service_8"
dataHost=
"rds2"
database=
"points_mall_service_8"
/>
<dataNode
name=
"promotion_service_1"
dataHost=
"rds1"
database=
"promotion_service_1"
/>
<dataNode
name=
"promotion_service_2"
dataHost=
"rds1"
database=
"promotion_service_2"
/>
<dataNode
name=
"promotion_service_3"
dataHost=
"rds1"
database=
"promotion_service_3"
/>
<dataNode
name=
"promotion_service_4"
dataHost=
"rds1"
database=
"promotion_service_4"
/>
<dataNode
name=
"promotion_service_5"
dataHost=
"rds2"
database=
"promotion_service_5"
/>
<dataNode
name=
"promotion_service_6"
dataHost=
"rds2"
database=
"promotion_service_6"
/>
<dataNode
name=
"promotion_service_7"
dataHost=
"rds2"
database=
"promotion_service_7"
/>
<dataNode
name=
"promotion_service_8"
dataHost=
"rds2"
database=
"promotion_service_8"
/>
<dataNode
name=
"auth_service"
dataHost=
"rds1"
database=
"auth_service"
/>
<dataNode
name=
"auth_service"
dataHost=
"rds1"
database=
"auth_service"
/>
...
...
piplus-backend-v5-mycat-docker/conf-prod/server.xml
View file @
22154a64
<?xml version="1.0" encoding="UTF-8"?>
<?xml version="1.0" encoding="UTF-8"?>
<!-- - - Licensed under the Apache License, Version 2.0 (the "License");
<!-- - - Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License. - You
- you may not use this file except in compliance with the License. - You
may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0
may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0
- - Unless required by applicable law or agreed to in writing, software -
- - Unless required by applicable law or agreed to in writing, software -
distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT
distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the
License for the specific language governing permissions and - limitations
License for the specific language governing permissions and - limitations
under the License. -->
under the License. -->
<!DOCTYPE mycat:server SYSTEM "server.dtd">
<!DOCTYPE mycat:server SYSTEM "server.dtd">
<mycat:server
xmlns:mycat=
"http://io.mycat/"
>
<mycat:server
xmlns:mycat=
"http://io.mycat/"
>
...
@@ -17,9 +17,9 @@
...
@@ -17,9 +17,9 @@
<!-- <property name="useCompression">1</property>-->
<!--1为开启mysql压缩协议-->
<!-- <property name="useCompression">1</property>-->
<!--1为开启mysql压缩协议-->
<!-- <property name="fakeMySQLVersion">5.6.20</property>-->
<!--设置模拟的MySQL版本号-->
<!-- <property name="fakeMySQLVersion">5.6.20</property>-->
<!--设置模拟的MySQL版本号-->
<!-- <property name="processorBufferChunk">40960</property> -->
<!-- <property name="processorBufferChunk">40960</property> -->
<!--
<!--
<property name="processors">1</property>
<property name="processors">1</property>
<property name="processorExecutor">32</property>
<property name="processorExecutor">32</property>
-->
-->
<!--默认为type 0: DirectByteBufferPool | type 1 ByteBufferArena-->
<!--默认为type 0: DirectByteBufferPool | type 1 ByteBufferArena-->
<property
name=
"processorBufferPoolType"
>
0
</property>
<property
name=
"processorBufferPoolType"
>
0
</property>
...
@@ -30,12 +30,12 @@
...
@@ -30,12 +30,12 @@
<!--<property name="frontSocketNoDelay">1</property>-->
<!--<property name="frontSocketNoDelay">1</property>-->
<!--<property name="processorExecutor">16</property>-->
<!--<property name="processorExecutor">16</property>-->
<!--
<!--
<property name="serverPort">8066</property> <property name="managerPort">9066</property>
<property name="serverPort">8066</property> <property name="managerPort">9066</property>
<property name="idleTimeout">300000</property> <property name="bindIp">0.0.0.0</property>
<property name="idleTimeout">300000</property> <property name="bindIp">0.0.0.0</property>
<property name="frontWriteQueueSize">4096</property> <property name="processors">32</property> -->
<property name="frontWriteQueueSize">4096</property> <property name="processors">32</property> -->
<!--分布式事务开关,0为不过滤分布式事务,1为过滤分布式事务(如果分布式事务内只涉及全局表,则不过滤),2为不过滤分布式事务,但是记录分布式事务日志-->
<!--分布式事务开关,0为不过滤分布式事务,1为过滤分布式事务(如果分布式事务内只涉及全局表,则不过滤),2为不过滤分布式事务,但是记录分布式事务日志-->
<property
name=
"handleDistributedTransactions"
>
0
</property>
<property
name=
"handleDistributedTransactions"
>
0
</property>
<!--
<!--
off heap for merge/order/group/limit 1开启 0关闭
off heap for merge/order/group/limit 1开启 0关闭
-->
-->
...
@@ -64,10 +64,10 @@
...
@@ -64,10 +64,10 @@
</system>
</system>
<!-- 全局SQL防火墙设置 -->
<!-- 全局SQL防火墙设置 -->
<!--
<!--
<firewall>
<firewall>
<whitehost>
<whitehost>
<host host="127.0.0.1" user="mycat"/>
<host host="127.0.0.1" user="mycat"/>
<host host="127.0.0.2" user="mycat"/>
<host host="127.0.0.2" user="mycat"/>
...
@@ -76,7 +76,7 @@
...
@@ -76,7 +76,7 @@
</blacklist>
</blacklist>
</firewall>
</firewall>
-->
-->
<user
name=
"user_service"
>
<user
name=
"user_service"
>
<property
name=
"password"
>
Terran123456
</property>
<property
name=
"password"
>
Terran123456
</property>
<property
name=
"schemas"
>
user_service
</property>
<property
name=
"schemas"
>
user_service
</property>
...
@@ -117,6 +117,10 @@
...
@@ -117,6 +117,10 @@
<property
name=
"password"
>
Terran123456
</property>
<property
name=
"password"
>
Terran123456
</property>
<property
name=
"schemas"
>
schedule_service
</property>
<property
name=
"schemas"
>
schedule_service
</property>
</user>
</user>
<user
name=
"promotion_service"
>
<property
name=
"password"
>
Terran123456
</property>
<property
name=
"schemas"
>
promotion_service
</property>
</user>
<user
name=
"sequence_service_1"
>
<user
name=
"sequence_service_1"
>
<property
name=
"password"
>
Terran123456
</property>
<property
name=
"password"
>
Terran123456
</property>
<property
name=
"schemas"
>
sequence_service_1
</property>
<property
name=
"schemas"
>
sequence_service_1
</property>
...
@@ -127,7 +131,7 @@
...
@@ -127,7 +131,7 @@
</user>
</user>
<user
name=
"terran"
>
<user
name=
"terran"
>
<property
name=
"password"
>
Terran123456
</property>
<property
name=
"password"
>
Terran123456
</property>
<property
name=
"schemas"
>
stat_service,schedule_service,sequence_service_1,sequence_service_2,auth_service,plat_service,sys_service,user_service,forum_service,points_mall_service,ecom_base_service,ecom_order_service
</property>
<property
name=
"schemas"
>
stat_service,schedule_service,sequence_service_1,sequence_service_2,auth_service,plat_service,sys_service,user_service,forum_service,points_mall_service,ecom_base_service,ecom_order_service
,promotion_service
</property>
</user>
</user>
</mycat:server>
</mycat:server>
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment