piplus-backend-v5 / piplus-backend-v5-server-dockers / Commits / 57419171

Commit 57419171 authored Oct 29, 2016 by focus

    fix

Parent: 8b52d481

Showing 38 changed files with 1020 additions and 340 deletions (+1020, -340)
Changed files:

piplus-backend-v5-mycat-docker/Dockerfile  (+3, -3)
piplus-backend-v5-mycat-docker/conf/auto-sharding-long.txt  (+3, -0)
...s-backend-v5-mycat-docker/conf/auto-sharding-rang-mod.txt  (+5, -0)
piplus-backend-v5-mycat-docker/conf/cacheservice.properties  (+1, -1)
piplus-backend-v5-mycat-docker/conf/dnindex.properties  (+0, -3)
...-backend-v5-mycat-docker/conf/index_to_charset.properties  (+162, -34)
piplus-backend-v5-mycat-docker/conf/log4j.xml  (+0, -42)
piplus-backend-v5-mycat-docker/conf/log4j2.xml  (+32, -0)
piplus-backend-v5-mycat-docker/conf/migrateTables.properties  (+6, -0)
piplus-backend-v5-mycat-docker/conf/myid.properties  (+5, -0)
piplus-backend-v5-mycat-docker/conf/router.xml  (+0, -28)
piplus-backend-v5-mycat-docker/conf/rule.xml  (+21, -16)
piplus-backend-v5-mycat-docker/conf/schema.xml  (+44, -52)
...v5-mycat-docker/conf/sequence_distributed_conf.properties  (+2, -0)
piplus-backend-v5-mycat-docker/conf/server.xml  (+65, -18)
piplus-backend-v5-mycat-docker/conf/sharding-by-enum.txt  (+2, -0)
piplus-backend-v5-mycat-docker/conf/wrapper.conf  (+1, -1)
piplus-backend-v5-mycat-docker/conf/zk-create.yaml  (+0, -142)
...ackend-v5-mycat-docker/conf/zkconf/auto-sharding-long.txt  (+3, -0)
...nd-v5-mycat-docker/conf/zkconf/auto-sharding-rang-mod.txt  (+5, -0)
...ackend-v5-mycat-docker/conf/zkconf/autopartition-long.txt  (+5, -0)
...ckend-v5-mycat-docker/conf/zkconf/cacheservice.properties  (+7, -0)
piplus-backend-v5-mycat-docker/conf/zkconf/ehcache.xml  (+8, -0)
...d-v5-mycat-docker/conf/zkconf/index_to_charset.properties  (+219, -0)
...ackend-v5-mycat-docker/conf/zkconf/partition-hash-int.txt  (+2, -0)
...ckend-v5-mycat-docker/conf/zkconf/partition-range-mod.txt  (+6, -0)
piplus-backend-v5-mycat-docker/conf/zkconf/rule.xml  (+120, -0)
piplus-backend-v5-mycat-docker/conf/zkconf/schema.xml  (+78, -0)
...kend-v5-mycat-docker/conf/zkconf/sequence_conf.properties  (+27, -0)
...d-v5-mycat-docker/conf/zkconf/sequence_db_conf.properties  (+5, -0)
...f/zkconf/sequence_distributed_conf-mycat_fz_01.properties  (+2, -0)
...t-docker/conf/zkconf/sequence_distributed_conf.properties  (+2, -0)
...ker/conf/zkconf/sequence_time_conf-mycat_fz_01.properties  (+3, -0)
...v5-mycat-docker/conf/zkconf/sequence_time_conf.properties  (+3, -0)
...ackend-v5-mycat-docker/conf/zkconf/server-mycat_fz_01.xml  (+84, -0)
piplus-backend-v5-mycat-docker/conf/zkconf/server.xml  (+84, -0)
...-backend-v5-mycat-docker/conf/zkconf/sharding-by-enum.txt  (+2, -0)
...nd-v5-mycat-docker/conf/zkdownload/auto-sharding-long.txt  (+3, -0)
piplus-backend-v5-mycat-docker/Dockerfile

@@ -17,12 +17,12 @@ RUN \
     tar zxvf Mycat-server-$MYCAT_VERSION-linux.tar.gz -C /opt && \
     rm -rf /opt/mycat/conf
-# Add src
-ADD conf /opt/mycat/conf
 # Define mountable directories.
 VOLUME ["/opt/mycat/conf"]
+# Add src
+ADD conf /opt/mycat/conf
 # Expose ports.
 EXPOSE 8066
piplus-backend-v5-mycat-docker/conf/auto-sharding-long.txt  (new file, mode 100755)

2000001-4000000=1
0-2000000=0
4000001-8000000=2
piplus-backend-v5-mycat-docker/conf/auto-sharding-rang-mod.txt
0 → 100755
View file @
57419171
800M1-1000M=6
600M1-800M=4
200M1-400M=1
0-200M=5
400M1-600M=4
piplus-backend-v5-mycat-docker/conf/cacheservice.properties

 #used for mycat cache service conf
-factory.encache=org.opencloudb.cache.impl.EnchachePooFactory
+factory.encache=io.mycat.cache.impl.EnchachePooFactory
 #key is pool name ,value is type,max size, expire seconds
 pool.SQLRouteCache=encache,10000,1800
 pool.ER_SQL2PARENTID=encache,1000,1800
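The comment in the file documents the pool value format: "key is pool name, value is type,max size,expire seconds". As a quick illustration (a minimal sketch, not Mycat code; the entry is taken from the file above), this is how one such line decomposes:

```python
# Illustrative only: split one cacheservice.properties pool entry into the
# parts the file's own comment describes (type, max size, expire seconds).
line = "pool.SQLRouteCache=encache,10000,1800"
key, value = line.split("=", 1)
cache_type, max_size, expire_seconds = value.split(",")
assert key == "pool.SQLRouteCache"
assert (cache_type, int(max_size), int(expire_seconds)) == ("encache", 10000, 1800)
```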
piplus-backend-v5-mycat-docker/conf/dnindex.properties  (deleted, mode 100644)

#update
#Sun Aug 07 01:36:38 CST 2016
localhost1=0
piplus-backend-v5-mycat-docker/conf/index_to_charset.properties

 1=big5
-2=czech
+2=latin2
 3=dec8
-4=dos
+4=cp850
-5=german1
+5=latin1
 6=hp8
-7=koi8_ru
+7=koi8r
 8=latin1
 9=latin2
 10=swe7
-11=usa7
+11=ascii
 12=ujis
 13=sjis
 14=cp1251
-15=danish
+15=latin1
 16=hebrew
 18=tis620
-19=euc_kr
+19=euckr
-20=estonia
+20=latin7
-21=hungarian
+21=latin2
-22=koi8_ukr
+22=koi8u
-23=win1251ukr
+23=cp1251
 24=gb2312
 25=greek
-26=win1250
+26=cp1250
-27=croat
+27=latin2
 28=gbk
 29=cp1257
 30=latin5
-31=latin1_de
+31=latin1
 32=armscii8
 33=utf8
-34=win1250ch
+34=cp1250
 35=ucs2
 36=cp866
 37=keybcs2
 38=macce
 39=macroman
-40=pclatin2
+40=cp852
-41=latvian
+41=latin7
-42=latvian1
+42=latin7
-43=maccebin
+43=macce
-44=macceciai
+44=cp1250
 45=utf8mb4
-46=maccecsas
+46=utf8mb4
-47=latin1bin
+47=latin1
-48=latin1cias
+48=latin1
-49=latin1csas
+49=latin1
-50=cp1251bin
+50=cp1251
-51=cp1251cias
+51=cp1251
-52=cp1251csas
+52=cp1251
-53=macromanbin
+53=macroman
-54=macromancias
+54=utf16
-55=macromanciai
+55=utf16
-56=macromancsas
+56=utf16le
 57=cp1256
+58=cp1257
+59=cp1257
+60=utf32
+61=utf32
+62=utf16le
 63=binary
-64=armscii
+64=armscii8
 65=ascii
 66=cp1250
 67=cp1256
@@ -66,7 +71,7 @@
 72=hp8
 73=keybcs2
 74=koi8r
-75=koi8ukr
+75=koi8u
 77=latin2
 78=latin5
 79=latin7
@@ -88,4 +93,127 @@
 95=cp932
 96=cp932
 97=eucjpms
-98=eucjpms
\ No newline at end of file
+98=eucjpms
+99=cp1250
+101=utf16
+102=utf16
+103=utf16
+104=utf16
+105=utf16
+106=utf16
+107=utf16
+108=utf16
+109=utf16
+110=utf16
+111=utf16
+112=utf16
+113=utf16
+114=utf16
+115=utf16
+116=utf16
+117=utf16
+118=utf16
+119=utf16
+120=utf16
+121=utf16
+122=utf16
+123=utf16
+124=utf16
+128=ucs2
+129=ucs2
+130=ucs2
+131=ucs2
+132=ucs2
+133=ucs2
+134=ucs2
+135=ucs2
+136=ucs2
+137=ucs2
+138=ucs2
+139=ucs2
+140=ucs2
+141=ucs2
+142=ucs2
+143=ucs2
+144=ucs2
+145=ucs2
+146=ucs2
+147=ucs2
+148=ucs2
+149=ucs2
+150=ucs2
+151=ucs2
+159=ucs2
+160=utf32
+161=utf32
+162=utf32
+163=utf32
+164=utf32
+165=utf32
+166=utf32
+167=utf32
+168=utf32
+169=utf32
+170=utf32
+171=utf32
+172=utf32
+173=utf32
+174=utf32
+175=utf32
+176=utf32
+177=utf32
+178=utf32
+179=utf32
+180=utf32
+181=utf32
+182=utf32
+183=utf32
+192=utf8
+193=utf8
+194=utf8
+195=utf8
+196=utf8
+197=utf8
+198=utf8
+199=utf8
+200=utf8
+201=utf8
+202=utf8
+203=utf8
+204=utf8
+205=utf8
+206=utf8
+207=utf8
+208=utf8
+209=utf8
+210=utf8
+211=utf8
+212=utf8
+213=utf8
+214=utf8
+215=utf8
+223=utf8
+224=utf8mb4
+225=utf8mb4
+226=utf8mb4
+227=utf8mb4
+228=utf8mb4
+229=utf8mb4
+230=utf8mb4
+231=utf8mb4
+232=utf8mb4
+233=utf8mb4
+234=utf8mb4
+235=utf8mb4
+236=utf8mb4
+237=utf8mb4
+238=utf8mb4
+239=utf8mb4
+240=utf8mb4
+241=utf8mb4
+242=utf8mb4
+243=utf8mb4
+244=utf8mb4
+245=utf8mb4
+246=utf8mb4
+247=utf8mb4
\ No newline at end of file
piplus-backend-v5-mycat-docker/conf/log4j.xml  (deleted, mode 100755)

<?xml version="1.0" encoding="UTF-8"?>
<!--
- Copyright 1999-2012 Alibaba Group.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-->
<!DOCTYPE log4j:configuration SYSTEM "log4j.dtd">
<log4j:configuration xmlns:log4j="http://jakarta.apache.org/log4j/">
    <appender name="ConsoleAppender" class="org.apache.log4j.ConsoleAppender">
        <layout class="org.apache.log4j.PatternLayout">
            <param name="ConversionPattern" value="%d{MM-dd HH:mm:ss.SSS} %5p [%t] (%F:%L) -%m%n" />
        </layout>
    </appender>
    <appender name="FILE" class="org.apache.log4j.RollingFileAppender">
        <param name="file" value="${MYCAT_HOME}/logs/mycat.log" />
        <param name="Append" value="false" />
        <param name="MaxFileSize" value="1000KB" />
        <param name="MaxBackupIndex" value="10" />
        <param name="encoding" value="UTF-8" />
        <layout class="org.apache.log4j.PatternLayout">
            <param name="ConversionPattern" value="%d{MM/dd HH:mm:ss.SSS} %5p [%t] (%F:%L) -%m%n" />
        </layout>
    </appender>
    <root>
        <level value="debug" />
        <appender-ref ref="FILE" />
        <!--<appender-ref ref="FILE" />-->
    </root>
</log4j:configuration>
\ No newline at end of file
piplus-backend-v5-mycat-docker/conf/log4j2.xml  (new file, mode 100755)

<?xml version="1.0" encoding="UTF-8"?>
<Configuration status="WARN">
    <Appenders>
        <Console name="Console" target="SYSTEM_OUT">
            <PatternLayout pattern="%d [%-5p][%t] %m %throwable{full} (%C:%F:%L) %n" />
        </Console>
        <RollingFile name="RollingFile" fileName="${sys:MYCAT_HOME}/logs/mycat.log"
            filePattern="${sys:MYCAT_HOME}/logs/$${date:yyyy-MM}/mycat-%d{MM-dd}-%i.log.gz">
            <PatternLayout>
                <Pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} %5p [%t] (%l) - %m%n</Pattern>
            </PatternLayout>
            <Policies>
                <OnStartupTriggeringPolicy />
                <SizeBasedTriggeringPolicy size="250 MB" />
                <TimeBasedTriggeringPolicy />
            </Policies>
        </RollingFile>
    </Appenders>
    <Loggers>
        <!--<AsyncLogger name="io.mycat" level="info" includeLocation="true" additivity="false">-->
        <!--<AppenderRef ref="Console"/>-->
        <!--<AppenderRef ref="RollingFile"/>-->
        <!--</AsyncLogger>-->
        <asyncRoot level="info" includeLocation="true">
            <AppenderRef ref="Console" />
            <AppenderRef ref="RollingFile" />
        </asyncRoot>
    </Loggers>
</Configuration>
piplus-backend-v5-mycat-docker/conf/migrateTables.properties  (new file, mode 100755)

#schema1=tb1,tb2,...
#schema2=all (writing "all", or omitting the entry, re-routes every sharded table in this schema whose shard nodes have changed)
#...
#sample
#
TESTDB=travelrecord,company,goods
\ No newline at end of file
piplus-backend-v5-mycat-docker/conf/myid.properties

 loadZk=false
 zkURL=127.0.0.1:2181
+clusterId=mycat-cluster-1
 myid=mycat_fz_01
+clusterNodes=mycat_fz_01,mycat_fz_02,mycat_fz_04
+#server booster ; booster install on db same server,will reset all minCon to 1
+type=server
+boosterDataHosts=dn2,dn3
piplus-backend-v5-mycat-docker/conf/router.xml  (deleted, mode 100755)

<?xml version="1.0" encoding="UTF-8"?>
<!--
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
- Add by SkyKong 2012-09-01
-->
<!DOCTYPE daas:router SYSTEM "router.dtd">
<mycat:router xmlns:mycat="http://org.opencloudb/">
    <!-- schema is db name which config in schema.xml -->
    <queryRouter schema="dbtest">
        <dataNode>
            <name>dnTest1</name>
            <queryNode>dnTest1</queryNode>
        </dataNode>
    </queryRouter>
</mycat:router>
\ No newline at end of file
piplus-backend-v5-mycat-docker/conf/rule.xml

@@ -8,7 +8,7 @@
 License for the specific language governing permissions and - limitations
 under the License. -->
 <!DOCTYPE mycat:rule SYSTEM "rule.dtd">
-<mycat:rule xmlns:mycat="http://org.opencloudb/">
+<mycat:rule xmlns:mycat="http://io.mycat/">
     <tableRule name="rule1">
         <rule>
             <columns>id</columns>
@@ -41,21 +41,21 @@
             <algorithm>mod-long</algorithm>
         </rule>
     </tableRule>
-    <tableRule name="ent-mod-long">
+    <tableRule name="sharding-by-murmur">
         <rule>
-            <columns>ent_id</columns>
-            <algorithm>mod-long</algorithm>
+            <columns>id</columns>
+            <algorithm>murmur</algorithm>
         </rule>
     </tableRule>
-    <tableRule name="sharding-by-murmur">
+    <tableRule name="crc32slot">
         <rule>
             <columns>id</columns>
-            <algorithm>murmur</algorithm>
+            <algorithm>crc32slot</algorithm>
         </rule>
     </tableRule>
     <tableRule name="sharding-by-month">
         <rule>
-            <columns>create_date</columns>
+            <columns>create_time</columns>
             <algorithm>partbymonth</algorithm>
         </rule>
     </tableRule>
@@ -81,7 +81,7 @@
     </tableRule>
     <function name="murmur"
-        class="org.opencloudb.route.function.PartitionByMurmurHash">
+        class="io.mycat.route.function.PartitionByMurmurHash">
         <property name="seed">0</property> <!-- default is 0 -->
         <property name="count">2</property> <!-- the number of database nodes to shard across; must be set, otherwise sharding is impossible -->
         <property name="virtualBucketTimes">160</property> <!-- each physical node is mapped to this many virtual nodes; default is 160, i.e. there are 160x as many virtual nodes as physical nodes -->
@@ -89,38 +89,43 @@
         <!-- <property name="bucketMapPath">/etc/mycat/bucketMapPath</property>
             used during testing to inspect how virtual nodes are distributed over physical nodes: if set, each virtual node's murmur hash and its physical-node mapping are written line by line to this file; there is no default, and nothing is written if unset -->
     </function>
+    <function name="crc32slot" class="io.mycat.route.function.PartitionByCRC32PreSlot">
+        <property name="count">2</property> <!-- the number of database nodes to shard across; must be set, otherwise sharding is impossible -->
+    </function>
     <function name="hash-int"
-        class="org.opencloudb.route.function.PartitionByFileMap">
+        class="io.mycat.route.function.PartitionByFileMap">
         <property name="mapFile">partition-hash-int.txt</property>
     </function>
     <function name="rang-long"
-        class="org.opencloudb.route.function.AutoPartitionByLong">
+        class="io.mycat.route.function.AutoPartitionByLong">
         <property name="mapFile">autopartition-long.txt</property>
     </function>
-    <function name="mod-long" class="org.opencloudb.route.function.PartitionByMod">
+    <function name="mod-long" class="io.mycat.route.function.PartitionByMod">
         <!-- how many data nodes -->
        <property name="count">3</property>
     </function>
-    <function name="func1" class="org.opencloudb.route.function.PartitionByLong">
+    <function name="func1" class="io.mycat.route.function.PartitionByLong">
        <property name="partitionCount">8</property>
        <property name="partitionLength">128</property>
     </function>
     <function name="latestMonth"
-        class="org.opencloudb.route.function.LatestMonthPartion">
+        class="io.mycat.route.function.LatestMonthPartion">
        <property name="splitOneDay">24</property>
     </function>
     <function name="partbymonth"
-        class="org.opencloudb.route.function.PartitionByMonth">
+        class="io.mycat.route.function.PartitionByMonth">
        <property name="dateFormat">yyyy-MM-dd</property>
        <property name="sBeginDate">2015-01-01</property>
     </function>
-    <function name="rang-mod" class="org.opencloudb.route.function.PartitionByRangeMod">
+    <function name="rang-mod" class="io.mycat.route.function.PartitionByRangeMod">
        <property name="mapFile">partition-range-mod.txt</property>
     </function>
-    <function name="jump-consistent-hash" class="org.opencloudb.route.function.PartitionByJumpConsistentHash">
+    <function name="jump-consistent-hash" class="io.mycat.route.function.PartitionByJumpConsistentHash">
        <property name="totalBuckets">3</property>
     </function>
 </mycat:rule>
piplus-backend-v5-mycat-docker/conf/schema.xml

 <?xml version="1.0"?>
 <!DOCTYPE mycat:schema SYSTEM "schema.dtd">
-<mycat:schema xmlns:mycat="http://org.opencloudb/">
-    <schema name="TESTDB" checkSQLschema="true" sqlMaxLimit="100">
-        <table name="user" primaryKey="id" dataNode="dn3,dn2,dn1" rule="ent-mod-long" />
-        <table name="role" primaryKey="id" dataNode="dn3,dn2,dn1" rule="ent-mod-long" />
-        <table name="user_role_xref" primaryKey="id" dataNode="dn3,dn2,dn1" rule="ent-mod-long" />
+<mycat:schema xmlns:mycat="http://io.mycat/">
+    <schema name="TESTDB" checkSQLschema="false" sqlMaxLimit="100">
         <!-- auto sharding by id (long) -->
-        <!-- <table name="travelrecord" dataNode="dn1,dn2,dn3" rule="auto-sharding-long" /> -->
+        <table name="travelrecord" dataNode="dn1,dn2,dn3" rule="auto-sharding-long" />
         <!-- global table is auto cloned to all defined data nodes ,so can join
             with any table whose sharding node is in the same data node -->
-        <!-- <table name="company" primaryKey="ID" dataNode="dn3,dn2,dn1" rule="mod-long" />
-        <table name="goods" primaryKey="ID" type="global" dataNode="dn1,dn2" /> -->
+        <table name="company" primaryKey="ID" type="global" dataNode="dn1,dn2,dn3" />
+        <table name="goods" primaryKey="ID" type="global" dataNode="dn1,dn2" />
         <!-- random sharding using mod sharind rule -->
-        <!-- <table name="hotnews" primaryKey="ID" dataNode="dn1,dn2,dn3"
-            rule="mod-long" /> -->
+        <table name="hotnews" primaryKey="ID" autoIncrement="true" dataNode="dn1,dn2,dn3"
+            rule="mod-long" />
         <!-- <table name="dual" primaryKey="ID" dataNode="dnx,dnoracle2" type="global"
             needAddLimit="false"/> <table name="worker" primaryKey="ID" dataNode="jdbc_dn1,jdbc_dn2,jdbc_dn3"
             rule="mod-long" /> -->
-        <!-- <table name="employee" primaryKey="ID" dataNode="dn1,dn2"
-            rule="sharding-by-intfile" /> -->
-        <!-- <table name="customer" primaryKey="ID" dataNode="dn1,dn2"
-            rule="sharding-by-intfile">
+        <table name="employee" primaryKey="ID" dataNode="dn1,dn2"
+            rule="sharding-by-intfile" />
+        <table name="customer" primaryKey="ID" dataNode="dn1,dn2"
+            rule="sharding-by-intfile">
             <childTable name="orders" primaryKey="ID" joinKey="customer_id"
                 parentKey="id">
                 <childTable name="order_items" joinKey="order_id"
                     parentKey="id" />
             </childTable>
             <childTable name="customer_addr" primaryKey="ID" joinKey="customer_id"
                 parentKey="id" />
-        </table> -->
+        </table>
         <!-- <table name="oc_call" primaryKey="ID" dataNode="dn1$0-743" rule="latest-month-calldate"
             /> -->
     </schema>
     <!-- <dataNode name="dn1$0-743" dataHost="localhost1" database="db$0-743"
         /> -->
     <dataNode name="dn1" dataHost="localhost1" database="db1" />
     <dataNode name="dn2" dataHost="localhost1" database="db2" />
     <dataNode name="dn3" dataHost="localhost1" database="db3" />
-    <!-- <dataNode name="dn4" dataHost="localhost2" database="db4" />
-    <dataNode name="dn5" dataHost="localhost2" database="db5" />
-    <dataNode name="dn6" dataHost="localhost2" database="db6" /> -->
     <!--<dataNode name="dn4" dataHost="sequoiadb1" database="SAMPLE" />
     <dataNode name="jdbc_dn1" dataHost="jdbchost" database="db1" />
     <dataNode name="jdbc_dn2" dataHost="jdbchost" database="db2" />
     <dataNode name="jdbc_dn3" dataHost="jdbchost" database="db3" /> -->
     <dataHost name="localhost1" maxCon="1000" minCon="10" balance="0"
         writeType="0" dbType="mysql" dbDriver="native" switchType="1" slaveThreshold="100">
         <heartbeat>select user()</heartbeat>
         <!-- can have multi write hosts -->
-        <writeHost host="hostM1" url="127.0.0.1:4306" user="root"
+        <writeHost host="hostM1" url="localhost:3306" user="root"
             password="123456">
             <!-- can have multi read hosts -->
-            <!--<readHost host="hostS2" url="192.168.1.200:3306" user="root" password="xxx" />-->
+            <readHost host="hostS2" url="192.168.1.200:3306" user="root" password="xxx" />
         </writeHost>
-        <!--<writeHost host="hostS1" url="localhost:3316" user="root"-->
-        <!--password="123456" />-->
+        <writeHost host="hostS1" url="localhost:3316" user="root"
+            password="123456" />
         <!-- <writeHost host="hostM2" url="localhost:3316" user="root" password="123456"/> -->
     </dataHost>
     <!--
     <dataHost name="sequoiadb1" maxCon="1000" minCon="1" balance="0" dbType="sequoiadb" dbDriver="jdbc">
         <heartbeat> </heartbeat>
         <writeHost host="hostM1" url="sequoiadb://1426587161.dbaas.sequoialab.net:11920/SAMPLE" user="jifeng" password="jifeng"></writeHost>
     </dataHost>
     <dataHost name="oracle1" maxCon="1000" minCon="1" balance="0" writeType="0" dbType="oracle" dbDriver="jdbc"> <heartbeat>select 1 from dual</heartbeat>
         <connectionInitSql>alter session set nls_date_format='yyyy-mm-dd hh24:mi:ss'</connectionInitSql>
         <writeHost host="hostM1" url="jdbc:oracle:thin:@127.0.0.1:1521:nange" user="base" password="123456" > </writeHost> </dataHost>
     <dataHost name="jdbchost" maxCon="1000" minCon="1" balance="0" writeType="0" dbType="mongodb" dbDriver="jdbc">
         <heartbeat>select user()</heartbeat>
         <writeHost host="hostM" url="mongodb://192.168.0.99/test" user="admin" password="123456" ></writeHost> </dataHost>
     <dataHost name="sparksql" maxCon="1000" minCon="1" balance="0" dbType="spark" dbDriver="jdbc">
         <heartbeat> </heartbeat>
         <writeHost host="hostM1" url="jdbc:hive2://feng01:10000" user="jifeng" password="jifeng"></writeHost> </dataHost> -->
     <!-- <dataHost name="jdbchost" maxCon="1000" minCon="10" balance="0" dbType="mysql"
         dbDriver="jdbc"> <heartbeat>select user()</heartbeat> <writeHost host="hostM1"
         url="jdbc:mysql://localhost:3306" user="root" password="123456"> </writeHost>
     </dataHost> -->
 </mycat:schema>
\ No newline at end of file
piplus-backend-v5-mycat-docker/conf/sequence_distributed_conf.properties  (new file, mode 100755)

INSTANCEID=01
CLUSTERID=01
piplus-backend-v5-mycat-docker/conf/server.xml

@@ -8,32 +8,88 @@
 License for the specific language governing permissions and - limitations
 under the License. -->
 <!DOCTYPE mycat:server SYSTEM "server.dtd">
-<mycat:server xmlns:mycat="http://org.opencloudb/">
+<mycat:server xmlns:mycat="http://io.mycat/">
     <system>
+    <property name="defaultSqlParser">druidparser</property>
+    <property name="useSqlStat">0</property> <!-- 1 enables live SQL statistics, 0 disables them -->
+    <property name="useGlobleTableCheck">0</property> <!-- 1 enables global-table consistency checking, 0 disables it -->
+    <property name="sequnceHandlerType">2</property>
     <!-- <property name="useCompression">1</property>--> <!-- 1 enables the MySQL compressed protocol -->
+    <!-- <property name="fakeMySQLVersion">5.6.20</property>--> <!-- sets the MySQL version number to emulate -->
     <!-- <property name="processorBufferChunk">40960</property> -->
     <!--
     <property name="processors">1</property>
     <property name="processorExecutor">32</property>
     -->
+    <!-- default is type 0: DirectByteBufferPool | type 1: ByteBufferArena -->
+    <property name="processorBufferPoolType">0</property>
     <!-- default is 65535 (64K), the maximum text length for SQL parsing -->
     <!--<property name="maxStringLiteralLength">65535</property>-->
     <!--<property name="sequnceHandlerType">0</property>-->
     <!--<property name="backSocketNoDelay">1</property>-->
     <!--<property name="frontSocketNoDelay">1</property>-->
     <!--<property name="processorExecutor">16</property>-->
     <!--
+    <property name="mutiNodeLimitType">1</property> 0: small-scale sorting (default); 1: sorting at the hundred-million-row scale
+    <property name="mutiNodePatchSize">100</property> batch size for hundred-million-row-scale sorting
+    <property name="processors">32</property> <property name="processorExecutor">32</property>
     <property name="serverPort">8066</property> <property name="managerPort">9066</property>
     <property name="idleTimeout">300000</property> <property name="bindIp">0.0.0.0</property>
     <property name="frontWriteQueueSize">4096</property> <property name="processors">32</property> -->
+    <!-- distributed-transaction switch: 0 = do not filter distributed transactions; 1 = filter them (unless the transaction only involves global tables); 2 = do not filter, but log distributed transactions -->
+    <property name="handleDistributedTransactions">0</property>
+    <!-- off heap for merge/order/group/limit: 1 = on, 0 = off -->
+    <property name="useOffHeapForMerge">1</property>
+    <!-- unit is m -->
+    <property name="memoryPageSize">1m</property>
+    <!-- unit is k -->
+    <property name="spillsFileBufferSize">1k</property>
+    <property name="useStreamOutput">0</property>
+    <!-- unit is m -->
+    <property name="systemReserveMemorySize">384m</property>
+    <!-- whether to use zookeeper to coordinate switchover -->
+    <property name="useZKSwitch">true</property>
     </system>
-    <user name="test">
-        <property name="password">test</property>
+    <!-- global SQL firewall settings -->
+    <!--
+    <firewall>
+        <whitehost>
+            <host host="127.0.0.1" user="mycat"/>
+            <host host="127.0.0.2" user="mycat"/>
+        </whitehost>
+        <blacklist check="false">
+        </blacklist>
+    </firewall>
+    -->
+    <user name="root">
+        <property name="password">123456</property>
         <property name="schemas">TESTDB</property>
+        <!-- table-level DML privilege settings -->
+        <!--
+        <privileges check="false">
+            <schema name="TESTDB" dml="0110" >
+                <table name="tb01" dml="0000"></table>
+                <table name="tb02" dml="1111"></table>
+            </schema>
+        </privileges>
+        -->
     </user>
     <user name="user">
@@ -41,14 +97,5 @@
         <property name="schemas">TESTDB</property>
         <property name="readOnly">true</property>
     </user>
-    <!--
-    <quarantine>
-        <whitehost>
-            <host host="127.0.0.1" user="mycat"/>
-            <host host="127.0.0.2" user="mycat"/>
-        </whitehost>
-        <blacklist check="false"></blacklist>
-    </quarantine>
-    -->
 </mycat:server>
\ No newline at end of file
piplus-backend-v5-mycat-docker/conf/sharding-by-enum.txt  (new file, mode 100755)

10000=0
10010=1
piplus-backend-v5-mycat-docker/conf/wrapper.conf

@@ -43,7 +43,7 @@ wrapper.java.additional.11=-Xms1G
 #wrapper.java.maxmemory=64
 # Application parameters. Add parameters as needed starting from 1
-wrapper.app.parameter.1=org.opencloudb.MycatStartup
+wrapper.app.parameter.1=io.mycat.MycatStartup
 wrapper.app.parameter.2=start
 #********************************************************************
piplus-backend-v5-mycat-docker/conf/zk-create.yaml  (deleted, mode 100755)

zkURL: 127.0.0.1:2181
mycat-mysqlgroup:
  hostM1:
    name: hostM1
    repType: '0'
    zone: fz
    servers:
    - hostM1
    cur-write-server: hostM1
    auto-write-switch: true
    heartbeatSQL: select user()
mycat-mysqls:
  hostM1:
    name: hostM1
    ip: localhost
    port: '3316'
    user: root
    password: '123456'
    hostId: host
    zone: fz
mycat-cluster:
  mycat-cluster-1:
    user:
      test:
        name: test
        password: test
        schemas:
        - TESTDB
      user:
        name: user
        password: user
        schemas:
        - TESTDB
        readOnly: 'true'
    rule:
      rule1:
        name: func1
        functionName: org.opencloudb.route.function.PartitionByLong
        column: id
        partitionCount: '8'
        partitionLength: '128'
      rule2:
        name: func1
        functionName: org.opencloudb.route.function.PartitionByLong
        column: user_id
        partitionCount: '8'
        partitionLength: '128'
      sharding-by-intfile:
        name: hash-int
        functionName: org.opencloudb.route.function.PartitionByFileMap
        column: sharding_id
        config:
          '10000': '0'
          '10010': '1'
      auto-sharding-long:
        name: rang-long
        functionName: org.opencloudb.route.function.AutoPartitionByLong
        column: id
        config:
          0-500M: '0'
          500M-1000M: '1'
          1000M-1500M: '2'
      mod-long:
        name: mod-long
        functionName: org.opencloudb.route.function.PartitionByMod
        column: id
        count: '3'
      sharding-by-murmur:
        name: murmur
        functionName: org.opencloudb.route.function.PartitionByMurmurHash
        column: id
        seed: '0'
        count: '2'
        virtualBucketTimes: '160'
      sharding-by-month:
        name: partbymonth
        functionName: org.opencloudb.route.function.PartitionByMonth
        column: create_date
        dateFormat: yyyy-MM-dd
        sBeginDate: '2015-01-01'
      latest-month-calldate:
        name: latestMonth
        functionName: org.opencloudb.route.function.LatestMonthPartion
        column: calldate
        splitOneDay: '24'
      auto-sharding-rang-mod:
        name: rang-mod
        functionName: org.opencloudb.route.function.PartitionByRangeMod
        column: id
        config:
          0-200M: '5'
          200M1-400M: '1'
          400M1-600M: '4'
          600M1-800M: '4'
          800M1-1000M: '6'
      jch:
        name: jump-consistent-hash
        functionName: org.opencloudb.route.function.PartitionByJumpConsistentHash
        column: id
        totalBuckets: '3'
    schema:
      TESTDB:
        name: TESTDB
        checkSQLSchema: true
        defaultMaxLimit: 100
        user:
          name: user
          datanode: dn3,dn2,dn1
          ruleName: mod-long
          primaryKey: id
    datanode:
      dn1:
        name: dn1
        database: db1
        dataHost: localhost1
      dn2:
        name: dn2
        database: db2
        dataHost: localhost1
      dn3:
        name: dn3
        database: db3
        dataHost: localhost1
    datahost:
      localhost1:
        name: localhost1
        balance: 0
        maxcon: 1000
        mincon: 10
        dbtype: mysql
        dbDriver: native
        writeType: 0
        switchType: 1
        slaveThreshold: 100
        heartbeatSQL: select user()
        mysqlGroup: hostM1
mycat-nodes:
  mycat_fz_01:
    name: mycat_fz_01
    cluster: mycat-cluster-1
systemParams:
  defaultSqlParser: druidparser
piplus-backend-v5-mycat-docker/conf/zkconf/auto-sharding-long.txt  (new file, mode 100755)

2000001-4000000=1
0-2000000=0
4000001-8000000=2
piplus-backend-v5-mycat-docker/conf/zkconf/auto-sharding-rang-mod.txt  (new file, mode 100755)

800M1-1000M=6
600M1-800M=4
200M1-400M=1
0-200M=5
400M1-600M=4
piplus-backend-v5-mycat-docker/conf/zkconf/autopartition-long.txt  (new file, mode 100755)

# range start-end ,data node index
# K=1000,M=10000.
0-500M=0
500M-1000M=1
1000M-1500M=2
\ No newline at end of file
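Per the file's own comments, each line maps an id range to a data node index, with the suffixes K=1000 and M=10000 (so "500M" means 5,000,000). A minimal sketch of that lookup, using only the conventions stated in the file (an illustration, not Mycat's AutoPartitionByLong source):

```python
# Sketch: interpret "start-end=nodeIndex" lines, where the file's own comment
# defines the suffixes K=1000 and M=10000 (so "500M" is 5,000,000).
UNITS = {"K": 1_000, "M": 10_000}

def to_int(s):
    s = s.strip()
    return int(s[:-1]) * UNITS[s[-1]] if s[-1] in UNITS else int(s)

def parse_range_map(lines):
    ranges = []
    for line in lines:
        line = line.strip()
        if not line or line.startswith("#"):
            continue  # skip comments and blank lines
        span, node = line.split("=")
        start, end = span.split("-")
        ranges.append((to_int(start), to_int(end), int(node)))
    return ranges

def route(ranges, value):
    for start, end, node in ranges:
        if start <= value <= end:
            return node  # first matching range wins
    raise ValueError(f"no range covers {value}")

ranges = parse_range_map(["0-500M=0", "500M-1000M=1", "1000M-1500M=2"])
assert route(ranges, 4_999_999) == 0   # below 500M -> node 0
assert route(ranges, 5_000_001) == 1   # just above 500M -> node 1
```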
piplus-backend-v5-mycat-docker/conf/zkconf/cacheservice.properties  (new file, mode 100755)

#used for mycat cache service conf
factory.encache=io.mycat.cache.impl.EnchachePooFactory
#key is pool name ,value is type,max size, expire seconds
pool.SQLRouteCache=encache,10000,1800
pool.ER_SQL2PARENTID=encache,1000,1800
layedpool.TableID2DataNodeCache=encache,10000,18000
layedpool.TableID2DataNodeCache.TESTDB_ORDERS=50000,18000
\ No newline at end of file
piplus-backend-v5-mycat-docker/conf/zkconf/ehcache.xml
0 → 100755
View file @
57419171
<ehcache
xmlns:xsi=
"http://www.w3.org/2001/XMLSchema-instance"
xsi:noNamespaceSchemaLocation=
"ehcache.xsd"
maxEntriesLocalHeap=
"100000000"
maxBytesLocalDisk=
"50G"
updateCheck=
"false"
>
<defaultCache
maxElementsInMemory=
"1000000"
eternal=
"false"
overflowToDisk=
"false"
diskSpoolBufferSizeMB=
"30"
maxElementsOnDisk=
"10000000"
diskPersistent=
"false"
diskExpiryThreadIntervalSeconds=
"120"
memoryStoreEvictionPolicy=
"LRU"
/>
</ehcache>
\ No newline at end of file
piplus-backend-v5-mycat-docker/conf/zkconf/index_to_charset.properties  (new file, mode 100755)

1=big5
2=latin2
3=dec8
4=cp850
5=latin1
6=hp8
7=koi8r
8=latin1
9=latin2
10=swe7
11=ascii
12=ujis
13=sjis
14=cp1251
15=latin1
16=hebrew
18=tis620
19=euckr
20=latin7
21=latin2
22=koi8u
23=cp1251
24=gb2312
25=greek
26=cp1250
27=latin2
28=gbk
29=cp1257
30=latin5
31=latin1
32=armscii8
33=utf8
34=cp1250
35=ucs2
36=cp866
37=keybcs2
38=macce
39=macroman
40=cp852
41=latin7
42=latin7
43=macce
44=cp1250
45=utf8mb4
46=utf8mb4
47=latin1
48=latin1
49=latin1
50=cp1251
51=cp1251
52=cp1251
53=macroman
54=utf16
55=utf16
56=utf16le
57=cp1256
58=cp1257
59=cp1257
60=utf32
61=utf32
62=utf16le
63=binary
64=armscii8
65=ascii
66=cp1250
67=cp1256
68=cp866
69=dec8
70=greek
71=hebrew
72=hp8
73=keybcs2
74=koi8r
75=koi8u
77=latin2
78=latin5
79=latin7
80=cp850
81=cp852
82=swe7
83=utf8
84=big5
85=euckr
86=gb2312
87=gbk
88=sjis
89=tis620
90=ucs2
91=ujis
92=geostd8
93=geostd8
94=latin1
95=cp932
96=cp932
97=eucjpms
98=eucjpms
99=cp1250
101=utf16
102=utf16
103=utf16
104=utf16
105=utf16
106=utf16
107=utf16
108=utf16
109=utf16
110=utf16
111=utf16
112=utf16
113=utf16
114=utf16
115=utf16
116=utf16
117=utf16
118=utf16
119=utf16
120=utf16
121=utf16
122=utf16
123=utf16
124=utf16
128=ucs2
129=ucs2
130=ucs2
131=ucs2
132=ucs2
133=ucs2
134=ucs2
135=ucs2
136=ucs2
137=ucs2
138=ucs2
139=ucs2
140=ucs2
141=ucs2
142=ucs2
143=ucs2
144=ucs2
145=ucs2
146=ucs2
147=ucs2
148=ucs2
149=ucs2
150=ucs2
151=ucs2
159=ucs2
160=utf32
161=utf32
162=utf32
163=utf32
164=utf32
165=utf32
166=utf32
167=utf32
168=utf32
169=utf32
170=utf32
171=utf32
172=utf32
173=utf32
174=utf32
175=utf32
176=utf32
177=utf32
178=utf32
179=utf32
180=utf32
181=utf32
182=utf32
183=utf32
192=utf8
193=utf8
194=utf8
195=utf8
196=utf8
197=utf8
198=utf8
199=utf8
200=utf8
201=utf8
202=utf8
203=utf8
204=utf8
205=utf8
206=utf8
207=utf8
208=utf8
209=utf8
210=utf8
211=utf8
212=utf8
213=utf8
214=utf8
215=utf8
223=utf8
224=utf8mb4
225=utf8mb4
226=utf8mb4
227=utf8mb4
228=utf8mb4
229=utf8mb4
230=utf8mb4
231=utf8mb4
232=utf8mb4
233=utf8mb4
234=utf8mb4
235=utf8mb4
236=utf8mb4
237=utf8mb4
238=utf8mb4
239=utf8mb4
240=utf8mb4
241=utf8mb4
242=utf8mb4
243=utf8mb4
244=utf8mb4
245=utf8mb4
246=utf8mb4
247=utf8mb4
\ No newline at end of file
piplus-backend-v5-mycat-docker/conf/zkconf/partition-hash-int.txt  (new file, mode 100755)

10000=0
10010=1
\ No newline at end of file
piplus-backend-v5-mycat-docker/conf/zkconf/partition-range-mod.txt  (new file, mode 100755)

# range start-end ,data node group size
0-200M=5
200M1-400M=1
400M1-600M=4
600M1-800M=4
800M1-1000M=6
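The comment line describes the format: each range is assigned a data node group *size*, not a single node index. The usual reading of this rang-mod scheme (a sketch of the idea under that assumption, not Mycat's PartitionByRangeMod source) is that the matched range owns the next group of consecutive nodes and the key is spread across that group by modulo:

```python
# Sketch: "start-end=groupSize" -- the matched range owns the next groupSize
# data nodes, and keys inside the range are distributed over them by modulo.
def route_range_mod(groups, value):
    """groups: [(start, end, group_size), ...] in file order -> node index."""
    offset = 0  # index of the first node belonging to the current group
    for start, end, size in groups:
        if start <= value <= end:
            return offset + value % size
        offset += size
    raise ValueError(f"no range covers {value}")

# With M=10000, as in the sibling map files, "0-200M=5" covers ids
# 0..2,000,000 and spreads them over nodes 0-4; the next range gets node 5.
groups = [(0, 2_000_000, 5), (2_000_001, 4_000_000, 1), (4_000_001, 6_000_000, 4)]
assert route_range_mod(groups, 1_234) == 1_234 % 5   # first group: nodes 0-4
assert route_range_mod(groups, 3_000_000) == 5       # single-node second group
```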
piplus-backend-v5-mycat-docker/conf/zkconf/rule.xml  (new file, mode 100755)

<?xml version="1.0" encoding="UTF-8"?>
<!-- - - Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License. - You
may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0
- - Unless required by applicable law or agreed to in writing, software -
distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the
License for the specific language governing permissions and - limitations
under the License. -->
<!DOCTYPE mycat:rule SYSTEM "rule.dtd">
<mycat:rule xmlns:mycat="http://io.mycat/">
    <tableRule name="rule1">
        <rule>
            <columns>id</columns>
            <algorithm>func1</algorithm>
        </rule>
    </tableRule>
    <tableRule name="rule2">
        <rule>
            <columns>user_id</columns>
            <algorithm>func1</algorithm>
        </rule>
    </tableRule>
    <tableRule name="sharding-by-intfile">
        <rule>
            <columns>sharding_id</columns>
            <algorithm>hash-int</algorithm>
        </rule>
    </tableRule>
    <tableRule name="auto-sharding-long">
        <rule>
            <columns>id</columns>
            <algorithm>rang-long</algorithm>
        </rule>
    </tableRule>
    <tableRule name="mod-long">
        <rule>
            <columns>id</columns>
            <algorithm>mod-long</algorithm>
        </rule>
    </tableRule>
    <tableRule name="sharding-by-murmur">
        <rule>
            <columns>id</columns>
            <algorithm>murmur</algorithm>
        </rule>
    </tableRule>
    <tableRule name="sharding-by-month">
        <rule>
            <columns>create_date</columns>
            <algorithm>partbymonth</algorithm>
        </rule>
    </tableRule>
    <tableRule name="latest-month-calldate">
        <rule>
            <columns>calldate</columns>
            <algorithm>latestMonth</algorithm>
        </rule>
    </tableRule>
    <tableRule name="auto-sharding-rang-mod">
        <rule>
            <columns>id</columns>
            <algorithm>rang-mod</algorithm>
        </rule>
    </tableRule>
    <tableRule name="jch">
        <rule>
            <columns>id</columns>
            <algorithm>jump-consistent-hash</algorithm>
        </rule>
    </tableRule>
    <function name="murmur" class="io.mycat.route.function.PartitionByMurmurHash">
        <property name="seed">0</property> <!-- default is 0 -->
        <property name="count">2</property> <!-- the number of database nodes to shard across; must be set, otherwise sharding is impossible -->
        <property name="virtualBucketTimes">160</property> <!-- each physical node is mapped to this many virtual nodes; default is 160, i.e. there are 160x as many virtual nodes as physical nodes -->
        <!-- <property name="weightMapFile">weightMapFile</property> node weights; nodes without a weight default to 1. Written in properties-file format: the keys are the integer node indexes 0 through count-1, the values are the weights. All weights must be positive integers, otherwise 1 is used instead -->
        <!-- <property name="bucketMapPath">/etc/mycat/bucketMapPath</property>
            used during testing to inspect how virtual nodes are distributed over physical nodes: if set, each virtual node's murmur hash and its physical-node mapping are written line by line to this file; there is no default, and nothing is written if unset -->
    </function>
    <function name="hash-int" class="io.mycat.route.function.PartitionByFileMap">
        <property name="mapFile">partition-hash-int.txt</property>
    </function>
    <function name="rang-long" class="io.mycat.route.function.AutoPartitionByLong">
        <property name="mapFile">autopartition-long.txt</property>
    </function>
    <function name="mod-long" class="io.mycat.route.function.PartitionByMod">
        <!-- how many data nodes -->
        <property name="count">3</property>
    </function>
    <function name="func1" class="io.mycat.route.function.PartitionByLong">
        <property name="partitionCount">8</property>
        <property name="partitionLength">128</property>
    </function>
    <function name="latestMonth" class="io.mycat.route.function.LatestMonthPartion">
        <property name="splitOneDay">24</property>
    </function>
    <function name="partbymonth" class="io.mycat.route.function.PartitionByMonth">
        <property name="dateFormat">yyyy-MM-dd</property>
        <property name="sBeginDate">2015-01-01</property>
    </function>
    <function name="rang-mod" class="io.mycat.route.function.PartitionByRangeMod">
        <property name="mapFile">partition-range-mod.txt</property>
    </function>
    <function name="jump-consistent-hash" class="io.mycat.route.function.PartitionByJumpConsistentHash">
        <property name="totalBuckets">3</property>
    </function>
</mycat:rule>
piplus-backend-v5-mycat-docker/conf/zkconf/schema.xml  (new file, mode 100755)

<?xml version="1.0"?>
<!DOCTYPE mycat:schema SYSTEM "schema.dtd">
<mycat:schema xmlns:mycat="http://io.mycat/">
    <schema name="TESTDB" checkSQLschema="false" sqlMaxLimit="100">
        <!-- auto sharding by id (long) -->
        <table name="travelrecord" dataNode="dn1,dn2,dn3" rule="auto-sharding-long" />
        <!-- global table is auto cloned to all defined data nodes ,so can join
            with any table whose sharding node is in the same data node -->
        <table name="company" primaryKey="ID" type="global" dataNode="dn1,dn2,dn3" />
        <table name="goods" primaryKey="ID" type="global" dataNode="dn1,dn2" />
        <!-- random sharding using mod sharind rule -->
        <table name="hotnews" primaryKey="ID" autoIncrement="true" dataNode="dn1,dn2,dn3"
            rule="mod-long" />
        <!-- <table name="dual" primaryKey="ID" dataNode="dnx,dnoracle2" type="global"
            needAddLimit="false"/> <table name="worker" primaryKey="ID" dataNode="jdbc_dn1,jdbc_dn2,jdbc_dn3"
            rule="mod-long" /> -->
        <table name="employee" primaryKey="ID" dataNode="dn1,dn2"
            rule="sharding-by-intfile" />
        <table name="customer" primaryKey="ID" dataNode="dn1,dn2"
            rule="sharding-by-intfile">
            <childTable name="orders" primaryKey="ID" joinKey="customer_id"
                parentKey="id">
                <childTable name="order_items" joinKey="order_id"
                    parentKey="id" />
            </childTable>
            <childTable name="customer_addr" primaryKey="ID" joinKey="customer_id"
                parentKey="id" />
        </table>
        <!-- <table name="oc_call" primaryKey="ID" dataNode="dn1$0-743" rule="latest-month-calldate"
            /> -->
    </schema>
    <!-- <dataNode name="dn1$0-743" dataHost="localhost1" database="db$0-743"
        /> -->
    <dataNode name="dn1" dataHost="localhost1" database="db1" />
    <dataNode name="dn2" dataHost="localhost1" database="db2" />
    <dataNode name="dn3" dataHost="localhost1" database="db3" />
    <!--<dataNode name="dn4" dataHost="sequoiadb1" database="SAMPLE" />
    <dataNode name="jdbc_dn1" dataHost="jdbchost" database="db1" />
    <dataNode name="jdbc_dn2" dataHost="jdbchost" database="db2" />
    <dataNode name="jdbc_dn3" dataHost="jdbchost" database="db3" /> -->
    <dataHost name="localhost1" maxCon="1000" minCon="10" balance="0"
        writeType="0" dbType="mysql" dbDriver="native" switchType="1" slaveThreshold="100">
        <heartbeat>select user()</heartbeat>
        <!-- can have multi write hosts -->
        <writeHost host="hostM1" url="localhost:3306" user="root"
            password="123456">
            <!-- can have multi read hosts -->
            <readHost host="hostS2" url="192.168.1.200:3306" user="root" password="xxx" />
        </writeHost>
        <writeHost host="hostS1" url="localhost:3316" user="root"
            password="123456" />
        <!-- <writeHost host="hostM2" url="localhost:3316" user="root" password="123456"/> -->
    </dataHost>
    <!--
    <dataHost name="sequoiadb1" maxCon="1000" minCon="1" balance="0" dbType="sequoiadb" dbDriver="jdbc">
        <heartbeat> </heartbeat>
        <writeHost host="hostM1" url="sequoiadb://1426587161.dbaas.sequoialab.net:11920/SAMPLE" user="jifeng" password="jifeng"></writeHost>
    </dataHost>
    <dataHost name="oracle1" maxCon="1000" minCon="1" balance="0" writeType="0" dbType="oracle" dbDriver="jdbc"> <heartbeat>select 1 from dual</heartbeat>
        <connectionInitSql>alter session set nls_date_format='yyyy-mm-dd hh24:mi:ss'</connectionInitSql>
        <writeHost host="hostM1" url="jdbc:oracle:thin:@127.0.0.1:1521:nange" user="base" password="123456" > </writeHost> </dataHost>
    <dataHost name="jdbchost" maxCon="1000" minCon="1" balance="0" writeType="0" dbType="mongodb" dbDriver="jdbc">
        <heartbeat>select user()</heartbeat>
        <writeHost host="hostM" url="mongodb://192.168.0.99/test" user="admin" password="123456" ></writeHost> </dataHost>
    <dataHost name="sparksql" maxCon="1000" minCon="1" balance="0" dbType="spark" dbDriver="jdbc">
        <heartbeat> </heartbeat>
        <writeHost host="hostM1" url="jdbc:hive2://feng01:10000" user="jifeng" password="jifeng"></writeHost> </dataHost> -->
    <!-- <dataHost name="jdbchost" maxCon="1000" minCon="10" balance="0" dbType="mysql"
        dbDriver="jdbc"> <heartbeat>select user()</heartbeat> <writeHost host="hostM1"
        url="jdbc:mysql://localhost:3306" user="root" password="123456"> </writeHost>
    </dataHost> -->
</mycat:schema>
\ No newline at end of file
piplus-backend-v5-mycat-docker/conf/zkconf/sequence_conf.properties  (new file, mode 100755)

#default global sequence
GLOBAL.HISIDS=
GLOBAL.MINID=10001
GLOBAL.MAXID=20000
GLOBAL.CURID=10000

# self define sequence
COMPANY.HISIDS=
COMPANY.MINID=1001
COMPANY.MAXID=2000
COMPANY.CURID=1000

CUSTOMER.HISIDS=
CUSTOMER.MINID=1001
CUSTOMER.MAXID=2000
CUSTOMER.CURID=1000

ORDER.HISIDS=
ORDER.MINID=1001
ORDER.MAXID=2000
ORDER.CURID=1000

HOTNEWS.HISIDS=
HOTNEWS.MINID=1001
HOTNEWS.MAXID=2000
HOTNEWS.CURID=1000
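Each sequence here is a window: MINID and MAXID bound the ids that may be handed out, and CURID is the last id already issued. A rough sketch of how such a window is consumed (an assumption about the local-file sequence handler's behavior, including the wrap-around; not Mycat code):

```python
# Sketch (assumption): issue ids from the (CURID, MAXID] window, restarting at
# MINID once the window is exhausted. Real persistence/locking is not shown.
class FileSequence:
    def __init__(self, minid, maxid, curid):
        self.minid, self.maxid, self.curid = minid, maxid, curid

    def next_id(self):
        if self.curid >= self.maxid:
            self.curid = self.minid - 1  # window exhausted: wrap to MINID
        self.curid += 1
        return self.curid

# The GLOBAL entry above: next issued id would be 10001.
seq = FileSequence(minid=10001, maxid=20000, curid=10000)
assert seq.next_id() == 10001
assert seq.next_id() == 10002
```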
piplus-backend-v5-mycat-docker/conf/zkconf/sequence_db_conf.properties  (new file, mode 100755)

#sequence stored in datanode
GLOBAL=dn1
COMPANY=dn1
CUSTOMER=dn1
ORDERS=dn1
\ No newline at end of file
piplus-backend-v5-mycat-docker/conf/zkconf/sequence_distributed_conf-mycat_fz_01.properties  (new file, mode 100755)

INSTANCEID=02
CLUSTERID=02
piplus-backend-v5-mycat-docker/conf/zkconf/sequence_distributed_conf.properties  (new file, mode 100755)

INSTANCEID=01
CLUSTERID=01
piplus-backend-v5-mycat-docker/conf/zkconf/sequence_time_conf-mycat_fz_01.properties  (new file, mode 100755)

#sequence depend on TIME
WORKID=03
DATAACENTERID=03
\ No newline at end of file
piplus-backend-v5-mycat-docker/conf/zkconf/sequence_time_conf.properties  (new file, mode 100755)

#sequence depend on TIME
WORKID=01
DATAACENTERID=01
\ No newline at end of file
piplus-backend-v5-mycat-docker/conf/zkconf/server-mycat_fz_01.xml  (new file, mode 100755)

<?xml version="1.0" encoding="UTF-8"?>
<!-- - - Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License. - You
may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0
- - Unless required by applicable law or agreed to in writing, software -
distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the
License for the specific language governing permissions and - limitations
under the License. -->
<!DOCTYPE mycat:server SYSTEM "server.dtd">
<mycat:server xmlns:mycat="http://io.mycat/">
    <system>
    <property name="useSqlStat">1</property> <!-- 1 enables live SQL statistics, 0 disables them -->
    <property name="useGlobleTableCheck">0</property> <!-- 1 enables global-table consistency checking, 0 disables it -->
    <property name="defaultSqlParser">druidparser</property>
    <property name="sequnceHandlerType">2</property>
    <!-- <property name="useCompression">1</property>--> <!-- 1 enables the MySQL compressed protocol -->
    <!-- <property name="fakeMySQLVersion">5.6.20</property>--> <!-- sets the MySQL version number to emulate -->
    <!-- <property name="processorBufferChunk">40960</property> -->
    <!--
    <property name="processors">1</property>
    <property name="processorExecutor">32</property>
    -->
    <!-- default is type 0: DirectByteBufferPool | type 1: ByteBufferArena -->
    <property name="processorBufferPoolType">0</property>
    <!-- default is 65535 (64K), the maximum text length for SQL parsing -->
    <!--<property name="maxStringLiteralLength">65535</property>-->
    <!--<property name="sequnceHandlerType">0</property>-->
    <!--<property name="backSocketNoDelay">1</property>-->
    <!--<property name="frontSocketNoDelay">1</property>-->
    <!--<property name="processorExecutor">16</property>-->
    <!--
    <property name="mutiNodeLimitType">1</property> 0: small-scale sorting (default); 1: sorting at the hundred-million-row scale
    <property name="mutiNodePatchSize">100</property> batch size for hundred-million-row-scale sorting
    <property name="processors">32</property> <property name="processorExecutor">32</property>
    <property name="serverPort">8066</property> <property name="managerPort">9066</property>
    <property name="idleTimeout">300000</property> <property name="bindIp">0.0.0.0</property>
    <property name="frontWriteQueueSize">4096</property> <property name="processors">32</property> -->
    <!-- distributed-transaction switch: 0 = do not filter distributed transactions; 1 = filter them (unless the transaction only involves global tables); 2 = do not filter, but log distributed transactions -->
    <property name="handleDistributedTransactions">0</property>
    <!-- off heap for merge/order/group/limit: 1 = on, 0 = off -->
    <property name="useOffHeapForMerge">1</property>
    <!-- unit is m -->
    <property name="memoryPageSize">1m</property>
    <!-- unit is k -->
    <property name="spillsFileBufferSize">1k</property>
    <property name="useStreamOutput">0</property>
    <!-- unit is m -->
    <property name="systemReserveMemorySize">389m</property>
    </system>
    <user name="root">
        <property name="password">digdeep</property>
        <property name="schemas">TESTDB</property>
    </user>
    <user name="user">
        <property name="password">user</property>
        <property name="schemas">TESTDB</property>
        <property name="readOnly">true</property>
    </user>
    <!--
    <quarantine>
        <whitehost>
            <host host="127.0.0.1" user="mycat"/>
            <host host="127.0.0.2" user="mycat"/>
        </whitehost>
        <blacklist check="false"></blacklist>
    </quarantine>
    -->
</mycat:server>
piplus-backend-v5-mycat-docker/conf/zkconf/server.xml  (new file, mode 100755)

<?xml version="1.0" encoding="UTF-8"?>
<!-- - - Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License. - You
may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0
- - Unless required by applicable law or agreed to in writing, software -
distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the
License for the specific language governing permissions and - limitations
under the License. -->
<!DOCTYPE mycat:server SYSTEM "server.dtd">
<mycat:server xmlns:mycat="http://io.mycat/">
    <system>
    <property name="useSqlStat">1</property> <!-- 1 enables live SQL statistics, 0 disables them -->
    <property name="useGlobleTableCheck">0</property> <!-- 1 enables global-table consistency checking, 0 disables it -->
    <property name="defaultSqlParser">druidparser</property>
    <property name="sequnceHandlerType">2</property>
    <!-- <property name="useCompression">1</property>--> <!-- 1 enables the MySQL compressed protocol -->
    <!-- <property name="fakeMySQLVersion">5.6.20</property>--> <!-- sets the MySQL version number to emulate -->
    <!-- <property name="processorBufferChunk">40960</property> -->
    <!--
    <property name="processors">1</property>
    <property name="processorExecutor">32</property>
    -->
    <!-- default is type 0: DirectByteBufferPool | type 1: ByteBufferArena -->
    <property name="processorBufferPoolType">0</property>
    <!-- default is 65535 (64K), the maximum text length for SQL parsing -->
    <!--<property name="maxStringLiteralLength">65535</property>-->
    <!--<property name="sequnceHandlerType">0</property>-->
    <!--<property name="backSocketNoDelay">1</property>-->
    <!--<property name="frontSocketNoDelay">1</property>-->
    <!--<property name="processorExecutor">16</property>-->
    <!--
    <property name="mutiNodeLimitType">1</property> 0: small-scale sorting (default); 1: sorting at the hundred-million-row scale
    <property name="mutiNodePatchSize">100</property> batch size for hundred-million-row-scale sorting
    <property name="processors">32</property> <property name="processorExecutor">32</property>
    <property name="serverPort">8066</property> <property name="managerPort">9066</property>
    <property name="idleTimeout">300000</property> <property name="bindIp">0.0.0.0</property>
    <property name="frontWriteQueueSize">4096</property> <property name="processors">32</property> -->
    <!-- distributed-transaction switch: 0 = do not filter distributed transactions; 1 = filter them (unless the transaction only involves global tables); 2 = do not filter, but log distributed transactions -->
    <property name="handleDistributedTransactions">0</property>
    <!-- off heap for merge/order/group/limit: 1 = on, 0 = off -->
    <property name="useOffHeapForMerge">1</property>
    <!-- unit is m -->
    <property name="memoryPageSize">1m</property>
    <!-- unit is k -->
    <property name="spillsFileBufferSize">1k</property>
    <property name="useStreamOutput">0</property>
    <!-- unit is m -->
    <property name="systemReserveMemorySize">384m</property>
    </system>
    <user name="root">
        <property name="password">digdeep</property>
        <property name="schemas">TESTDB</property>
    </user>
    <user name="user">
        <property name="password">user</property>
        <property name="schemas">TESTDB</property>
        <property name="readOnly">true</property>
    </user>
    <!--
    <quarantine>
        <whitehost>
            <host host="127.0.0.1" user="mycat"/>
            <host host="127.0.0.2" user="mycat"/>
        </whitehost>
        <blacklist check="false"></blacklist>
    </quarantine>
    -->
</mycat:server>
piplus-backend-v5-mycat-docker/conf/zkconf/sharding-by-enum.txt  (new file, mode 100755)

10000=0
10010=1
piplus-backend-v5-mycat-docker/conf/zkdownload/auto-sharding-long.txt  (new file, mode 100755)

2000001-4000000=1
0-2000000=0
4000001-8000000=2