
Commit d5725f3

Author: dapeng
Merge remote-tracking branch 'origin/1.8_release_3.10.x' into 1.8_release-github
2 parents: d6e7274 + 491a7a8

File tree: 29 files changed, +1169 −136 lines changed


.gitignore (+1)

```diff
@@ -8,6 +8,7 @@ target/
 *.eclipse.*
 *.iml
 plugins/
+sqlplugins/
 lib/
 .vertx/
 .DS_Store
```

core/src/main/java/com/dtstack/flink/sql/option/Options.java (+13)

```diff
@@ -72,6 +72,10 @@ public class Options {
     @OptionRequired(description = "log level")
     private String logLevel = "info";
 
+    @OptionRequired(description = "file add to ship file")
+    private String addShipfile;
+
+
     public String getMode() {
         return mode;
     }
@@ -183,4 +187,13 @@ public String getLogLevel() {
     public void setLogLevel(String logLevel) {
         this.logLevel = logLevel;
     }
+
+    public String getAddShipfile() {
+        return addShipfile;
+    }
+
+    public void setAddShipfile(String addShipfile) {
+        this.addShipfile = addShipfile;
+    }
+
 }
```

core/src/main/java/com/dtstack/flink/sql/parser/CreateTmpTableParser.java (+5 −5)

```diff
@@ -22,11 +22,7 @@
 
 import com.dtstack.flink.sql.util.DtStringUtil;
 import org.apache.calcite.config.Lex;
-import org.apache.calcite.sql.SqlBasicCall;
-import org.apache.calcite.sql.SqlJoin;
-import org.apache.calcite.sql.SqlKind;
-import org.apache.calcite.sql.SqlNode;
-import org.apache.calcite.sql.SqlSelect;
+import org.apache.calcite.sql.*;
 import org.apache.calcite.sql.parser.SqlParseException;
 import org.apache.calcite.sql.parser.SqlParser;
 import com.google.common.collect.Lists;
@@ -164,6 +160,10 @@ private static void parseNode(SqlNode sqlNode, CreateTmpTableParser.SqlParserRes
                 parseNode(unionRight, sqlParseResult);
             }
             break;
+        case MATCH_RECOGNIZE:
+            SqlMatchRecognize node = (SqlMatchRecognize) sqlNode;
+            sqlParseResult.addSourceTable(node.getTableRef().toString());
+            break;
         default:
             //do nothing
             break;
```

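The new `MATCH_RECOGNIZE` branch is what lets a CEP query's input table be registered as a source table: the table reference hides inside the `SqlMatchRecognize` node rather than appearing as a plain FROM identifier. Below is a minimal, self-contained sketch of that extraction, assuming the same `Lex.MYSQL` parser setup the class imports; the query, class name, and printed value are illustrative, not project test code.

```java
import org.apache.calcite.config.Lex;
import org.apache.calcite.sql.SqlKind;
import org.apache.calcite.sql.SqlMatchRecognize;
import org.apache.calcite.sql.SqlNode;
import org.apache.calcite.sql.SqlSelect;
import org.apache.calcite.sql.parser.SqlParser;

public class MatchRecognizeSourceTable {
    public static void main(String[] args) throws Exception {
        // Hypothetical CEP query over the MyTable source
        String sql = "SELECT * FROM MyTable MATCH_RECOGNIZE ("
                + " PARTITION BY channel ORDER BY xctime"
                + " MEASURES A.name AS startName"
                + " PATTERN (A B)"
                + " DEFINE A AS A.pv > 10, B AS B.pv > 20"
                + ")";

        SqlParser parser = SqlParser.create(sql,
                SqlParser.configBuilder().setLex(Lex.MYSQL).build());
        SqlSelect select = (SqlSelect) parser.parseStmt();

        SqlNode from = select.getFrom();
        if (from.getKind() == SqlKind.MATCH_RECOGNIZE) {
            // Same extraction as the new case in parseNode(...)
            SqlMatchRecognize matchRecognize = (SqlMatchRecognize) from;
            System.out.println(matchRecognize.getTableRef()); // MyTable
        }
    }
}
```
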
core/src/main/java/com/dtstack/flink/sql/parser/InsertSqlParser.java (+3 −2)

```diff
@@ -16,7 +16,6 @@
  * limitations under the License.
  */
 
-
 
 package com.dtstack.flink.sql.parser;
 
@@ -153,14 +152,16 @@ private static void parseNode(SqlNode sqlNode, SqlParseResult sqlParseResult){
 
     /**
      * Wrap the sqlNodes of the outermost select list in AS nodes to resolve field-name conflicts.
+     * Only fields of the form table.xx are rewritten.
      * @param selectList the select fields of the select node
      * @param sqlSelect the SqlSelect parsed from the outermost query
      */
     private static void rebuildSelectNode(SqlNodeList selectList, SqlSelect sqlSelect) {
         SqlNodeList sqlNodes = new SqlNodeList(selectList.getParserPosition());
 
         for (int index = 0; index < selectList.size(); index++) {
-            if (selectList.get(index).getKind().equals(SqlKind.AS)) {
+            if (selectList.get(index).getKind().equals(SqlKind.AS)
+                    || ((SqlIdentifier) selectList.get(index)).names.size() == 1) {
                 sqlNodes.add(selectList.get(index));
                 continue;
             }
```
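
A note on the widened guard in `rebuildSelectNode`: items that are already `AS` calls or single-part identifiers (plain `name`) are kept as-is, and only qualified `table.xx` identifiers go on to be rewrapped as AS nodes. A hedged illustration of the predicate in isolation (class and method names are hypothetical):

```java
import java.util.Arrays;

import org.apache.calcite.sql.SqlIdentifier;
import org.apache.calcite.sql.SqlKind;
import org.apache.calcite.sql.SqlNode;
import org.apache.calcite.sql.parser.SqlParserPos;

public class RebuildSelectCheck {

    // Mirrors the patched condition: true means the select item is kept as-is.
    // Like the patched code, the cast assumes every non-AS item is an
    // identifier; a bare function call would throw ClassCastException.
    static boolean keepAsIs(SqlNode selectItem) {
        return selectItem.getKind().equals(SqlKind.AS)
                || ((SqlIdentifier) selectItem).names.size() == 1;
    }

    public static void main(String[] args) {
        SqlIdentifier plain = new SqlIdentifier("name", SqlParserPos.ZERO);
        SqlIdentifier qualified =
                new SqlIdentifier(Arrays.asList("a", "id"), SqlParserPos.ZERO);

        System.out.println(keepAsIs(plain));     // true  -> left unchanged
        System.out.println(keepAsIs(qualified)); // false -> rewritten to "a.id AS id"
    }
}
```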
core/src/main/java/com/dtstack/flink/sql/util/AuthUtil.java (new file, +112)

```java
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.dtstack.flink.sql.util;

import org.apache.commons.io.FileUtils;

import java.io.File;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

/**
 * Utility methods for helping with security tasks.
 * Date: 2019/12/28
 * Company: www.dtstack.com
 * @author maqi
 */
public class AuthUtil {

    public static String creatJaasFile(String prefix, String suffix, JAASConfig jaasConfig) throws IOException {
        File krbConf = new File(System.getProperty("user.dir"));
        File temp = File.createTempFile(prefix, suffix, krbConf);
        temp.deleteOnExit();
        FileUtils.writeStringToFile(temp, jaasConfig.toString());
        return temp.getAbsolutePath();
    }

    public static class JAASConfig {
        private String entryName;
        private String loginModule;
        private String loginModuleFlag;
        private Map<String, String> loginModuleOptions;

        public JAASConfig(String entryName, String loginModule, String loginModuleFlag,
                          Map<String, String> loginModuleOptions) {
            this.entryName = entryName;
            this.loginModule = loginModule;
            this.loginModuleFlag = loginModuleFlag;
            this.loginModuleOptions = loginModuleOptions;
        }

        public static Builder builder() {
            return new Builder();
        }

        @Override
        public String toString() {
            StringBuilder stringBuilder = new StringBuilder(entryName).append(" {\n\t")
                    .append(loginModule).append(" ").append(loginModuleFlag).append("\n\t");
            String[] keys = loginModuleOptions.keySet().toArray(new String[loginModuleOptions.size()]);
            for (int i = 0; i < keys.length; i++) {
                stringBuilder.append(keys[i]).append("=").append(loginModuleOptions.get(keys[i]));
                if (i != keys.length - 1) {
                    stringBuilder.append("\n\t");
                } else {
                    stringBuilder.append(";\n");
                }
            }
            stringBuilder.append("\n").append("};");
            return stringBuilder.toString();
        }

        public static class Builder {
            private String entryName;
            private String loginModule;
            private String loginModuleFlag;
            private Map<String, String> loginModuleOptions;

            public Builder setEntryName(String entryName) {
                this.entryName = entryName;
                return this;
            }

            public Builder setLoginModule(String loginModule) {
                this.loginModule = loginModule;
                return this;
            }

            public Builder setLoginModuleFlag(String loginModuleFlag) {
                this.loginModuleFlag = loginModuleFlag;
                return this;
            }

            public Builder setLoginModuleOptions(Map<String, String> loginModuleOptions) {
                this.loginModuleOptions = loginModuleOptions;
                return this;
            }

            public JAASConfig build() {
                return new JAASConfig(entryName, loginModule, loginModuleFlag, loginModuleOptions);
            }
        }
    }
}
```
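
A minimal usage sketch for the new utility, assuming a JAAS entry for an HBase/ZooKeeper client login via Krb5LoginModule; the entry name, options, principal, and keytab path are illustrative:

```java
import com.dtstack.flink.sql.util.AuthUtil;

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

public class JaasFileExample {
    public static void main(String[] args) throws IOException {
        // Options for Krb5LoginModule; values are written verbatim into the
        // generated file, so string values carry their own quotes.
        Map<String, String> options = new HashMap<>();
        options.put("useKeyTab", "true");
        options.put("storeKey", "true");
        options.put("keyTab", "\"/opt/keytabs/test.keytab\"");  // illustrative path
        options.put("principal", "\"test@HADOOP.COM\"");        // illustrative principal

        AuthUtil.JAASConfig jaasConfig = AuthUtil.JAASConfig.builder()
                .setEntryName("Client")
                .setLoginModule("com.sun.security.auth.module.Krb5LoginModule")
                .setLoginModuleFlag("required")
                .setLoginModuleOptions(options)
                .build();

        // Writes a jaas-*.conf file into the working directory (deleted on JVM
        // exit) and returns its absolute path.
        String jaasPath = AuthUtil.creatJaasFile("jaas-", ".conf", jaasConfig);
        System.setProperty("java.security.auth.login.config", jaasPath);
    }
}
```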

docs/config.md (+5)

```diff
@@ -46,6 +46,11 @@ sh submit.sh -key1 val1 -key2 val2
   * Description: paths of extension jars, currently mainly jars containing UDF definitions
   * Required: no
   * Default: none
+
+* **addShipfile**
+  * Description: extra files to ship with the job, such as the keytab and krb5.conf files needed when Kerberos authentication is enabled
+  * Required: no
+  * Default: none
 
 * **confProp**
   * Description: miscellaneous parameter settings
```

docs/plugin/hbaseSide.md (+79 −1)

````diff
@@ -43,7 +43,14 @@
 | tableName | name of the HBase table |||
 | cache | side-table cache strategy (NONE/LRU) || NONE |
 | partitionedJoin | whether to keyBy on the join keys before the side-table join, to reduce the amount of side-table data cached || false |
-
+| kerberosAuthEnable | whether to enable Kerberos authentication || false |
+| regionserverPrincipal | the regionserver principal, taken from the hbase.regionserver.kerberos.principal property in hbase-site.xml |||
+| clientKeytabFile | the client keytab file |||
+| clientPrincipal | the client principal |||
+| zookeeperSaslClient | value of zookeeper.sasl.client || true |
+| securityKrb5Conf | value of java.security.krb5.conf |||
+Enabling Kerberos authentication also requires setting krb5 in the VM options, e.g. -Djava.security.krb5.conf=/Users/xuchao/Documents/flinkSql/kerberos/krb5.conf.
+The keytab file path must also be added to the addShipfile parameter; see the [command-line options](../config.md) for details.
 --------------
 
 ## 5. Examples
@@ -168,4 +175,75 @@ into
     sideTable b
 on a.id=b.rowkey1 and a.name = b.rowkey2;
 ```
+### Kerberos side-table example
+```
+CREATE TABLE MyTable(
+    name varchar,
+    channel varchar,
+    pv INT,
+    xctime bigint
+ )WITH(
+    type ='kafka11',
+    bootstrapServers ='172.16.8.107:9092',
+    zookeeperQuorum ='172.16.8.107:2181/kafka',
+    offsetReset ='latest',
+    topic ='es_test',
+    timezone='Asia/Shanghai',
+    updateMode ='append',
+    enableKeyPartitions ='false',
+    topicIsPattern ='false',
+    parallelism ='1'
+ );
+
+CREATE TABLE MyResult(
+    name varchar,
+    channel varchar
+ )WITH(
+    type ='mysql',
+    url ='jdbc:mysql://172.16.10.45:3306/test',
+    userName ='dtstack',
+    password ='abc123',
+    tableName ='myresult',
+    updateMode ='append',
+    parallelism ='1',
+    batchSize ='100',
+    batchWaitInterval ='1000'
+ );
+
+CREATE TABLE sideTable(
+    cf:name varchar as name,
+    cf:info varchar as info,
+    PRIMARY KEY(md5(name) +'test'),
+    PERIOD FOR SYSTEM_TIME
+ )WITH(
+    type ='hbase',
+    zookeeperQuorum ='172.16.10.104:2181,172.16.10.224:2181,172.16.10.252:2181',
+    zookeeperParent ='/hbase',
+    tableName ='workerinfo',
+    partitionedJoin ='false',
+    cache ='LRU',
+    cacheSize ='10000',
+    cacheTTLMs ='60000',
+    asyncTimeoutNum ='0',
+    parallelism ='1',
+    kerberosAuthEnable='true',
+    regionserverPrincipal='hbase/[email protected]',
+    clientKeytabFile='test.keytab',
+    clientPrincipal='[email protected]',
+    securityKrb5Conf='krb5.conf'
+ );
 
+insert into
+    MyResult
+select
+    b.name as name,
+    a.channel
+from
+    MyTable a
+join
+    sideTable b
+on a.channel=b.name
+```
````

docs/plugin/hbaseSink.md (+61 −3)

````diff
@@ -37,9 +37,17 @@ hbase2.0
 | rowkey | columns that form the HBase rowkey; separate multiple values with commas |||
 | updateMode | APPEND: no retraction, only emits incremental data; UPSERT: deletes the retracted row first, then updates | no | APPEND |
 | parallelism | parallelism setting || 1 |
-
-
+| kerberosAuthEnable | whether to enable Kerberos authentication || false |
+| regionserverPrincipal | the regionserver principal, taken from the hbase.regionserver.kerberos.principal property in hbase-site.xml |||
+| clientKeytabFile | the client keytab file |||
+| clientPrincipal | the client principal |||
+| zookeeperSaslClient | value of zookeeper.sasl.client || true |
+| securityKrb5Conf | value of java.security.krb5.conf |||
+Enabling Kerberos authentication also requires setting krb5 in the VM options, e.g. -Djava.security.krb5.conf=/Users/xuchao/Documents/flinkSql/kerberos/krb5.conf.
+The keytab file path must also be added to the addShipfile parameter; see the [command-line options](../config.md) for details.
 ## 5. Examples:
+
+### Plain result-table example
 ```
 CREATE TABLE MyTable(
     name varchar,
@@ -78,9 +86,59 @@ into
     channel,
     name
 from
-    MyTable a
+    MyTable a
+
+
 ```
 
+### Kerberos result-table example
+```
+CREATE TABLE MyTable(
+    name varchar,
+    channel varchar,
+    age int
+ )WITH(
+    type ='kafka10',
+    bootstrapServers ='172.16.8.107:9092',
+    zookeeperQuorum ='172.16.8.107:2181/kafka',
+    offsetReset ='latest',
+    topic ='mqTest01',
+    timezone='Asia/Shanghai',
+    updateMode ='append',
+    enableKeyPartitions ='false',
+    topicIsPattern ='false',
+    parallelism ='1'
+ );
+
+CREATE TABLE MyResult(
+    cf:name varchar,
+    cf:channel varchar
+ )WITH(
+    type ='hbase',
+    zookeeperQuorum ='cdh2.cdhsite:2181,cdh4.cdhsite:2181',
+    zookeeperParent ='/hbase',
+    tableName ='myresult',
+    partitionedJoin ='false',
+    parallelism ='1',
+    rowKey='name',
+    kerberosAuthEnable='true',
+    regionserverPrincipal='hbase/[email protected]',
+    clientKeytabFile='test.keytab',
+    clientPrincipal='[email protected]',
+    securityKrb5Conf='krb5.conf'
+ );
+
+insert
+into
+    MyResult
+select
+    channel,
+    name
+from
+    MyTable a
+
+```
 
 ## 6. HBase data
 ### Data description
 HBase rowkey construction rule: the values of the declared rowkey fields are used as the key; multiple fields are joined with '-'
````
