Most useful Naxsi rules to maintain - nginx

After much searching on Google without finding anything useful, I would like to know which Naxsi rules are the most useful to keep (even in modified form)
and which I can safely disable.
Below is my current configuration.
##################################
## INTERNAL RULES IDS:1-999 ##
##################################
##MainRule "msg:weird request, unable to parse" id:1;
##MainRule "msg:request too big, stored on disk and not parsed" id:2;
##MainRule "msg:invalid hex encoding, null bytes" id:10;
##MainRule "msg:unknown content-type" id:11;
##MainRule "msg:invalid formatted url" id:12;
##MainRule "msg:invalid POST format" id:13;
##MainRule "msg:invalid POST boundary" id:14;
##MainRule "msg:invalid JSON" id:15;
##MainRule "msg:empty POST" id:16;
##MainRule "msg:libinjection_sql" id:17;
##MainRule "msg:libinjection_xss" id:18;
##################################
## SQL Injections IDs:1000-1099 ##
##################################
MainRule "rx:select|union|update|delete|insert|table|from|ascii|hex|unhex|drop" "msg:sql keywords" "mz:BODY|URL|ARGS|$HEADERS_VAR:Cookie" "s:$SQL:4" id:1000;
MainRule "str:\"" "msg:double quote" "mz:BODY|URL|ARGS|$HEADERS_VAR:Cookie" "s:$SQL:8,$XSS:8" id:1001;
MainRule "str:0x" "msg:0x, possible hex encoding" "mz:BODY|URL|ARGS|$HEADERS_VAR:Cookie" "s:$SQL:2" id:1002;
## Hardcore rules
MainRule "str:/*" "msg:mysql comment (/*)" "mz:BODY|URL|ARGS|$HEADERS_VAR:Cookie" "s:$SQL:8" id:1003;
MainRule "str:*/" "msg:mysql comment (*/)" "mz:BODY|URL|ARGS|$HEADERS_VAR:Cookie" "s:$SQL:8" id:1004;
MainRule "str:|" "msg:mysql keyword (|)" "mz:BODY|URL|ARGS|$HEADERS_VAR:Cookie" "s:$SQL:8" id:1005;
MainRule "str:&&" "msg:mysql keyword (&&)" "mz:BODY|URL|ARGS|$HEADERS_VAR:Cookie" "s:$SQL:8" id:1006;
## end of hardcore rules
MainRule "str:--" "msg:mysql comment (--)" "mz:BODY|URL|ARGS|$HEADERS_VAR:Cookie" "s:$SQL:4" id:1007;
MainRule "str:;" "msg:semicolon" "mz:BODY|URL|ARGS" "s:$SQL:4,$XSS:8" id:1008;
MainRule "str:=" "msg:equal sign in var, probable sql/xss" "mz:ARGS|BODY" "s:$SQL:2" id:1009;
MainRule "str:(" "msg:open parenthesis, probable sql/xss" "mz:ARGS|URL|BODY|$HEADERS_VAR:Cookie" "s:$SQL:4,$XSS:8" id:1010;
MainRule "str:)" "msg:close parenthesis, probable sql/xss" "mz:ARGS|URL|BODY|$HEADERS_VAR:Cookie" "s:$SQL:4,$XSS:8" id:1011;
MainRule "str:'" "msg:simple quote" "mz:ARGS|BODY|URL|$HEADERS_VAR:Cookie" "s:$SQL:4,$XSS:8" id:1013;
MainRule "str:," "msg:comma" "mz:BODY|URL|ARGS|$HEADERS_VAR:Cookie" "s:$SQL:4" id:1015;
MainRule "str:#" "msg:mysql comment (#)" "mz:BODY|URL|ARGS|$HEADERS_VAR:Cookie" "s:$SQL:4" id:1016;
MainRule "str:##" "msg:double arobase (##)" "mz:BODY|URL|ARGS|$HEADERS_VAR:Cookie" "s:$SQL:4" id:1017;
###############################
## OBVIOUS RFI IDs:1100-1199 ##
###############################
MainRule "str:http://" "msg:http:// scheme" "mz:ARGS|BODY|$HEADERS_VAR:Cookie" "s:$RFI:8" id:1100;
MainRule "str:https://" "msg:https:// scheme" "mz:ARGS|BODY|$HEADERS_VAR:Cookie" "s:$RFI:8" id:1101;
MainRule "str:ftp://" "msg:ftp:// scheme" "mz:ARGS|BODY|$HEADERS_VAR:Cookie" "s:$RFI:8" id:1102;
MainRule "str:php://" "msg:php:// scheme" "mz:ARGS|BODY|$HEADERS_VAR:Cookie" "s:$RFI:8" id:1103;
MainRule "str:sftp://" "msg:sftp:// scheme" "mz:ARGS|BODY|$HEADERS_VAR:Cookie" "s:$RFI:8" id:1104;
MainRule "str:zlib://" "msg:zlib:// scheme" "mz:ARGS|BODY|$HEADERS_VAR:Cookie" "s:$RFI:8" id:1105;
MainRule "str:data://" "msg:data:// scheme" "mz:ARGS|BODY|$HEADERS_VAR:Cookie" "s:$RFI:8" id:1106;
MainRule "str:glob://" "msg:glob:// scheme" "mz:ARGS|BODY|$HEADERS_VAR:Cookie" "s:$RFI:8" id:1107;
MainRule "str:phar://" "msg:phar:// scheme" "mz:ARGS|BODY|$HEADERS_VAR:Cookie" "s:$RFI:8" id:1108;
MainRule "str:file://" "msg:file:// scheme" "mz:ARGS|BODY|$HEADERS_VAR:Cookie" "s:$RFI:8" id:1109;
MainRule "str:gopher://" "msg:gopher:// scheme" "mz:ARGS|BODY|$HEADERS_VAR:Cookie" "s:$RFI:8" id:1110;
#######################################
## Directory traversal IDs:1200-1299 ##
#######################################
MainRule "str:.." "msg:double dot" "mz:ARGS|URL|BODY|$HEADERS_VAR:Cookie" "s:$TRAVERSAL:4" id:1200;
MainRule "str:/etc/passwd" "msg:obvious probe" "mz:ARGS|URL|BODY|$HEADERS_VAR:Cookie" "s:$TRAVERSAL:4" id:1202;
MainRule "str:c:\\" "msg:obvious windows path" "mz:ARGS|URL|BODY|$HEADERS_VAR:Cookie" "s:$TRAVERSAL:4" id:1203;
MainRule "str:cmd.exe" "msg:obvious probe" "mz:ARGS|URL|BODY|$HEADERS_VAR:Cookie" "s:$TRAVERSAL:4" id:1204;
MainRule "str:\\" "msg:backslash" "mz:ARGS|URL|BODY|$HEADERS_VAR:Cookie" "s:$TRAVERSAL:4" id:1205;
#MainRule "str:/" "msg:slash in args" "mz:ARGS|BODY|$HEADERS_VAR:Cookie" "s:$TRAVERSAL:2" id:1206;
########################################
## Cross Site Scripting IDs:1300-1399 ##
########################################
#MainRule "str:<" "msg:html open tag" "mz:ARGS|URL|BODY|$HEADERS_VAR:Cookie" "s:$XSS:8" id:1302;
MainRule "str:<" "msg:html open tag" "mz:ARGS|URL|$HEADERS_VAR:Cookie" "s:$XSS:8" id:1302;
#MainRule "str:>" "msg:html close tag" "mz:ARGS|URL|BODY|$HEADERS_VAR:Cookie" "s:$XSS:8" id:1303;
MainRule "str:>" "msg:html close tag" "mz:ARGS|URL|$HEADERS_VAR:Cookie" "s:$XSS:8" id:1303;
MainRule "str:[" "msg:open square backet ([), possible js" "mz:BODY|URL|ARGS|$HEADERS_VAR:Cookie" "s:$XSS:4" id:1310;
MainRule "str:]" "msg:close square bracket (]), possible js" "mz:BODY|URL|ARGS|$HEADERS_VAR:Cookie" "s:$XSS:4" id:1311;
MainRule "str:~" "msg:tilde (~) character" "mz:BODY|URL|ARGS|$HEADERS_VAR:Cookie" "s:$XSS:4" id:1312;
MainRule "str:`" "msg:grave accent (`)" "mz:ARGS|URL|BODY|$HEADERS_VAR:Cookie" "s:$XSS:8" id:1314;
MainRule "rx:%[2|3]." "msg:double encoding" "mz:ARGS|URL|BODY|$HEADERS_VAR:Cookie" "s:$XSS:8" id:1315;
####################################
## Evading tricks IDs: 1400-1500 ##
####################################
MainRule "str:&#" "msg:utf7/8 encoding" "mz:ARGS|BODY|URL|$HEADERS_VAR:Cookie" "s:$EVADE:4" id:1400;
MainRule "str:%U" "msg:M$ encoding" "mz:ARGS|BODY|URL|$HEADERS_VAR:Cookie" "s:$EVADE:4" id:1401;
#############################
## File uploads: 1500-1600 ##
#############################
MainRule "rx:\.ph|\.asp|\.ht" "msg:asp/php file upload" "mz:FILE_EXT" "s:$UPLOAD:8" id:1500;
Thanks for any help,
Lorenzo.

Related

OpenLDAP Invalid credentials for readonly user

I am trying to follow this guide https://www.talkingquickly.co.uk/gitea-sso-with-keycloak-openldap-openid-connect to create an SSO solution with OpenLDAP and Keycloak. I'm trying to add the readonly user. It should be the same LDIFs as here: https://github.com/osixia/docker-openldap/tree/master/image/service/slapd/assets/config/bootstrap/ldif/readonly-user
I applied those LDIFs for the readonly user, but I get:
$ ldapsearch -x -H ldap://localhost:1389 -b "dc=muellerpublic,dc=de" -D "cn=readonly,dc=muellerpublic,dc=de" "+" -w xxx
Handling connection for 1389
ldap_bind: Invalid credentials (49)
Here are the users/groups:
$ ldapsearch -x -H ldap://localhost:1389 -b "dc=muellerpublic,dc=de" -D "cn=admin,dc=muellerpublic,dc=de" "+" -w xxx
Handling connection for 1389
# extended LDIF
#
# LDAPv3
# base <dc=muellerpublic,dc=de> with scope subtree
# filter: (objectclass=*)
# requesting: +
#
# muellerpublic.de
dn: dc=muellerpublic,dc=de
structuralObjectClass: organization
entryUUID: ce600638-0d8f-103c-8fb1-1558d46de393
creatorsName: cn=admin,dc=muellerpublic,dc=de
createTimestamp: 20220119162257Z
entryCSN: 20220119162257.152328Z#000000#000#000000
modifiersName: cn=admin,dc=muellerpublic,dc=de
modifyTimestamp: 20220119162257Z
entryDN: dc=muellerpublic,dc=de
subschemaSubentry: cn=Subschema
hasSubordinates: TRUE
# users, muellerpublic.de
dn: ou=users,dc=muellerpublic,dc=de
structuralObjectClass: organizationalUnit
entryUUID: ce601dc6-0d8f-103c-8fb2-1558d46de393
creatorsName: cn=admin,dc=muellerpublic,dc=de
createTimestamp: 20220119162257Z
entryCSN: 20220119162257.152933Z#000000#000#000000
modifiersName: cn=admin,dc=muellerpublic,dc=de
modifyTimestamp: 20220119162257Z
entryDN: ou=users,dc=muellerpublic,dc=de
subschemaSubentry: cn=Subschema
hasSubordinates: FALSE
# readonly, muellerpublic.de
dn: cn=readonly,dc=muellerpublic,dc=de
structuralObjectClass: organizationalRole
entryUUID: ce60b6a0-0d8f-103c-8fb3-1558d46de393
creatorsName: cn=admin,dc=muellerpublic,dc=de
createTimestamp: 20220119162257Z
entryCSN: 20220119162257.156845Z#000000#000#000000
modifiersName: cn=admin,dc=muellerpublic,dc=de
modifyTimestamp: 20220119162257Z
entryDN: cn=readonly,dc=muellerpublic,dc=de
subschemaSubentry: cn=Subschema
hasSubordinates: FALSE
Here are the LDIFs created:
20-readonly-user.ldif: |
# Paths
dn: cn=readonly,dc=muellerpublic,dc=de
changetype: add
cn: readonly
objectClass: simpleSecurityObject
objectClass: organizationalRole
userPassword: {SSHA}5Y0mPhzRCYDBRltdvF6hp+m0DWgPTdjD
description: LDAP read only user
21-readonly-user-acl.config.ldif: |
dn: olcDatabase={2}mdb,cn=config
changetype: modify
replace: olcAccess
olcAccess: to * by dn.exact=gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth manage by * break
olcAccess: to attrs=userPassword,shadowLastChange by self write by dn="cn=admin,dc=muellerpublic,dc=de" write by anonymous auth by * none
olcAccess: to * by self read by dn="cn=admin,dc=muellerpublic,dc=de" write by dn="cn=readonly,dc=muellerpublic,dc=de" read by * none
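For debugging, the bind can be tested on its own (without any search) with ldapwhoami; a quick sketch using the same host and DN as above:
$ ldapwhoami -x -H ldap://localhost:1389 -D "cn=readonly,dc=muellerpublic,dc=de" -w xxx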

Oozie sqoop hive import : Main class [org.apache.oozie.action.hadoop.SqoopMain], exit code [1]

I have a workflow that looks something like this:
<workflow-app xmlns="uri:oozie:workflow:0.2" name="oozie-sqoop">
    <start to="sqoop1" />
    <action name="sqoop1">
        <sqoop xmlns="uri:oozie:sqoop-action:0.4">
            <job-tracker>localhost:8032</job-tracker>
            <name-node>hdfs://quickstart.cloudera:8020</name-node>
            <arg>import</arg>
            <arg>--connect</arg>
            <arg>jdbc:mysql://8.8.8.8:3306/pro-data</arg>
            <arg>--username</arg>
            <arg>root</arg>
            <arg>--table</arg>
            <arg>data_source</arg>
            <arg>--hive-import</arg>
        </sqoop>
        <ok to="end" />
        <error to="fail" />
    </action>
    <kill name="fail">
        <message>sqoop failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
    </kill>
    <end name="end" />
</workflow-app>
It always runs into an error which says:
Main class [org.apache.oozie.action.hadoop.SqoopMain], exit code [1]
I can import data into HDFS with this workflow if I add a --target-dir arg, but when I use --hive-import it won't work. Is there anything wrong in my XML?
Actually, I am using the Oozie REST API here. My endpoint and the data look like the following:
http://8.8.8.8:11000/oozie/v1/jobs?jobtype=sqoop
input data:
<?xml version="1.0" encoding="UTF-8"?>
<configuration>
<property>
<name>fs.default.name</name>
<value>hdfs://quickstart.cloudera:8020</value>
</property>
<property>
<name>mapred.job.tracker</name>
<value>localhost:8032</value>
</property>
<property>
<name>user.name</name>
<value>cloudera</value>
</property>
<property>
<name>oozie.sqoop.command</name>
<value>
import
--connect
jdbc:mysql://ip:3306/pro-data
--username
root
--table
data_source
--hive-home
/user/cloudera/warehouse/
-m
1
--incremental
append
--check-column
id
--hive-import
</value>
</property>
<property>
<name>oozie.libpath</name>
<value>hdfs://quickstart.cloudera:8020/user/oozie/share/lib/lib_20160715181153/sqoop</value>
</property>
<property>
<name>hcat.metastore.uri</name>
<value>thrift://127.0.0.1:9083</value>
</property>
<property>
<name>oozie.use.system.libpath</name>
<value>True</value>
</property>
<property>
<name>oozie.proxysubmission</name>
<value>True</value>
</property>
</configuration>
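For completeness, the submission itself is a plain HTTP POST of that XML configuration to the jobs endpoint. A minimal curl sketch (the endpoint is mine; config.xml is just a name for the file above; --data-binary is used so the newlines separating the Sqoop arguments survive):
$ curl -X POST -H "Content-Type: application/xml" --data-binary @config.xml "http://8.8.8.8:11000/oozie/v1/jobs?jobtype=sqoop"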
Log:
2016-07-16 14:30:38,171 INFO ActionStartXCommand:520 - SERVER[quickstart.cloudera] USER[cloudera] GROUP[-] TOKEN[] APP[oozie-sqoop] JOB[0000016-160716103436859-oozie-oozi-W] ACTION[0000016-160716103436859-oozie-oozi-W#:start:] Start action [0000016-160716103436859-oozie-oozi-W#:start:] with user-retry state : userRetryCount [0], userRetryMax [0], userRetryInterval [10]
2016-07-16 14:30:38,199 INFO ActionStartXCommand:520 - SERVER[quickstart.cloudera] USER[cloudera] GROUP[-] TOKEN[] APP[oozie-sqoop] JOB[0000016-160716103436859-oozie-oozi-W] ACTION[0000016-160716103436859-oozie-oozi-W#:start:] [***0000016-160716103436859-oozie-oozi-W#:start:***]Action status=DONE
2016-07-16 14:30:38,204 INFO ActionStartXCommand:520 - SERVER[quickstart.cloudera] USER[cloudera] GROUP[-] TOKEN[] APP[oozie-sqoop] JOB[0000016-160716103436859-oozie-oozi-W] ACTION[0000016-160716103436859-oozie-oozi-W#:start:] [***0000016-160716103436859-oozie-oozi-W#:start:***]Action updated in DB!
2016-07-16 14:30:38,475 INFO ActionStartXCommand:520 - SERVER[quickstart.cloudera] USER[cloudera] GROUP[-] TOKEN[] APP[oozie-sqoop] JOB[0000016-160716103436859-oozie-oozi-W] ACTION[0000016-160716103436859-oozie-oozi-W#sqoop1] Start action [0000016-160716103436859-oozie-oozi-W#sqoop1] with user-retry state : userRetryCount [0], userRetryMax [0], userRetryInterval [10]
2016-07-16 14:31:17,880 INFO SqoopActionExecutor:520 - SERVER[quickstart.cloudera] USER[cloudera] GROUP[-] TOKEN[] APP[oozie-sqoop] JOB[0000016-160716103436859-oozie-oozi-W] ACTION[0000016-160716103436859-oozie-oozi-W#sqoop1] checking action, hadoop job ID [job_1468690384910_0024] status [RUNNING]
2016-07-16 14:31:17,887 INFO ActionStartXCommand:520 - SERVER[quickstart.cloudera] USER[cloudera] GROUP[-] TOKEN[] APP[oozie-sqoop] JOB[0000016-160716103436859-oozie-oozi-W] ACTION[0000016-160716103436859-oozie-oozi-W#sqoop1] [***0000016-160716103436859-oozie-oozi-W#sqoop1***]Action status=RUNNING
2016-07-16 14:31:17,887 INFO ActionStartXCommand:520 - SERVER[quickstart.cloudera] USER[cloudera] GROUP[-] TOKEN[] APP[oozie-sqoop] JOB[0000016-160716103436859-oozie-oozi-W] ACTION[0000016-160716103436859-oozie-oozi-W#sqoop1] [***0000016-160716103436859-oozie-oozi-W#sqoop1***]Action updated in DB!
2016-07-16 14:34:40,286 INFO CallbackServlet:520 - SERVER[quickstart.cloudera] USER[-] GROUP[-] TOKEN[-] APP[-] JOB[0000016-160716103436859-oozie-oozi-W] ACTION[0000016-160716103436859-oozie-oozi-W#sqoop1] callback for action [0000016-160716103436859-oozie-oozi-W#sqoop1]
2016-07-16 14:34:42,001 INFO SqoopActionExecutor:520 - SERVER[quickstart.cloudera] USER[cloudera] GROUP[-] TOKEN[] APP[oozie-sqoop] JOB[0000016-160716103436859-oozie-oozi-W] ACTION[0000016-160716103436859-oozie-oozi-W#sqoop1] checking action, hadoop job ID [job_1468690384910_0024] status [RUNNING]
2016-07-16 14:34:57,679 INFO CallbackServlet:520 - SERVER[quickstart.cloudera] USER[-] GROUP[-] TOKEN[-] APP[-] JOB[0000016-160716103436859-oozie-oozi-W] ACTION[0000016-160716103436859-oozie-oozi-W#sqoop1] callback for action [0000016-160716103436859-oozie-oozi-W#sqoop1]
2016-07-16 14:34:58,642 INFO SqoopActionExecutor:520 - SERVER[quickstart.cloudera] USER[cloudera] GROUP[-] TOKEN[] APP[oozie-sqoop] JOB[0000016-160716103436859-oozie-oozi-W] ACTION[0000016-160716103436859-oozie-oozi-W#sqoop1] action completed, external ID [job_1468690384910_0024]
2016-07-16 14:34:58,663 WARN SqoopActionExecutor:523 - SERVER[quickstart.cloudera] USER[cloudera] GROUP[-] TOKEN[] APP[oozie-sqoop] JOB[0000016-160716103436859-oozie-oozi-W] ACTION[0000016-160716103436859-oozie-oozi-W#sqoop1] Launcher ERROR, reason: Main class [org.apache.oozie.action.hadoop.SqoopMain], exit code [1]
2016-07-16 14:34:58,987 INFO ActionEndXCommand:520 - SERVER[quickstart.cloudera] USER[cloudera] GROUP[-] TOKEN[] APP[oozie-sqoop] JOB[0000016-160716103436859-oozie-oozi-W] ACTION[0000016-160716103436859-oozie-oozi-W#sqoop1] ERROR is considered as FAILED for SLA
2016-07-16 14:34:59,299 INFO ActionStartXCommand:520 - SERVER[quickstart.cloudera] USER[cloudera] GROUP[-] TOKEN[] APP[oozie-sqoop] JOB[0000016-160716103436859-oozie-oozi-W] ACTION[0000016-160716103436859-oozie-oozi-W#fail] Start action [0000016-160716103436859-oozie-oozi-W#fail] with user-retry state : userRetryCount [0], userRetryMax [0], userRetryInterval [10]
2016-07-16 14:34:59,343 INFO ActionStartXCommand:520 - SERVER[quickstart.cloudera] USER[cloudera] GROUP[-] TOKEN[] APP[oozie-sqoop] JOB[0000016-160716103436859-oozie-oozi-W] ACTION[0000016-160716103436859-oozie-oozi-W#fail] [***0000016-160716103436859-oozie-oozi-W#fail***]Action status=DONE
2016-07-16 14:34:59,349 INFO ActionStartXCommand:520 - SERVER[quickstart.cloudera] USER[cloudera] GROUP[-] TOKEN[] APP[oozie-sqoop] JOB[0000016-160716103436859-oozie-oozi-W] ACTION[0000016-160716103436859-oozie-oozi-W#fail] [***0000016-160716103436859-oozie-oozi-W#fail***]Action updated in DB!
Yarn log:
mapreduce.tasktracker.http.threads=40
dfs.stream-buffer-size=4096
tfile.fs.output.buffer.size=262144
fs.permissions.umask-mode=022
dfs.client.datanode-restart.timeout=30
dfs.namenode.resource.du.reserved=104857600
yarn.resourcemanager.am.max-attempts=2
yarn.nodemanager.resource.percentage-physical-cpu-limit=100
ha.failover-controller.graceful-fence.connection.retries=1
mapreduce.job.speculative.speculative-cap-running-tasks=0.1
dfs.datanode.drop.cache.behind.writes=false
hadoop.common.configuration.version=0.23.0
mapreduce.job.ubertask.enable=false
yarn.app.mapreduce.am.resource.cpu-vcores=1
dfs.namenode.replication.work.multiplier.per.iteration=2
mapreduce.job.acl-modify-job=
io.seqfile.local.dir=${hadoop.tmp.dir}/io/local
yarn.resourcemanager.system-metrics-publisher.enabled=false
fs.s3.sleepTimeSeconds=10
mapreduce.client.output.filter=FAILED
------------------------
Sqoop command arguments :
import
--connect
jdbc:mysql://172.16.1.18:3306/pro-data
--username
root
--table
data_source
--hive-home
/user/cloudera/warehouse/
-m
1
--incremental
append
--check-column
id
--hive-import
Fetching child yarn jobs
tag id : oozie-a68d0f5f197314a14720c8ff3935b1dc
Child yarn jobs are found -
=================================================================
>>> Invoking Sqoop command line now >>>
42238 [uber-SubtaskRunner] WARN org.apache.sqoop.tool.SqoopTool - $SQOOP_CONF_DIR has not been set in the environment. Cannot check for additional configuration.
42453 [uber-SubtaskRunner] INFO org.apache.sqoop.Sqoop - Running Sqoop version: 1.4.6-cdh5.5.0
42572 [uber-SubtaskRunner] INFO org.apache.sqoop.tool.BaseSqoopTool - Using Hive-specific delimiters for output. You can override
42572 [uber-SubtaskRunner] INFO org.apache.sqoop.tool.BaseSqoopTool - delimiters with --fields-terminated-by, etc.
42685 [uber-SubtaskRunner] WARN org.apache.sqoop.ConnFactory - $SQOOP_CONF_DIR has not been set in the environment. Cannot check for additional configuration.
43432 [uber-SubtaskRunner] INFO org.apache.sqoop.manager.MySQLManager - Preparing to use a MySQL streaming resultset.
43491 [uber-SubtaskRunner] INFO org.apache.sqoop.tool.CodeGenTool - Beginning code generation
45931 [uber-SubtaskRunner] INFO org.apache.sqoop.manager.SqlManager - Executing SQL statement: SELECT t.* FROM `data_source` AS t LIMIT 1
46198 [uber-SubtaskRunner] INFO org.apache.sqoop.manager.SqlManager - Executing SQL statement: SELECT t.* FROM `data_source` AS t LIMIT 1
46219 [uber-SubtaskRunner] INFO org.apache.sqoop.orm.CompilationManager - HADOOP_MAPRED_HOME is /usr/lib/hadoop-mapreduce
62817 [uber-SubtaskRunner] INFO org.apache.sqoop.orm.CompilationManager - Writing jar file: /tmp/sqoop-yarn/compile/78cb8ad53d1f0fe6f62c936c7688a4b8/data_source.jar
62926 [uber-SubtaskRunner] INFO org.apache.sqoop.tool.ImportTool - Maximal id query for free form incremental import: SELECT MAX(`id`) FROM `data_source`
62937 [uber-SubtaskRunner] INFO org.apache.sqoop.tool.ImportTool - Incremental import based on column `id`
62937 [uber-SubtaskRunner] INFO org.apache.sqoop.tool.ImportTool - Upper bound value: 45
62937 [uber-SubtaskRunner] WARN org.apache.sqoop.manager.MySQLManager - It looks like you are importing from mysql.
62937 [uber-SubtaskRunner] WARN org.apache.sqoop.manager.MySQLManager - This transfer can be faster! Use the --direct
62937 [uber-SubtaskRunner] WARN org.apache.sqoop.manager.MySQLManager - option to exercise a MySQL-specific fast path.
62937 [uber-SubtaskRunner] INFO org.apache.sqoop.manager.MySQLManager - Setting zero DATETIME behavior to convertToNull (mysql)
62979 [uber-SubtaskRunner] INFO org.apache.sqoop.mapreduce.ImportJobBase - Beginning import of data_source
63246 [uber-SubtaskRunner] WARN org.apache.sqoop.mapreduce.JobBase - SQOOP_HOME is unset. May not be able to find all job dependencies.
65748 [uber-SubtaskRunner] INFO org.apache.sqoop.mapreduce.db.DBInputFormat - Using read commited transaction isolation
Heart beat
Heart beat
Heart beat
148412 [uber-SubtaskRunner] INFO org.apache.sqoop.mapreduce.ImportJobBase - Transferred 754 bytes in 85.1475 seconds (8.8552 bytes/sec)
148429 [uber-SubtaskRunner] INFO org.apache.sqoop.mapreduce.ImportJobBase - Retrieved 9 records.
148464 [uber-SubtaskRunner] INFO org.apache.sqoop.util.AppendUtils - Appending to directory data_source
148520 [uber-SubtaskRunner] INFO org.apache.sqoop.util.AppendUtils - Using found partition 2
148685 [uber-SubtaskRunner] INFO org.apache.sqoop.manager.SqlManager - Executing SQL statement: SELECT t.* FROM `data_source` AS t LIMIT 1
148741 [uber-SubtaskRunner] WARN org.apache.sqoop.hive.TableDefWriter - Column created_date had to be cast to a less precise type in Hive
148741 [uber-SubtaskRunner] WARN org.apache.sqoop.hive.TableDefWriter - Column updated_date had to be cast to a less precise type in Hive
148743 [uber-SubtaskRunner] INFO org.apache.sqoop.hive.HiveImport - Loading uploaded data into Hive
Heart beat
Intercepting System.exit(1)
<<< Invocation of Main class completed <<<
Failing Oozie Launcher, Main class [org.apache.oozie.action.hadoop.SqoopMain], exit code [1]
Oozie Launcher failed, finishing Hadoop job gracefully
Oozie Launcher, uploading action data to HDFS sequence file: hdfs://quickstart.cloudera:8020/user/cloudera/oozie-oozi/0000009-160719121646145-oozie-oozi-W/sqoop1--sqoop/action-data.seq
Oozie Launcher ends
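When the launcher only reports exit code [1], the underlying Hive error is usually only visible in the full job log; a sketch of pulling it with the Oozie CLI (job ID taken from the Oozie log above):
$ oozie job -oozie http://8.8.8.8:11000/oozie -log 0000016-160716103436859-oozie-oozi-W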

OpenLdap "Server is unwilling to perform (53) additional info: no global superior knowledge"

I am new to LDAP, and I can't understand why it is giving me that error when I try to import this file.
I've tried this command:
ldapadd -h elara.alu.com -x -W -D "cn=Manager,dc=alu,dc=com" -f /root/usersFromDavid.ldif
And my olcDatabase={2}bdb.ldif looks like this:
#CRC32 dd2c457a
dn: olcDatabase={2}bdb
objectClass: olcDatabaseConfig
objectClass: olcBdbConfig
olcDatabase: {2}bdb
olcSuffix: dc=alu,dc=com
olcAddContentAcl: FALSE
olcLastMod: TRUE
olcMaxDerefDepth: 15
olcReadOnly: FALSE
olcRootDN: cn=Manager,dc=alu,dc=com
olcSyncUseSubentry: FALSE
olcMonitoring: TRUE
olcDbDirectory: /var/lib/ldap
olcDbCacheSize: 1000
olcDbCheckpoint: 1024 15
olcDbNoSync: FALSE
olcDbDirtyRead: FALSE
olcDbIDLcacheSize: 0
olcDbIndex: objectClass pres,eq
olcDbIndex: cn pres,eq,sub
olcDbIndex: uid pres,eq,sub
olcDbIndex: uidNumber pres,eq
olcDbIndex: gidNumber pres,eq
olcDbIndex: ou pres,eq,sub
olcDbIndex: mail pres,eq,sub
olcDbIndex: sn pres,eq,sub
olcDbIndex: givenName pres,eq,sub
olcDbIndex: memberUid pres,eq,sub
olcDbIndex: loginShell pres,eq
olcDbIndex: nisMapName pres,eq,sub
olcDbIndex: nisMapEntry pres,eq,sub
olcDbLinearIndex: FALSE
olcDbMode: 0600
olcDbSearchStack: 16
olcDbShmKey: 0
olcDbCacheFree: 1
olcDbDNcacheSize: 0
structuralObjectClass: olcBdbConfig
entryUUID: 7f7892aa-66a8-1034-968b-61cac64128b9
creatorsName: cn=config
createTimestamp: 20150324193414Z
entryCSN: 20150324193414.304614Z#000000#000#000000
modifiersName: cn=config
modifyTimestamp: 20150324193414Z
olcRootPW: {SSHA}Ih6JIB2w69nqoZksZsa46ORHNnHBKNbI
olcTLSCertificateFile: /etc/pki/tls/certs/example.pem
olcTLSCertificateKeyFile: /etc/pki/tls/certs/examplekey.pem
I tried to add:
dn: o=users
objectclass: extensibleObject
objectclass: top
objectclass: domain
dc: users
o: users
dn: ou=People,o=users,cn=Manager, dc=alu, dc=com
objectclass: top
objectclass: organizationalunit
ou: People
dn: uid=caterinca,ou=People,o=users,cn=Manager, dc=alu, dc=com
objectClass: top
objectClass: inetOrgPerson
objectClass: person
objectClass: organizationalPerson
cn: Caterinca
sn: Caterinca
description: enabled
mail: caterinca#caterinca
title: admin
uid: caterinca
userPassword:: e1NTSEF9Nk0vd2tUY3JSdEpiZUZWU2RzYWszbjhlVWV2eEk4aitCb3psNGc9P
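As a sanity check (a sketch against the same host as the ldapadd above), the suffixes the server actually serves can be read from the rootDSE; an added entry has to sit under one of these naming contexts:
$ ldapsearch -x -h elara.alu.com -b "" -s base namingContexts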

Error install OpenLdap for RedHat6(checksum error on "/etc/openldap/slapd.d/cn=config/olcDatabase={2}bdb.ldif")

I tried to install OpenLDAP on Red Hat 6, but I receive an error that looks like this:
"5511c732 ldif_read_file: checksum error on "/etc/openldap/slapd.d/cn=config/olcDatabase={1}monitor.ldif"
Below is the file:
# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify.
# CRC32 03c4de5f
dn: olcDatabase={1}monitor
objectClass: olcDatabaseConfig
olcDatabase: {1}monitor
olcAccess: {0}to * by dn.base="gidNumber=0+uidNumber=0,cn=peercred,cn=externa
l,cn=auth" read by dn.base="cn=Manager,dc=my-domain,dc=com" read by * none
olcAddContentAcl: FALSE
olcLastMod: TRUE
olcMaxDerefDepth: 15
olcReadOnly: FALSE
olcSyncUseSubentry: FALSE
olcMonitoring: FALSE
structuralObjectClass: olcDatabaseConfig
entryUUID: 7f788d0a-66a8-1034-968a-61cac64128b9
creatorsName: cn=config
createTimestamp: 20150324193414Z
entryCSN: 20150324193414.304614Z#000000#000#000000
modifiersName: cn=config
modifyTimestamp: 20150324193414Z
and
5511c732 ldif_read_file: checksum error on "/etc/openldap/slapd.d/cn=config/olcDatabase={2}bdb.ldif"
Below is the file:
# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify.
# CRC32 dd2c457a
dn: olcDatabase={2}bdb
objectClass: olcDatabaseConfig
objectClass: olcBdbConfig
olcDatabase: {2}bdb
olcSuffix: dc=example,dc=com
olcAddContentAcl: FALSE
olcLastMod: TRUE
olcMaxDerefDepth: 15
olcReadOnly: FALSE
olcRootDN: cn=Manager,dc=example,dc=com
olcSyncUseSubentry: FALSE
olcMonitoring: TRUE
olcDbDirectory: /var/lib/ldap
olcDbCacheSize: 1000
olcDbCheckpoint: 1024 15
olcDbNoSync: FALSE
olcDbDirtyRead: FALSE
olcDbIDLcacheSize: 0
olcDbIndex: objectClass pres,eq
olcDbIndex: cn pres,eq,sub
olcDbIndex: uid pres,eq,sub
olcDbIndex: uidNumber pres,eq
olcDbIndex: gidNumber pres,eq
olcDbIndex: ou pres,eq,sub
olcDbIndex: mail pres,eq,sub
olcDbIndex: sn pres,eq,sub
olcDbIndex: givenName pres,eq,sub
olcDbIndex: memberUid pres,eq,sub
olcDbIndex: loginShell pres,eq
olcDbIndex: nisMapName pres,eq,sub
olcDbIndex: nisMapEntry pres,eq,sub
olcDbLinearIndex: FALSE
olcDbMode: 0600
olcDbSearchStack: 16
olcDbShmKey: 0
olcDbCacheFree: 1
olcDbDNcacheSize: 0
structuralObjectClass: olcBdbConfig
entryUUID: 7f7892aa-66a8-1034-968b-61cac64128b9
creatorsName: cn=config
createTimestamp: 20150324193414Z
entryCSN: 20150324193414.304614Z#000000#000#000000
modifiersName: cn=config
modifyTimestamp: 20150324193414Z
olcRootPW: {SSHA}dGaM0fyxrjotXLEKz8Jjl5yoBhpNxLXX
olcTLSCertificateFile: /etc/pki/tls/certs/example.pem
olcTLSCertificateKeyFile: /etc/pki/tls/certs/examplekey.pem
For the first error, I changed dn.base="cn=Manager,dc=my-domain,dc=com" (Manager had been lowercase: dn.base="cn=manager,dc=my-domain,dc=com").
For the second error:
- olcSuffix: dc=example,dc=com (was olcSuffix: dc=my-domain,dc=com)
- olcRootPW: {SSHA}dGaM0fyxrjotXLEKz8Jjl5yoBhpNxLXX (added)
- olcTLSCertificateFile: /etc/pki/tls/certs/example.pem (added)
- olcTLSCertificateKeyFile: /etc/pki/tls/certs/examplekey.pem (added)
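(These files are auto-generated; the header itself says "Use ldapmodify". Hand edits invalidate the CRC32 in the comment, which is exactly what the checksum warning is about. A minimal sketch of making the same suffix change through the server instead, assuming slapd is running and cn=config is reachable over ldapi:)
$ cat fix-suffix.ldif
dn: olcDatabase={2}bdb,cn=config
changetype: modify
replace: olcSuffix
olcSuffix: dc=example,dc=com
$ ldapmodify -Y EXTERNAL -H ldapi:/// -f fix-suffix.ldif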
Try the settings below:
vim /etc/profile
Press SHIFT + G to jump to the end of the file and add: export LC_ALL="en_US.UTF-8"
source /etc/profile

OpenLdap Master / Master Replication Stops When Node Goes Offline

I'm currently using Ubuntu 10.04 (I've set this up on RHEL 5.x before, but the config is quite different on Ubuntu).
Anyway, at first I figured I had everything working. When I made an update on ldap01, I immediately saw the change on ldap02. However, if I take down slapd on ldap02 (or 01) and add LDAP entries to ldap01, then bring ldap02's slapd process back online, I never see the entries that were created while slapd was down on ldap02. New entries continue to propagate between ldap01 and ldap02 as long as both servers have the slapd process running, but the entries created while slapd was down on ldap02 never propagate to 02.
Here are my configs and ldap versions:
ii slapd 2.4.21-0ubuntu5.7 OpenLDAP server (slapd)
---- /etc/ldap/slapd.d/cn=config/olcDatabase={0}config.ldif -----
# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify.
# CRC32 deacdc79
dn: olcDatabase={0}config
objectClass: olcDatabaseConfig
olcDatabase: {0}config
olcAccess: {0}to * by dn.exact=gidNumber=0+uidNumber=0,cn=peercred,cn=external
,cn=auth manage by * break
olcAccess: {1}to attrs=userPassword,shadowLastChange by self write by anonymou
s auth by dn="cn=admin,dc=example,dc=net" write by * none
olcAccess: {2}to * by self write by dn="cn=admin,dc=example,dc=net" write b
y * read
structuralObjectClass: olcDatabaseConfig
entryUUID: 19eb3cc6-3898-1031-954c-351a2fbb42e9
creatorsName: cn=config
createTimestamp: 20120522202605Z
olcSyncrepl: {0}rid=001 provider="ldap://ldap-01:389" type=refreshAndPer
sist retry="5 5 300 +" searchbase="cn=config" attrs="*,+" bindmethod=simple b
inddn="cn=admin,dc=example,dc=net" credentials="secret"
olcSyncrepl: {1}rid=002 provider="ldap://ldap-02:389" type=refreshAndPer
sist retry="5 5 300 +" searchbase="cn=config" attrs="*,+" bindmethod=simple b
inddn="cn=admin,dc=example,dc=net" credentials="secret"
olcMirrorMode: TRUE
entryCSN: 20120528195647.027315Z#000000#000#000000
modifiersName: cn=admin,dc=example,dc=net
modifyTimestamp: 20120528195647Z
---- /etc/ldap/slapd.d/cn=config/olcDatabase={0}config/olcOverlay={0}syncprov.ldif ----
# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify.
# CRC32 807029fa
dn: olcOverlay={0}syncprov
objectClass: olcOverlayConfig
objectClass: olcSyncProvConfig
olcOverlay: {0}syncprov
olcSpNoPresent: TRUE
structuralObjectClass: olcSyncProvConfig
entryUUID: 3be00cb6-3dee-1031-8f60-519aa1b6f74f
creatorsName: cn=admin,dc=example,dc=net
createTimestamp: 20120529152514Z
entryCSN: 20120529152514.987191Z#000000#000#000000
modifiersName: cn=admin,dc=example,dc=net
modifyTimestamp: 20120529152514Z
---- /etc/ldap/slapd.d/cn=config/olcDatabase={1}hdb.ldif ----
# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify.
# CRC32 1b0a3130
dn: olcDatabase={1}hdb
objectClass: olcDatabaseConfig
objectClass: olcHdbConfig
olcDatabase: {1}hdb
olcDbDirectory: /var/lib/ldap
olcSuffix: dc=example,dc=net
olcAccess: {0}to attrs=userPassword,shadowLastChange by self write by anonymou
s auth by dn="cn=admin,dc=example,dc=net" write by * none
olcAccess: {1}to dn.base="" by * read
olcAccess: {2}to * by self write by dn="cn=admin,dc=example,dc=net" write b
y * read
olcLastMod: TRUE
olcRootDN: cn=admin,dc=example,dc=net
olcRootPW:: e1NTSEF9Ni9IZWJCczRTbmJQYlc4NHFOMWxHWUI5NVNoVUl4U20=
olcDbCheckpoint: 512 30
olcDbConfig: {0}set_cachesize 0 2097152 0
olcDbConfig: {1}set_lk_max_objects 1500
olcDbConfig: {2}set_lk_max_locks 1500
olcDbConfig: {3}set_lk_max_lockers 1500
olcDbIndex: objectClass eq
structuralObjectClass: olcHdbConfig
entryUUID: 19ebfdc8-3898-1031-9554-351a2fbb42e9
creatorsName: cn=config
createTimestamp: 20120522202605Z
olcSyncrepl: {0}rid=001 provider="ldap://ldap-01:389" type=refreshAndPer
sist retry="5 5 300 +" searchbase="dc=example,dc=net" attrs="*,+" bindmeth
od=simple binddn="cn=admin,dc=example,dc=net" credentials="secret"
olcSyncrepl: {1}rid=002 provider="ldap://ldap-02:389" type=refreshAndPer
sist retry="5 5 300 +" searchbase="dc=example,dc=net" attrs="*,+" bindmeth
od=simple binddn="cn=admin,dc=example,dc=net" credentials="secret"
olcMirrorMode: TRUE
entryCSN: 20120528195647.026244Z#000000#000#000000
modifiersName: cn=admin,dc=example,dc=net
modifyTimestamp: 20120528195647Z
----- /etc/ldap/slapd.d/cn=config/olcDatabase={1}hdb/olcOverlay={0}syncprov.ldif ----
# AUTO-GENERATED FILE - DO NOT EDIT!! Use ldapmodify.
# CRC32 807029fa
dn: olcOverlay={0}syncprov
objectClass: olcOverlayConfig
objectClass: olcSyncProvConfig
olcOverlay: {0}syncprov
olcSpNoPresent: TRUE
structuralObjectClass: olcSyncProvConfig
entryUUID: 3be00cb6-3dee-1031-8f60-519aa1b6f74f
creatorsName: cn=admin,dc=example,dc=net
createTimestamp: 20120529152514Z
entryCSN: 20120529152514.987191Z#000000#000#000000
modifiersName: cn=admin,dc=example,dc=net
modifyTimestamp: 20120529152514Z
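For diagnosing this kind of divergence, it is worth comparing the contextCSN of the suffix on both nodes after ldap02 comes back; if they differ, that node has not caught up. A quick sketch with the suffix from the configs above:
$ ldapsearch -x -H ldap://ldap-01 -s base -b "dc=example,dc=net" contextCSN
$ ldapsearch -x -H ldap://ldap-02 -s base -b "dc=example,dc=net" contextCSN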
I upgraded my version of OpenLDAP and my problem went away.
