1. Install zookeeper-3.4.6
Install ZooKeeper before installing HBase: HBase requires ZooKeeper to manage its cluster state.
1.1) Download zookeeper-3.4.6.tar.gz
1.2) tar zxvf zookeeper-3.4.6.tar.gz
1.3) mv zookeeper-3.4.6 /home/hadoop/hadoop-1.2.1/
1.4) mkdir /home/hadoop/hadoop-1.2.1/zookeeper-3.4.6/data    # directory for ZooKeeper data
1.5) mkdir /home/hadoop/hadoop-1.2.1/zookeeper-3.4.6/log     # directory for ZooKeeper logs
1.6) cd /home/hadoop/hadoop-1.2.1/zookeeper-3.4.6/conf
1.7) cp ./zoo_sample.cfg ./zoo.cfg
1.8) vi zoo.cfg
Modify:
dataDir=/home/hadoop/hadoop-1.2.1/zookeeper-3.4.6/data
Add:
dataLogDir=/home/hadoop/hadoop-1.2.1/zookeeper-3.4.6/log
Finally, append the following lines at the end of the file:
server.21=master-NameNode:2888:3888
server.22=master-SecondaryNameNode:2888:3888
server.23=master-JobTracker:2888:3888
server.24=slave1-DataNode-TaskTracker:2888:3888
server.25=slave2-DataNode-TaskTracker:2888:3888
server.26=slave3-DataNode-TaskTracker:2888:3888
server.27=slave4-DataNode-TaskTracker:2888:3888
server.28=slave5-DataNode-TaskTracker:2888:3888
Parameter meanings:
#initLimit: a ZooKeeper ensemble contains several servers, one of which is the leader and the rest followers. initLimit bounds the longest heartbeat interval allowed between a follower and the leader during the initial connection. It is set to 5 here, so the limit is 5 ticks, i.e. 5*2000=10000ms=10s.
#syncLimit: bounds the maximum time allowed for a request/response message exchange between the leader and a follower. It is set to 2 here, so the limit is 2 ticks, i.e. 2*2000=4000ms.
#server.X=A:B:C — X is a number identifying the server; A is that server's hostname or IP address; B is the port the server uses to exchange messages with the ensemble leader; C is the port used for leader election.
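For reference, the assembled zoo.cfg consistent with everything above would look roughly like this (tickTime=2000 and clientPort=2181 are the zoo_sample.cfg defaults and are assumed unchanged here):

tickTime=2000
initLimit=5
syncLimit=2
dataDir=/home/hadoop/hadoop-1.2.1/zookeeper-3.4.6/data
dataLogDir=/home/hadoop/hadoop-1.2.1/zookeeper-3.4.6/log
clientPort=2181
server.21=master-NameNode:2888:3888
server.22=master-SecondaryNameNode:2888:3888
server.23=master-JobTracker:2888:3888
server.24=slave1-DataNode-TaskTracker:2888:3888
server.25=slave2-DataNode-TaskTracker:2888:3888
server.26=slave3-DataNode-TaskTracker:2888:3888
server.27=slave4-DataNode-TaskTracker:2888:3888
server.28=slave5-DataNode-TaskTracker:2888:3888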
1.9) scp the configured zookeeper directory to every other node, so that each machine has its own copy:
scp -r ./zookeeper-3.4.6 root@master-SecondaryNameNode:/home/hadoop/hadoop-1.2.1/
scp -r ./zookeeper-3.4.6 root@master-JobTracker:/home/hadoop/hadoop-1.2.1/
scp -r ./zookeeper-3.4.6 root@slave1-DataNode-TaskTracker:/home/hadoop/hadoop-1.2.1/
scp -r ./zookeeper-3.4.6 root@slave2-DataNode-TaskTracker:/home/hadoop/hadoop-1.2.1/
scp -r ./zookeeper-3.4.6 root@slave3-DataNode-TaskTracker:/home/hadoop/hadoop-1.2.1/
scp -r ./zookeeper-3.4.6 root@slave4-DataNode-TaskTracker:/home/hadoop/hadoop-1.2.1/
scp -r ./zookeeper-3.4.6 root@slave5-DataNode-TaskTracker:/home/hadoop/hadoop-1.2.1/
1.10) In the dataDir configured earlier, create a myid file containing a single number that identifies the server. The number must match the X of the corresponding server.X line in zoo.cfg.
On master-NameNode, create the myid file with content 21:
vi /home/hadoop/hadoop-1.2.1/zookeeper-3.4.6/data/myid
21
On master-SecondaryNameNode, create the myid file with content 22:
vi /home/hadoop/hadoop-1.2.1/zookeeper-3.4.6/data/myid
22
On master-JobTracker, create the myid file with content 23:
vi /home/hadoop/hadoop-1.2.1/zookeeper-3.4.6/data/myid
23
On slave1-DataNode-TaskTracker, create the myid file with content 24:
vi /home/hadoop/hadoop-1.2.1/zookeeper-3.4.6/data/myid
24
Create the myid files on the remaining nodes in the same sequence.
1.11) Set the environment variables; this must be done on every node:
vi /etc/profile
Add:
ZOOKEEPER_HOME=/home/hadoop/hadoop-1.2.1/zookeeper-3.4.6
Modify:
PATH=$PATH:$JAVA_HOME/bin:$JRE_HOME/bin:$HADOOP_PREFIX/bin:$ZOOKEEPER_HOME/bin
Add:
export ZOOKEEPER_HOME
source /etc/profile
1.12) Configuration is complete; on every node you can now run:
../zookeeper-3.4.6/bin/zkServer.sh start
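Incidentally, the per-node myid files from step 1.10 are easy to get wrong by hand; they can also be generated in one loop from master-NameNode. A sketch, assuming passwordless root ssh to every node and the sequential server ids 21-28 described above:

for entry in master-NameNode:21 master-SecondaryNameNode:22 master-JobTracker:23 \
             slave1-DataNode-TaskTracker:24 slave2-DataNode-TaskTracker:25 \
             slave3-DataNode-TaskTracker:26 slave4-DataNode-TaskTracker:27 \
             slave5-DataNode-TaskTracker:28; do
  host=${entry%:*}; id=${entry#*:}
  # write this node's server id into the dataDir configured in zoo.cfg
  ssh root@$host "echo $id > /home/hadoop/hadoop-1.2.1/zookeeper-3.4.6/data/myid"
done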
Check with jps: if a QuorumPeerMain process is present, ZooKeeper is running correctly.
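Rather than logging in to each of the eight machines, you can check them all in one loop; a sketch under the same passwordless-ssh assumption:

for host in master-NameNode master-SecondaryNameNode master-JobTracker \
            slave1-DataNode-TaskTracker slave2-DataNode-TaskTracker \
            slave3-DataNode-TaskTracker slave4-DataNode-TaskTracker \
            slave5-DataNode-TaskTracker; do
  # "status" reports Mode: leader / follower once the quorum has formed
  echo "== $host =="
  ssh root@$host "/home/hadoop/hadoop-1.2.1/zookeeper-3.4.6/bin/zkServer.sh status"
done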
Stop ZooKeeper with: zkServer.sh stop
2. Install hbase-0.98.8
2.1) Download hbase-0.98.8-hadoop1-bin.tar.gz
2.2) tar zxvf hbase-0.98.8-hadoop1-bin.tar.gz
mv ./hbase-0.98.8-hadoop1 /home/hadoop/hadoop-1.2.1/hbase-0.98.8
2.3) Configure hbase-env.sh:
vi /home/hadoop/hadoop-1.2.1/hbase-0.98.8/conf/hbase-env.sh
Modify:
export JAVA_HOME=/usr/java/jdk1.7.0_71
Change export HBASE_MANAGES_ZK=true to export HBASE_MANAGES_ZK=false (ZooKeeper was installed separately, so HBase's bundled ZooKeeper is not used).
2.4) Configure hbase-site.xml:
vi /home/hadoop/hadoop-1.2.1/hbase-0.98.8/conf/hbase-site.xml
<configuration>
  <property>
    <name>hbase.rootdir</name>
    <value>hdfs://master-NameNode:9000/hbase</value>
  </property>
  <property>
    <name>hbase.cluster.distributed</name>
    <value>true</value>
  </property>
  <property>
    <name>hbase.zookeeper.quorum</name>
    <value>master-NameNode,master-SecondaryNameNode,master-JobTracker,slave1-DataNode-TaskTracker,slave2-DataNode-TaskTracker,slave3-DataNode-TaskTracker,slave4-DataNode-TaskTracker,slave5-DataNode-TaskTracker</value>
  </property>
  <property>
    <name>dfs.replication</name>
    <value>3</value>
  </property>
  <property>
    <name>hbase.zookeeper.property.dataDir</name>
    <value>/home/hadoop/hadoop-1.2.1/zookeeper-3.4.6/data</value>
  </property>
</configuration>
# Note: hbase.zookeeper.quorum must list all of the ZooKeeper nodes, including the node hosting the HMaster role (here that is master-NameNode). If you used HBase's bundled ZooKeeper instead, master-NameNode would not need to be listed, e.g.:
# <value>master-SecondaryNameNode,master-JobTracker,slave1-DataNode-TaskTracker,slave2-DataNode-TaskTracker,slave3-DataNode-TaskTracker,slave4-DataNode-TaskTracker,slave5-DataNode-TaskTracker</value>
2.5) Configure the regionservers file:
vi /home/hadoop/hadoop-1.2.1/hbase-0.98.8/conf/regionservers
slave1-DataNode-TaskTracker
slave2-DataNode-TaskTracker
slave3-DataNode-TaskTracker
slave4-DataNode-TaskTracker
slave5-DataNode-TaskTracker
2.6) scp the configured hbase directory to every other node, so that each machine has its own copy:
scp -r ./hbase-0.98.8 root@master-SecondaryNameNode:/home/hadoop/hadoop-1.2.1/
scp -r ./hbase-0.98.8 root@master-JobTracker:/home/hadoop/hadoop-1.2.1/
scp -r ./hbase-0.98.8 root@slave1-DataNode-TaskTracker:/home/hadoop/hadoop-1.2.1/
scp -r ./hbase-0.98.8 root@slave2-DataNode-TaskTracker:/home/hadoop/hadoop-1.2.1/
scp -r ./hbase-0.98.8 root@slave3-DataNode-TaskTracker:/home/hadoop/hadoop-1.2.1/
scp -r ./hbase-0.98.8 root@slave4-DataNode-TaskTracker:/home/hadoop/hadoop-1.2.1/
scp -r ./hbase-0.98.8 root@slave5-DataNode-TaskTracker:/home/hadoop/hadoop-1.2.1/
2.7) Configuration is done and HBase can be started. Because ZooKeeper is standalone, first make sure it is running on every node: ssh to each node and run zkServer.sh start.
Only then can HBase be started, on the master node (master-NameNode), with start-hbase.sh:
ssh root@master-NameNode
/home/hadoop/hadoop-1.2.1/hbase-0.98.8/bin/start-hbase.sh
Use jps to inspect the processes on each node: master-NameNode should show an HMaster process, and the slave machines should each show an HRegionServer process.
2.8) The shutdown order is the reverse: hbase -> zookeeper -> hadoop.
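Putting the ordering rules from 2.7 and 2.8 together, a sketch of whole-cluster start/stop run from master-NameNode, assuming passwordless root ssh and the standard hadoop-1.2.1 bin/start-all.sh layout:

HOSTS="master-NameNode master-SecondaryNameNode master-JobTracker \
slave1-DataNode-TaskTracker slave2-DataNode-TaskTracker \
slave3-DataNode-TaskTracker slave4-DataNode-TaskTracker slave5-DataNode-TaskTracker"

# start order: hadoop, then zookeeper, then hbase
/home/hadoop/hadoop-1.2.1/bin/start-all.sh
for h in $HOSTS; do ssh root@$h "/home/hadoop/hadoop-1.2.1/zookeeper-3.4.6/bin/zkServer.sh start"; done
/home/hadoop/hadoop-1.2.1/hbase-0.98.8/bin/start-hbase.sh

# stop order is the reverse: hbase, then zookeeper, then hadoop
/home/hadoop/hadoop-1.2.1/hbase-0.98.8/bin/stop-hbase.sh
for h in $HOSTS; do ssh root@$h "/home/hadoop/hadoop-1.2.1/zookeeper-3.4.6/bin/zkServer.sh stop"; done
/home/hadoop/hadoop-1.2.1/bin/stop-all.sh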
3. Install apache-ant-1.9.4
3.1) Download apache-ant-1.9.4-bin.tar
3.2) tar xvf apache-ant-1.9.4-bin.tar
mv apache-ant-1.9.4 /home/hadoop/hadoop-1.2.1/
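To confirm the Ant install, a quick version check (path as above):

/home/hadoop/hadoop-1.2.1/apache-ant-1.9.4/bin/ant -version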
4. Install mysql
MySQL is installed on its own machine, outside the hadoop cluster. This write-up did not have that many machines, so 172.16.37.27 stands in for it.
4.1) Download mysql-5.5.40.tar.gz
4.2) rm -f /etc/my.cnf
4.3) tar zxf mysql-5.5.40.tar.gz
cd mysql-5.5.40
4.4) patch -p1 < ../mysql-openssl.patch
If you get patch: command not found, install patch: yum install patch
The mysql-openssl.patch file contains:
--- mysql-5.5.31/vio/viossl.c~	2013-03-25 14:14:58.000000000 +0100
+++ mysql-5.5.31/vio/viossl.c	2013-04-18 16:58:38.552557538 +0200
@@ -172,8 +172,10 @@
   SSL_SESSION_set_timeout(SSL_get_session(ssl), timeout);
   SSL_set_fd(ssl, vio->sd);
 #ifndef HAVE_YASSL
+#ifdef SSL_OP_NO_COMPRESSION
   SSL_set_options(ssl, SSL_OP_NO_COMPRESSION);
 #endif
+#endif
 
   if ((r= connect_accept_func(ssl)) < 1)
   {
4.5) cmake -DCMAKE_INSTALL_PREFIX=/usr/local/mysql -DEXTRA_CHARSETS=all -DDEFAULT_CHARSET=utf8 -DDEFAULT_COLLATION=utf8_general_ci -DWITH_READLINE=1 -DWITH_SSL=system -DWITH_ZLIB=system -DWITH_EMBEDDED_SERVER=1 -DENABLED_LOCAL_INFILE=1
If you get cmake: command not found, install it: yum install cmake
If cmake fails with the following error:
Could NOT find Curses (missing: CURSES_LIBRARY CURSES_INCLUDE_PATH)
CMake Error at cmake/readline.cmake:83 (MESSAGE):
Curses library not found. Please install appropriate package, remove CMakeCache.txt and rerun cmake. On Debian/Ubuntu, package name is libncurses5-dev, on Redhat and derivates it is ncurses-devel.
then run:
yum install ncurses-devel
rm CMakeCache.txt
and re-run the cmake command.
4.6) make && make install
4.7) groupadd mysql
useradd -s /sbin/nologin -M -g mysql mysql
4.8) cp support-files/my-medium.cnf /etc/my.cnf
sed '/skip-external-locking/i\datadir = /usr/local/mysql/var' -i /etc/my.cnf
sed -i 's:#innodb:innodb:g' /etc/my.cnf
sed -i 's:/usr/local/mysql/data:/usr/local/mysql/var:g' /etc/my.cnf
4.9) /usr/local/mysql/scripts/mysql_install_db --defaults-file=/etc/my.cnf --basedir=/usr/local/mysql --datadir=/usr/local/mysql/var --user=mysql
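The sed edits in 4.8 are easy to get subtly wrong; to confirm they landed as intended, a quick check (the grep pattern is just an illustration):

grep -n 'datadir\|innodb' /etc/my.cnf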
4.10) chown -R mysql /usr/local/mysql/var
chgrp -R mysql /usr/local/mysql/.
4.11) cp support-files/mysql.server /etc/init.d/mysql
chmod 755 /etc/init.d/mysql
4.12) cat > /etc/ld.so.conf.d/mysql.conf<<EOF
/usr/local/mysql/lib
/usr/local/lib
EOF
ldconfig
4.13) ln -s /usr/local/mysql/lib/mysql /usr/lib/mysql
ln -s /usr/local/mysql/include/mysql /usr/include/mysql
if [ -d "/proc/vz" ];then
  ulimit -s unlimited
fi
4.14) Start mysql:
/etc/init.d/mysql start
4.15) ln -s /usr/local/mysql/bin/mysql /usr/bin/mysql
ln -s /usr/local/mysql/bin/mysqldump /usr/bin/mysqldump
ln -s /usr/local/mysql/bin/myisamchk /usr/bin/myisamchk
ln -s /usr/local/mysql/bin/mysqld_safe /usr/bin/mysqld_safe
4.16) Set the mysql root password:
/usr/local/mysql/bin/mysqladmin -u root password zxsoftdb123
cat > /tmp/mysql_sec_script<<EOF
use mysql;
update user set password=password('zxsoftdb123') where user='root';
delete from user where not (user='root') ;
delete from user where user='root' and password='';
drop database test;
DROP USER ''@'%';
flush privileges;
EOF
/usr/local/mysql/bin/mysql -u root -pzxsoftdb123 -h localhost < /tmp/mysql_sec_script
rm -f /tmp/mysql_sec_script
/etc/init.d/mysql restart
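To confirm the new root password works after the restart, a quick local sanity check:

mysql -uroot -pzxsoftdb123 -e "SHOW DATABASES;"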
4.17) To stop mysql:
/etc/init.d/mysql stop
4.18) Grants
Allow user root, with password zxsoftdb123, to connect to the mysql server from the host 172.16.37.21:
mysql -uroot -pzxsoftdb123
Then, in the mysql shell:
GRANT ALL PRIVILEGES ON *.* TO 'root'@'172.16.37.21' IDENTIFIED BY 'zxsoftdb123' WITH GRANT OPTION;
flush privileges;
4.19) Make mysql start on boot:
chkconfig --level 345 mysql on
/etc/init.d/mysql start
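Before moving on to Hive, it is worth confirming the grant from the Hive host itself; a quick check, run on 172.16.37.21 (assumes a mysql client is installed there):

mysql -h 172.16.37.27 -uroot -pzxsoftdb123 -e "SELECT VERSION();"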
5. Install Hive
Install Hive on a dedicated Hive cluster or on a single standalone machine. For this test there weren't that many machines, so it was installed on 172.16.37.21.
Note: apache-hive-0.14.0 has some problems when combined with hadoop-1.2.1, so the Hive here was later switched to apache-hive-0.13.1-bin; the installation steps are unchanged and identical to those for 0.14.
5.1) Download apache-hive-0.14.0-bin.tar.gz
5.2) tar zxf apache-hive-0.14.0-bin.tar.gz
mv apache-hive-0.14.0-bin /home/hadoop/hadoop-1.2.1/
5.3) Edit the configuration:
cd /home/hadoop/hadoop-1.2.1/apache-hive-0.14.0-bin/conf
Copy hive-default.xml.template to hive-default.xml; this file holds Hive's default configuration:
cp hive-default.xml.template hive-default.xml
Create a new file, hive-site.xml; settings in it override those in hive-default.xml:
vi hive-site.xml
Tuning is done by changing the relevant settings in this file.
5.4) Copy hive-env.sh.template to hive-env.sh:
cp hive-env.sh.template ./hive-env.sh
vi hive-env.sh
Modify the contents as follows:
export HADOOP_HEAPSIZE=1024
HADOOP_HOME=/home/hadoop/hadoop-1.2.1
export HIVE_CONF_DIR=/home/hadoop/hadoop-1.2.1/apache-hive-0.14.0-bin/conf
export HIVE_AUX_JARS_PATH=/home/hadoop/hadoop-1.2.1/apache-hive-0.14.0-bin/lib
5.5) cd /home/hadoop/hadoop-1.2.1/apache-hive-0.14.0-bin/conf
cp hive-exec-log4j.properties.template hive-exec-log4j.properties
cp hive-log4j.properties.template hive-log4j.properties
5.6) Store Hive's metadata in a traditional RDBMS; here we use mysql.
After installing mysql, edit my.cnf and comment out the bind-address=127.0.0.1 line with a # so that remote hosts can connect.
Then edit hive-site.xml as follows:
vi conf/hive-site.xml
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
  <property>
    <name>hive.metastore.warehouse.dir</name>
    <value>hdfs://zxsoft-hadoop-master-NameNode:9000/user/hive/warehouse</value>
    <!-- a directory on HDFS -->
    <description>location of default database for the warehouse</description>
  </property>
  <property>
    <name>hive.querylog.location</name>
    <value>/home/hadoop/hadoop-1.2.1/apache-hive-0.14.0-bin/log</value>
    <description>Location of Hive run time structured log file</description>
  </property>
  <property>
    <name>hive.exec.scratchdir</name>
    <value>hdfs://zxsoft-hadoop-master-NameNode:9000/tmp/hive-${user.name}</value>
    <description>Scratch space for Hive jobs</description>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionURL</name>
    <value>jdbc:mysql://172.16.37.27:3306/hive?createDatabaseIfNotExist=true</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionDriverName</name>
    <value>com.mysql.jdbc.Driver</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionUserName</name>
    <value>root</value>
  </property>
  <property>
    <name>javax.jdo.option.ConnectionPassword</name>
    <value>zxsoftdb123</value>
  </property>
</configuration>
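XML typos in this file tend to fail in confusing ways at Hive startup, so it can help to check that the edited file is well-formed first; a quick check, assuming xmllint (from libxml2) is available:

xmllint --noout conf/hive-site.xml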
5.7) Create a few directories in HDFS:
$HADOOP_HOME/bin/hadoop fs -mkdir /tmp
$HADOOP_HOME/bin/hadoop fs -mkdir /user/hive/warehouse
$HADOOP_HOME/bin/hadoop fs -chmod g+w /tmp
$HADOOP_HOME/bin/hadoop fs -chmod g+w /user/hive/warehouse
5.8) Create the local log directory:
mkdir /home/hadoop/hadoop-1.2.1/apache-hive-0.14.0-bin/log
5.9) Download the mysql JDBC driver and move it into Hive's lib directory:
mv mysql-connector-java-5.1.33-bin.jar /home/hadoop/hadoop-1.2.1/apache-hive-0.14.0-bin/lib/
5.10) Start Hive.
First set the environment variables:
vi /etc/profile
HIVE_HOME=/home/hadoop/hadoop-1.2.1/apache-hive-0.14.0-bin
PATH=$PATH:$JAVA_HOME/bin:$JRE_HOME/bin:$HADOOP_PREFIX/bin:$ZOOKEEPER_HOME/bin:$HIVE_HOME/bin
export HIVE_HOME
Then start Hive by running the hive command:
/home/hadoop/hadoop-1.2.1/apache-hive-0.14.0-bin/bin/hive
If hive fails with the following error:
Exception in thread "main" java.lang.RuntimeException: java.lang.RuntimeException: Unable to instantiate org.apache.hadoop.hive.ql.metadata.SessionHiveMetaStoreClient
	at org.apache.hadoop.hive.ql.session.SessionState.start(SessionState.java:444)
	at org.apache.hadoop.hive.cli.CliDriver.run(CliDriver.java:672)
	at org.apache.hadoop.hive.cli.CliDriver.main(CliDriver.java:616)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.util.RunJar.main(RunJar.java:160)
Caused by: java.lang.RuntimeException: Unable to instantiate org.apache.hadoop.hive.ql.metadata.SessionHiveMetaStoreClient
	at org.apache.hadoop.hive.metastore.MetaStoreUtils.newInstance(MetaStoreUtils.java:1449)
	at org.apache.hadoop.hive.metastore.RetryingMetaStoreClient.<init>(RetryingMetaStoreClient.java:63)
	at org.apache.hadoop.hive.metastore.RetryingMetaStoreClient.getProxy(RetryingMetaStoreClient.java:73)
	at org.apache.hadoop.hive.ql.metadata.Hive.createMetaStoreClient(Hive.java:2661)
	at org.apache.hadoop.hive.ql.metadata.Hive.getMSC(Hive.java:2680)
	at org.apache.hadoop.hive.ql.session.SessionState.start(SessionState.java:425)
	... 7 more
Caused by: java.lang.reflect.InvocationTargetException
	at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
	at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:57)
	at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
	at java.lang.reflect.Constructor.newInstance(Constructor.java:526)
	at org.apache.hadoop.hive.metastore.MetaStoreUtils.newInstance(MetaStoreUtils.java:1447)
	... 12 more
Caused by: javax.jdo.JDOUserException: One or more instances could not be made persistent
NestedThrowables:
org.datanucleus.exceptions.NucleusDataStoreException: Error(s) were found while auto-creating/validating the datastore for classes. The errors are printed in the log, and are attached to this exception.
	at org.datanucleus.api.jdo.JDOPersistenceManager.makePersistentAll(JDOPersistenceManager.java:787)
	at org.apache.hadoop.hive.metastore.ObjectStore.grantPrivileges(ObjectStore.java:4071)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:606)
	at org.apache.hadoop.hive.metastore.RawStoreProxy.invoke(RawStoreProxy.java:98)
	at com.sun.proxy.$Proxy4.grantPrivileges(Unknown Source)
	at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.createDefaultRoles_core(HiveMetaStore.java:646)
	at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.createDefaultRoles(HiveMetaStore.java:615)
	at org.apache.hadoop.hive.metastore.HiveMetaStore$HMSHandler.init(HiveMetaStore.java:430)
	at org.apache.hadoop.hive.metastore.RetryingHMSHandler.<init>(RetryingHMSHandler.java:66)
	at org.apache.hadoop.hive.metastore.RetryingHMSHandler.getProxy(RetryingHMSHandler.java:72)
	at org.apache.hadoop.hive.metastore.HiveMetaStore.newRetryingHMSHandler(HiveMetaStore.java:5554)
	at org.apache.hadoop.hive.metastore.HiveMetaStoreClient.<init>(HiveMetaStoreClient.java:178)
	at org.apache.hadoop.hive.ql.metadata.SessionHiveMetaStoreClient.<init>(SessionHiveMetaStoreClient.java:73)
	... 17 more
Caused by: org.datanucleus.exceptions.NucleusDataStoreException: Error(s) were found while auto-creating/validating the datastore for classes. The errors are printed in the log, and are attached to this exception.
	at org.datanucleus.store.rdbms.RDBMSStoreManager$ClassAdder.verifyErrors(RDBMSStoreManager.java:3602)
	at org.datanucleus.store.rdbms.RDBMSStoreManager$ClassAdder.addClassTablesAndValidate(RDBMSStoreManager.java:3205)
	at org.datanucleus.store.rdbms.RDBMSStoreManager$ClassAdder.run(RDBMSStoreManager.java:2841)
	at org.datanucleus.store.rdbms.AbstractSchemaTransaction.execute(AbstractSchemaTransaction.java:122)
	at org.datanucleus.store.rdbms.RDBMSStoreManager.addClasses(RDBMSStoreManager.java:1605)
	at org.datanucleus.store.AbstractStoreManager.addClass(AbstractStoreManager.java:954)
	at org.datanucleus.store.rdbms.RDBMSStoreManager.getDatastoreClass(RDBMSStoreManager.java:679)
	at org.datanucleus.store.rdbms.RDBMSStoreManager.getPropertiesForGenerator(RDBMSStoreManager.java:2045)
	at org.datanucleus.store.AbstractStoreManager.getStrategyValue(AbstractStoreManager.java:1365)
	at org.datanucleus.ExecutionContextImpl.newObjectId(ExecutionContextImpl.java:3827)
	at org.datanucleus.state.JDOStateManager.setIdentity(JDOStateManager.java:2571)
	at org.datanucleus.state.JDOStateManager.initialiseForPersistentNew(JDOStateManager.java:513)
	at org.datanucleus.state.ObjectProviderFactoryImpl.newForPersistentNew(ObjectProviderFactoryImpl.java:232)
	at org.datanucleus.ExecutionContextImpl.newObjectProviderForPersistentNew(ExecutionContextImpl.java:1414)
	at org.datanucleus.ExecutionContextImpl.persistObjectInternal(ExecutionContextImpl.java:2218)
	at org.datanucleus.ExecutionContextImpl.persistObjectWork(ExecutionContextImpl.java:2065)
	at org.datanucleus.ExecutionContextImpl.persistObjects(ExecutionContextImpl.java:2005)
	at org.datanucleus.ExecutionContextThreadedImpl.persistObjects(ExecutionContextThreadedImpl.java:231)
	at org.datanucleus.api.jdo.JDOPersistenceManager.makePersistentAll(JDOPersistenceManager.java:776)
	... 32 more
Caused by: com.mysql.jdbc.exceptions.jdbc4.MySQLSyntaxErrorException: Specified key was too long; max key length is 1000 bytes
	at sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method)
	at sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:57)
	at sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
	at java.lang.reflect.Constructor.newInstance(Constructor.java:526)
	at com.mysql.jdbc.Util.handleNewInstance(Util.java:377)
	at com.mysql.jdbc.Util.getInstance(Util.java:360)
	at com.mysql.jdbc.SQLError.createSQLException(SQLError.java:978)
	at com.mysql.jdbc.MysqlIO.checkErrorPacket(MysqlIO.java:3887)
	at com.mysql.jdbc.MysqlIO.checkErrorPacket(MysqlIO.java:3823)
	at com.mysql.jdbc.MysqlIO.sendCommand(MysqlIO.java:2435)
	at com.mysql.jdbc.MysqlIO.sqlQueryDirect(MysqlIO.java:2582)
	at com.mysql.jdbc.ConnectionImpl.execSQL(ConnectionImpl.java:2526)
	at com.mysql.jdbc.ConnectionImpl.execSQL(ConnectionImpl.java:2484)
	at com.mysql.jdbc.StatementImpl.execute(StatementImpl.java:848)
	at com.mysql.jdbc.StatementImpl.execute(StatementImpl.java:742)
	at com.jolbox.bonecp.StatementHandle.execute(StatementHandle.java:254)
	at org.datanucleus.store.rdbms.table.AbstractTable.executeDdlStatement(AbstractTable.java:760)
	at org.datanucleus.store.rdbms.table.TableImpl.createIndices(TableImpl.java:648)
	at org.datanucleus.store.rdbms.table.TableImpl.createConstraints(TableImpl.java:422)
	at org.datanucleus.store.rdbms.RDBMSStoreManager$ClassAdder.performTablesValidation(RDBMSStoreManager.java:3459)
	at org.datanucleus.store.rdbms.RDBMSStoreManager$ClassAdder.addClassTablesAndValidate(RDBMSStoreManager.java:3190)
	... 49 more
This means the character set of the metastore database in mysql is wrong for the schema; fix it by running, in the database:
alter database hive character set latin1;
Concretely: log in to the host running mysql and enter the mysql shell:
mysql -uroot -pzxsoftdb123
alter database hive character set latin1;   # change the hive database's character set to latin1
Run ./hive again; it then fails with another error:
Exception in thread "main" java.lang.RuntimeException: java.lang.RuntimeException: The root scratch dir: /tmp/hive-root on HDFS should be writable. Current permissions are: rwx--x--x
Fix: make /tmp/hive-root writable:
hadoop fs -chmod +w /tmp/hive-root
Then:
hive> show tables;
If that works, the configuration is complete.
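As a final smoke test, you can create and drop a throwaway table from the hive shell (the table name here is arbitrary):

hive> CREATE TABLE smoke_test (id INT, msg STRING);
hive> SHOW TABLES;
hive> DROP TABLE smoke_test;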