ighack asked on 2017.01.06 15:53

kafka-node reports "Snappy codec is not installed"

The consumer code is as follows:

 var http = require('http');

http.createServer(function (request, response) {

    // Send the HTTP header
    // HTTP status: 200 OK
    // Content type: text/plain
    response.writeHead(200, {'Content-Type': 'text/plain'});

    var kafka = require('kafka-node');
    var Consumer = kafka.Consumer;
    var Client = kafka.Client;  
    var client = new Client("localhost:2181");
    var consumer = new Consumer(
        client,
        [
            { topic: 'fltest'}
        ],
        {
            autoCommit: false
        }
    );
    consumer.on('message',function(message){
        console.log(message);
    });
    consumer.on('error',function(err){
        console.log(err);
    });

    // Send the response body "Hello World"
    response.end('Hello World\n');
}).listen(9999);

// Print the following message to the terminal
console.log('Server running at http://127.0.0.1:9999/');

The Consumer keeps throwing the following error:

 E:\node_modules\.1.0.7@kafka-node\lib\codec\snappy.js:9
    throw new Error('Snappy codec is not installed');
    ^

Error: Snappy codec is not installed
    at Object.unavailableCodec (E:\node_modules\.1.0.7@kafka-node\lib\codec\snappy.js:9:11)
    at Object.<anonymous> (E:\node_modules\.1.0.7@kafka-node\lib\protocol\protocol.js:183:17)
    at Object.self.tap (E:\node_modules\.0.3.0@binary\index.js:248:12)
    at decodeMessageSet (E:\node_modules\.1.0.7@kafka-node\lib\protocol\protocol.js:161:8)
    at Object.<anonymous> (E:\node_modules\.1.0.7@kafka-node\lib\protocol\protocol.js:128:26)
    at Object.self.tap (E:\node_modules\.0.3.0@binary\index.js:248:12)
    at Object.decodePartitions (E:\node_modules\.1.0.7@kafka-node\lib\protocol\protocol.js:123:8)
    at Object.self.loop (E:\node_modules\.0.3.0@binary\index.js:267:16)
    at Object.<anonymous> (E:\node_modules\.1.0.7@kafka-node\lib\protocol\protocol.js:57:8)
    at Object.self.loop (E:\node_modules\.0.3.0@binary\index.js:267:16)
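
The throw comes from kafka-node's snappy codec guard (codec/snappy.js:9 in the trace above), which fires when the broker returns snappy-compressed message sets but the native snappy addon could not be loaded. A quick way to confirm whether the addon actually built is a one-off require check; this is a minimal sketch, and check-snappy.js is just a hypothetical helper name, not part of kafka-node:

// check-snappy.js -- hypothetical helper, not part of kafka-node.
// If require('snappy') throws, the native addon did not build, and
// kafka-node cannot decode snappy-compressed messages.
try {
    var snappy = require('snappy');
    console.log('snappy loaded, uncompress is a', typeof snappy.uncompress);
} catch (e) {
    console.log('snappy is NOT installed:', e.message);
}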

But there is a .5.0.5@snappy under my current directory, and I know what is going on: installing snappy also reported the following error.


E:\node_modules\.5.0.5@snappy>node "C:\Users\Administrator\AppData\Roaming\npm\node_modules\cnpm\node_modules\npminstall\node-gyp-bin\\node-gyp.js" rebuild
Building the projects in this solution one at a time. To enable parallel build, please add the "/m" switch.
CL : fatal error C1510: Cannot load language resource clui.dll. [E:\node_modules\.5.0.5@snappy\build\deps\snappy\snappy.vcxproj]
gyp ERR! build error
gyp ERR! stack Error: `C:\Windows\Microsoft.NET\Framework\v4.0.30319\msbuild.exe` failed with exit code: 1
gyp ERR! stack     at ChildProcess.onExit (C:\Users\Administrator\AppData\Roaming\npm\node_modules\cnpm\node_modules\node-gyp\lib\build.js:276:23)
gyp ERR! stack     at emitTwo (events.js:106:13)
gyp ERR! stack     at ChildProcess.emit (events.js:191:7)
gyp ERR! stack     at Process.ChildProcess._handle.onexit (internal/child_process.js:215:12)
gyp ERR! System Windows_NT 6.1.7601
gyp ERR! command "D:\\Application\\Node.js\\node.exe" "C:\\Users\\Administrator\\AppData\\Roaming\\npm\\node_modules\\cnpm\\node_modules\\npminstall\\node-gyp-bin\\node-gyp.js" "rebuild"
gyp ERR! cwd E:\node_modules\.5.0.5@snappy
gyp ERR! node -v v6.9.3
gyp ERR! node-gyp -v v3.4.0
gyp ERR! not ok
Error: post install error, please remove node_modules before retry!
Run "C:\Windows\system32\cmd.exe /d /s /c node-gyp rebuild" error, exit code 1
    at ChildProcess.proc.on.code (C:\Users\Administrator\AppData\Roaming\npm\node_modules\cnpm\node_modules\runscript\index.js:67:21)
    at emitTwo (events.js:106:13)
    at ChildProcess.emit (events.js:191:7)
    at maybeClose (internal/child_process.js:885:16)
    at Process.ChildProcess._handle.onexit (internal/child_process.js:226:5)
npminstall version: 2.16.0
npminstall args: E:\node.exe C:\Users\Administrator\AppData\Roaming\npm\node_modules\cnpm\node_modules\npminstall\bin\install.js --china --userconfig=C:\Users\Administrator\.cnpmrc --disturl=https://npm.taobao.org/mirrors/node --registry=https://registry.npm.taobao.org snappy
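
Since node-gyp cannot build the native addon on this machine, and the codec is only needed to decode snappy-compressed message sets, one workaround (assuming you control the producer) is to publish with gzip or no compression. A sketch using kafka-node's producer attributes flag, where 0 = no compression, 1 = gzip, 2 = snappy:

var kafka = require('kafka-node');
var client = new kafka.Client('localhost:2181');
var producer = new kafka.Producer(client);

producer.on('ready', function () {
    // attributes: 1 selects gzip, so consumers never need the snappy addon
    producer.send([
        { topic: 'fltest', messages: ['hello'], attributes: 1 }
    ], function (err, result) {
        console.log(err || result);
    });
});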

The rewritten code is as follows:

 var http = require('http');

http.createServer(function (request, response) {

    // Send the HTTP header
    // HTTP status: 200 OK
    // Content type: text/plain
    response.writeHead(200, {'Content-Type': 'text/plain'});

    var kafka = require('kafka-node'),
    Consumer = kafka.Consumer,
    //HighLevelConsumer = kafka.HighLevelConsumer,
    client = new kafka.Client(),
    consumer = new Consumer(
        client,
        payloads = [
            { topic:'fltest',partition:0},
            { topic:'fltest',partition:1},
            { topic:'fltest',partition:2}
        ],
        options = {
            groupId: 'kafka-node-group',
            autoCommit: false,
            fetchMaxWaitMs: 100,
            fetchMinBytes: 1,
            fetchMaxBytes: 1024 * 1024,
            fromOffset: false,
            encoding: 'utf8'
       }
    );
    //consumer = new HighLevelConsumer(
        //client,
        //[
            //{topic:'fltest'}
        //],
        //{
            //groupId: 'kafka-node-group',
            //autoCommit: false,
            //fetchMaxWaitMs: 100,
            //fetchMinBytes: 1,
            //fetchMaxBytes: 1024 * 1024,
            //fromOffset: false,
            //encoding: 'utf8'
        //}
    //);
    consumer.on('message', function (message) {
        console.log(message);
    });
    consumer.on('error', function (err) {
        //console.log(err);
    });
    //consumer.on('offsetOutOfRange', function (err) {
    //    console.log(err);
    //});
    consumer.close(function(){});

    // Send the response body "Hello World"
    response.end('Hello World\n');
}).listen(9999);

// Print the following message to the terminal
console.log('Server running at http://127.0.0.1:9999/');

Now the error has changed to:

 E:\node_modules\.0.2.2@node-zookeeper-client\lib\ConnectionManager.js:624
        if (!this.socket.write(packet.request.toBuffer())) {
                        ^

TypeError: Cannot read property 'write' of undefined
    at ConnectionManager.onPacketQueueReadable (E:\node_modules\.0.2.2@node-zookeeper-client\lib\ConnectionManager.js:624:25)
    at emitNone (events.js:86:13)
    at PacketQueue.emit (events.js:185:7)
    at PacketQueue.push (E:\node_modules\.0.2.2@node-zookeeper-client\lib\PacketQueue.js:35:10)
    at ConnectionManager.queue (E:\node_modules\.0.2.2@node-zookeeper-client\lib\ConnectionManager.js:711:30)
    at ConnectionManager.close (E:\node_modules\.0.2.2@node-zookeeper-client\lib\ConnectionManager.js:248:10)
    at Client.close (E:\node_modules\.0.2.2@node-zookeeper-client\index.js:229:28)
    at Zookeeper.close (E:\node_modules\.1.0.7@kafka-node\lib\zookeeper.js:468:15)
    at Client.close (E:\node_modules\.1.0.7@kafka-node\lib\client.js:163:11)
    at Consumer.close (E:\node_modules\.1.0.7@kafka-node\lib\consumer.js:264:17)


1 answer

ighack   answered 2017.01.11 10:05
Accepted answer

I have given up on Windows; snappy simply will not compile there.
As for the second error: don't close the consumer the moment it starts reading. close() queues a ZooKeeper packet before the socket has even connected, which is why this.socket is undefined in the stack trace. Remove the close() call from the request handler and only close when the program itself is shutting down.
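
In code terms, the accepted fix is to hoist the client and consumer out of the request handler and defer close() until the process exits. A minimal sketch, reusing the question's topic 'fltest' and assuming the default ZooKeeper at localhost:2181:

var http = require('http');
var kafka = require('kafka-node');

// Create the client and consumer once, at startup -- not once per request.
var client = new kafka.Client('localhost:2181');
var consumer = new kafka.Consumer(client, [{ topic: 'fltest' }], { autoCommit: false });

consumer.on('message', function (message) {
    console.log(message);
});
consumer.on('error', function (err) {
    console.log(err);
});

http.createServer(function (request, response) {
    response.writeHead(200, { 'Content-Type': 'text/plain' });
    response.end('Hello World\n');
}).listen(9999);

// Close only when the program itself is shutting down.
process.on('SIGINT', function () {
    consumer.close(function () {
        process.exit(0);
    });
});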
