I need a function that reads a newline-delimited file and emits its lines one at a time. Nothing hard.
But with Node, it is hard, and with Meteor there's an additional complication: you must use Meteor.wrapAsync. Surprisingly, there isn't an example of how to use wrapAsync in the docs, and I could only find a couple of examples online, none of which helped.
I have something like:
var readFileAsync = function (file, cb) {
  // From here to below comment works synchronously
  var instream = fs.createReadStream(file, function () {
    var outstream = new stream;
    outstream.readable = true;
    outstream.writable = true;
    var rl = readline.createInterface({
      input: instream,
      output: outstream,
      terminal: false
    });
    rl.on('line', function (line) {
      console.log(line);
      return line;
    });
  });
  // Reference to aforementioned comment
};
var readWatFile = Meteor.wrapAsync(readFileAsync);
var line = readWatFile('/path/to/my/file');
console.log(line);
I know this is wrong because it doesn't work, so how do I write this?
There are two ways to go about it.
Load the whole file into memory and do whatever you want. To do that you can use the Private Assets API (a small sketch of this follows the streaming example below).
Use Node.js streams and process the file line by line. You would have something like this.
Example code that you would need to tweak to your favorite streaming library:
var Future = Npm.require('fibers/future');
var byline = Npm.require('byline');

var future = new Future;

// create the stream in whatever way you like
var instream = fs.createReadStream(...);
var stream = byline.createStream(instream);

// run the stream, handling the line-by-line events asynchronously
stream.on('data', Meteor.bindEnvironment(function (line) {
  if (line) console.log(line);
  else future.return();
}));

// wait on the future, yielding to the other fibers and the line-by-line handling
future.wait();
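For the first option, here is a minimal sketch of the in-memory approach using the Private Assets API (the filename lines.txt is just an illustration; the file would need to live in your app's private/ directory):

// server-side only: Assets.getText reads from the app's private/ directory
var lines = Assets.getText('lines.txt').split('\n');
lines.forEach(function (line) {
  console.log(line);
});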
I'm using Atom with the init.js below. The objectives are:
1. Select all
2. Prettify the JSON
3. Change the grammar to JSON
I'm having difficulties with step #3. Below is my script:
atom.commands.add("atom-workspace", {
  "custom:prettify-json": function() {
    var view;
    view = atom.views.getView(atom.workspace.getActiveTextEditor());
    atom.commands.dispatch(view, "core:select-all");
    atom.commands.dispatch(view, "pretty-json:prettify");
    atom.commands.dispatch(view, "grammar-selector:show");
    return true;
  }
});
So far I have only found that grammar-selector has a show function. Is there any way to choose "source.json"?
There's the private method setGrammar() that has been around since forever. It should serve the task, unless the developers decide to remove/replace it in future versions.
atom.commands.add("atom-workspace", {
  "custom:prettify-json": function() {
    const editor = atom.workspace.getActiveTextEditor();
    const view = atom.views.getView(editor);
    atom.commands.dispatch(view, "core:select-all");
    atom.commands.dispatch(view, "pretty-json:prettify");
    editor.setGrammar(atom.grammars.grammarForScopeName('source.json'));
    return true;
  }
});
I am attempting to piece together an example from ml5 on image style transfer (https://ml5js.org/docs/style-transfer-image-example) with p5.js examples that parse a JSON of image URLs and add them to an array to display as images. I am hitting a dead end, as I do not think I fully understand the way that p5 stores images in an array, nor do I fully understand the difference between createImg(), createImage(), and loadImage() (which one to use!!).
The goal is to use the Bing image API to return a list of URLs from a search (this part is working fine) and run those images through a pretrained model (this part is working fine when just used on a local image). It is bringing the two together that I am unable to figure out. Any suggestions or advice (is this even possible??!) greatly appreciated.
I have already tried loading images into an array and iterating through the array in the draw() function. The problem happens when I need to address an image in order to actually apply the style transfer model. It seems like my array is empty when I attempt to refer to it anywhere except draw(). I am sure I am thinking about this incorrectly.
var imageData;
let imgArray = [];
var w = (window.innerWidth)/3;
var h = (window.innerHeight)/4;
var index = 0;
var xPos = 0;
var yPos = 0;
var indexMax = 3;
let style;
let resultImg;
function preload() {
  loadData();
}

function loadData() {
  var url = api + search + subscriptionKey;
  loadJSON(url, gotData);
}

function gotData(data) {
  imageData = data;
  for (var i = 0; i < indexMax; i++) {
    var _url = imageData.value[i].contentUrl;
    imgArray.push(loadImage(_url));
  }
}

function displayImages() {
  if (index < 3) {
    index++;
  } else {
    index = 0;
  }
}
function setup() {
  createCanvas(1200, 800).parent('canvasContainer');
  var button = select('#display');
  button.mousePressed(displayImages);
  var transferBtn = select('#transferBtn');
  transferBtn.mousePressed(transferImages);
  // create style method
  style = ml5.styleTransfer('/model', modelLoaded);
}

function draw() {
  image(imgArray[index], xPos, yPos, w, h);
}

// ml5 stuff
function modelLoaded() {
  if (style.ready) {
    select('#status').html('Model Loaded');
    // style.transfer(gotResult);
  }
}

function transferImages() {
  select('#status').html('applying style transfer');
  style.transfer(tempImg, function(err, result) {
    createImg(result.src);
  });
  select('#status').html('done');
}
I am attempting to (unsuccessfully) create a "tempImg" from imgArray[0] to try to figure out where this createImage needs to go, but have not gotten this to work. I have CORS enabled, so I didn't think that was the problem, but I am getting the following error. Please help me understand how to think about this differently.
You should use loadImage instead of createImg.
style.transfer(tempImg, function(err, result) {
  p5CompatibleImage = loadImage(result.src);
});
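Because loadImage is asynchronous, it is safest to use its success callback before you draw or reuse the result. A minimal sketch (resultImg is the global from the question; the error handling is illustrative):

style.transfer(tempImg, function(err, result) {
  if (err) {
    console.error(err);
    return;
  }
  // loadImage's success callback fires once the image is actually ready
  loadImage(result.src, function(img) {
    resultImg = img;
    select('#status').html('done');
  });
});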
I have several charts built with dc.js. I can achieve the desired functionality by attaching a callback to each dc.js chart's .on("filtered", function(chart) {}), but this is annoying because I have to attach the same callback to each chart. It is also error prone because as new charts are added, someone has to remember to attach an event handler. I would prefer to just attach a callback to the underlying crossfilter. Is that possible?
Is there a way to optimize this...
var ndx = crossfilter(data);
var dimAlpha = ndx.dimension(function(d) { return d.alpha; });
var dimBeta = ndx.dimension(function(d) { return d.beta; });
var groupAlpha = dimAlpha.group().reduceSum(function(d) { return 1; });
var groupBeta = dimBeta.group().reduceSum(function(d) { return 1; });

dc.pieChart(myDomId1)
  .dimension(dimAlpha)
  .group(groupAlpha)
  .on("filtered", function(chart) {
    // do stuff
  });

dc.pieChart(myDomId2)
  .dimension(dimBeta)
  .group(groupBeta)
  .on("filtered", function(chart) {
    // do stuff
  });
into something like this...
var ndx = crossfilter(data);
var dimAlpha = ndx.dimension(function(d) { return d.alpha; });
var dimBeta = ndx.dimension(function(d) { return d.beta; });
var groupAlpha = dimAlpha.group().reduceSum(function(d) { return 1; });
var groupBeta = dimBeta.group().reduceSum(function(d) { return 1; });

dc.pieChart(myDomId1)
  .dimension(dimAlpha)
  .group(groupAlpha);

dc.pieChart(myDomId2)
  .dimension(dimBeta)
  .group(groupBeta);

ndx.on("filtered", function() {
  // do stuff
});
If you've got a million charts and don't want to have to attach the event listener to each one manually, you could iterate through the chart registry and add them that way. Ex:
dc.chartRegistry.list().forEach(function(chart) {
  chart.on('filtered', function() {
    // your event listener code goes here.
  });
});
Note that this code must go after the charts have been instantiated in order to work.
In the absence of a way to attach the callback once globally, one thing you could do to mitigate the risk from duplicate code is to define the callback function once and pass in a reference instead of defining it inline on each chart.
function my_func() {
  // do stuff
}

dc.pieChart(myDomId2)
  .dimension(dimBeta)
  .group(groupBeta)
  .on("filtered", my_func);
The chart and the filter can also be passed to the callback, something like this:
function my_func(chart, filter) {
  // do stuff
}

dc.pieChart(myDomId2)
  .dimension(dimBeta)
  .group(groupBeta)
  .on("filtered", my_func);
I want to do further processing depending on the success or failure of the set() method, but I need the context of some objects at the time I call the set() method. Otherwise my objects will be out of scope when the oncomplete function is called, unless I put them in the global scope, which I don't really want to do.
Here is an example:
function oncomplete_AddTran(tran, client, appt, balance) {
  /* if the named argument 'balance' exists, it is safe to assume
     Firebase has not 'stepped on' the arguments with its single
     Error object or null */
  if (typeof balance === "object") console.log("my parameters made it here");
}
function addTran(tran, client, appt, balance) {
  var TRANS_LOCATION = 'https://xxx.firebaseIO.com/testing/transactions';
  var tranListRef = new Firebase(TRANS_LOCATION);
  var oncomplete = function() {
    oncomplete_AddTran(tran, client, appt, balance);
  };
  var tranref = tranListRef.child(tran.name).set(tran.literal, oncomplete);
}
Yes, it is possible. I was too impatient to keep waiting for the confirmation I was looking for, so I decided to test it myself. Here is the code I used (which works for my purpose):
function oncomplete_AddTran(tran, client, appt, balance) {
  console.log("arguments passed: " + arguments.length);
  // firebase original arguments :: arguments.callee.caller.arguments
  var fbargs = arguments.callee.caller.arguments;
}

function addTran(tran, client, appt, balance) {
  var TRANS_LOCATION = "https://xxx.firebaseIO.com/testing/transactions";
  var tranListRef = new Firebase(TRANS_LOCATION);
  var oncomplete = function() {
    oncomplete_AddTran(tran, client, appt, balance);
  };
  var tranref = tranListRef.child(tran.name).set(tran.literal, oncomplete);
}
function main() {
  var tran = {}; tran.name = "test1"; tran.literal = { tran: "tran" };
  var client = {}; client.literal = { client: "client" };
  var appt = {}; appt.literal = { appt: "appt" };
  var balance = {}; balance.literal = { balance: "balance" };
  addTran(tran, client, appt, balance);
}
The arguments were passed as expected, but I still don't know how Firebase's set() method will pass the error object to the callback (in the event of an error), because I haven't tried reproducing an error and don't really know if I can.
The default null (or the undefined that is supposed to be passed when there is no error) does not show up in arguments.callee.caller.arguments (see the callback function in the example above). I am not sure that what I am doing is good practice; it seems a bit hacky to me, so I won't accept this answer to the question (yet).
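For what it's worth, the completion callback itself receives the error as its only argument (null on success), and the variables closed over in addTran remain available inside it, so a sketch along these lines avoids inspecting arguments.callee at all:

var oncomplete = function(error) {
  if (error) {
    // set() failed; the closure still has tran, client, appt and balance in scope
    console.log("set() failed for " + tran.name, error);
  } else {
    oncomplete_AddTran(tran, client, appt, balance);
  }
};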
I would like to descend a directory, and examine the name of every file I see there against a regular expression. Basically, a version of the common unix find command, only written in Node.js. I don't care about the order of the files, but I do want to make sure I get all of them.
I have the following code, which is close (I think) to what I want. It takes a startdir, a regexp, and a callback; for each file it analyzes, it increments the sentinel by one, and when it is done with the analysis it decrements the sentinel. My concern is that if there's one file and a deeply nested collection of directories, it will analyze that file and trigger the callback long before it finds a second file, and possibly the callback will be called twice.
Obviously, I could prevent the callback from being called twice by having a fired variable to restrain it from firing a second time. But that still would give me erroneous data. What am I doing wrong here, and is there a more node-appropriate way to do it?
var fs = require('fs');
var path = require('path');

function get_all_files(startdir, regexp, callback) {
  var sentinel = 0;
  var results = [];

  function check_sentinel() {
    sentinel--;
    if (sentinel === 0) {
      callback(results);
    }
  }

  function check_file(dir, filename) {
    sentinel++;
    if (regexp.test(filename)) {
      results.push(path.join(dir, filename));
    }
    check_sentinel();
  }

  function check_directory(dir) {
    fs.readdir(dir, function(err, files) {
      if (err) {
        throw err;
      }
      files.forEach(function(fname) {
        var npath = path.join(dir, fname);
        fs.stat(npath, function(err, stats) {
          if (stats.isFile()) {
            check_file(dir, fname);
          } else if (stats.isDirectory()) {
            check_directory(npath);
          }
        });
      });
    });
  }

  check_directory(startdir);
}
A couple of thoughts...
I have never used it, but the simplest way to do what you are asking might be to use the async.js walkfiles function. See the tests for an example here.
Otherwise, I would consider building an array of function calls and returning the array from your recursive directory walking function (rather than using a sentinel, etc). In other words, check_directory returns an array of function calls matching the files you are looking for. If there is no file, the array is empty.
Finally, combine the array at the top of the recursion and use the async library (not the same as async.js) to execute the array of functions all at once using the parallel function (see this thread for an example using "series").
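To illustrate the last two points, here is a minimal sketch of that idea, assuming the async library is installed; the helper name collectTasks and the synchronous directory walk are just for brevity, not part of the original suggestion:

var fs = require('fs');
var path = require('path');
var async = require('async');

// Walk the tree and build one task per matching file; each task is a
// function that async.parallel can execute later.
function collectTasks(dir, regexp) {
  var tasks = [];
  fs.readdirSync(dir).forEach(function(name) {
    var full = path.join(dir, name);
    if (fs.statSync(full).isDirectory()) {
      tasks = tasks.concat(collectTasks(full, regexp));
    } else if (regexp.test(name)) {
      tasks.push(function(done) {
        // analyze the file here, then report its path back
        done(null, full);
      });
    }
  });
  return tasks;
}

// Execute every task and receive all matching paths in a single callback.
async.parallel(collectTasks('/tmp', /\.log$/), function(err, matches) {
  if (err) throw err;
  console.log(matches);
});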