Node.js: How do you know when a collection of asynchronous tasks is done?

I would like to descend a directory, and examine the name of every file I see there against a regular expression. Basically, a version of the common unix find command, only written in Node.js. I don't care about the order of the files, but I do want to make sure I get all of them.
I have the following code, which is close (I think) to what I want. It takes a startdir, a regexp, and a callback; for each file it analyzes it increments sentinel by one, and when it is done with the analysis it decrements the sentinel. My concern is that if there's one file and a deeply nested collection of directories, it will analyze that file and trigger the callback long before it finds a second file, and possibly the callback will be called twice.
Obviously, I could prevent the callback from being called twice by having a fired variable to restrain it from firing a second time. But that still would give me erroneous data. What am I doing wrong here, and is there a more node-appropriate way to do it?
fs = require('fs');
path = require('path');

function get_all_files(startdir, regexp, callback) {
    var sentinel = 0;
    var results = [];

    function check_sentinel() {
        sentinel--;
        if (sentinel === 0) {
            callback(results);
        }
    }

    function check_file(dir, filename) {
        var fname;
        sentinel++;
        if (regexp.test(filename)) {
            results.push(path.join(dir, filename));
        }
        check_sentinel();
    }

    function check_directory(dir) {
        fs.readdir(dir, function(err, files) {
            var fname, i, len, npath;
            if (err) {
                throw err;
            }
            for (i = 0, len = files.length; i < len; i++) {
                fname = files[i];
                npath = path.join(dir, fname);
                fs.stat(npath, function(err, stats) {
                    if (stats.isFile()) {
                        check_file(dir, fname);
                    } else {
                        if (stats.isDirectory()) {
                            check_directory(npath);
                        }
                    }
                });
            }
        });
    }

    check_directory(startdir);
}

A couple of thoughts...
I have never used it, but the simplest way to do what you are asking might be to use the async.js walkfiles function. See the tests for an example here.
Otherwise, I would consider building an array of function calls and returning the array from your recursive directory walking function (rather than using a sentinel, etc). In other words, check_directory returns an array of function calls matching the files you are looking for. If there is no file, the array is empty.
Finally, combine the array at the top of the recursion and use the async library (not the same as async.js) to execute the array of functions all at once using the parallel function (see this thread for an example using "series").
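To make that second suggestion concrete, here is a rough sketch of the pattern using the async library's parallel(). Treat it as an illustration rather than a drop-in replacement: it assumes the async package is installed, and it switches the callback to the conventional (err, results) signature instead of the callback(results) used above.

var fs = require('fs');
var path = require('path');
var async = require('async');

function get_all_files(startdir, regexp, callback) {
    fs.readdir(startdir, function(err, files) {
        if (err) return callback(err);
        // Build one task per directory entry; async.parallel waits for all of them.
        var tasks = files.map(function(name) {
            var full = path.join(startdir, name);
            return function(done) {
                fs.stat(full, function(err, stats) {
                    if (err) return done(err);
                    if (stats.isDirectory()) {
                        // Recurse; the nested call reports its own matches to done().
                        get_all_files(full, regexp, done);
                    } else {
                        done(null, regexp.test(name) ? [full] : []);
                    }
                });
            };
        });
        async.parallel(tasks, function(err, nested) {
            if (err) return callback(err);
            // Flatten the per-entry result arrays into a single list of matches.
            callback(null, [].concat.apply([], nested));
        });
    });
}

Because async.parallel only fires its final callback once every task has called done(), there is no way for the top-level callback to fire early, which is exactly the guarantee the sentinel was trying to provide.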

Related

createImage() vs createImg() vs loadImage() in p5: which should I use to load an array of images for use in ml5?

I am attempting to piece together an example from ml5 on image style transfer (https://ml5js.org/docs/style-transfer-image-example) with p5.js examples that parse a JSON of image URLs and add them to an array to display as images. I am hitting a dead end, as I do not think I fully understand the way p5 stores images in an array, nor do I fully understand the difference between createImg(), createImage(), and loadImage() (which one to use!!).
The goal is to use the Bing image API to return a list of URLs from a search (this part is working fine) and run those images through a pretrained model (this part is working fine when just used on a local image). It is bringing the two together that I am unable to figure out. Any suggestions or advice (is this even possible??!) greatly appreciated.
I have already tried loading images into an array and iterating through the array in the draw() function. The problem happens when I need to address an image in order to actually apply the style transfer model. It seems like my array is empty when I attempt to refer to it anywhere except draw(). I am sure I am thinking about this incorrectly.
var imageData;
let imgArray = [];
var w = (window.innerWidth)/3;
var h = (window.innerHeight)/4;
var index = 0;
var xPos = 0;
var yPos = 0;
var indexMax = 3;
let style;
let resultImg;

function preload() {
    loadData();
}

function loadData() {
    var url = api + search + subscriptionKey;
    loadJSON(url, gotData);
}

function gotData(data) {
    imageData = data;
    for (var i = 0; i < indexMax; i++) {
        _url = imageData.value[i].contentUrl;
        imgArray.push(loadImage(_url));
    }
}

function displayImages() {
    if (index < 3) {
        index++;
    } else {
        index = 0;
    }
}

function setup() {
    createCanvas(1200, 800).parent('canvasContainer');
    var button = select('#display');
    button.mousePressed(displayImages);
    var transferBtn = select('#transferBtn');
    transferBtn.mousePressed(transferImages);
    //create style method
    style = ml5.styleTransfer('/model', modelLoaded);
}

function draw() {
    image(imgArray[index], xPos, yPos, w, h);
}

//ml5 stuff
function modelLoaded() {
    if (style.ready) {
        select('#status').html('Model Loaded');
        //style.transfer(gotResult);
    }
}

function transferImages() {
    select('#status').html('applying style transfer');
    style.transfer(tempImg, function(err, result) {
        createImg(result.src);
    });
    select('#status').html('done');
}
I am attempting (unsuccessfully) to create a "tempImg" from imgArray[0] to try to figure out where this createImage needs to go, but have not gotten this to work. I have CORS enabled, so I didn't think this was the problem, but am getting the following error. Please help me understand how to think about this differently.
You should use loadImage instead of createImg.
style.transfer(tempImg, function(err, result) {
    p5CompatibleImage = loadImage(result.src);
});
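Putting that together with the variables already declared in the sketch, transferImages() might look roughly like this. Passing imgArray[0] in place of tempImg and storing the result in resultImg are assumptions about what you intend; the success callback is just p5's optional second argument to loadImage().

function transferImages() {
    select('#status').html('applying style transfer');
    // Use an image that is already loaded, e.g. the first one from the array.
    style.transfer(imgArray[0], function(err, result) {
        // loadImage() returns a p5.Image, so it can later be drawn with image().
        resultImg = loadImage(result.src, function() {
            // Only report "done" once the transferred image has actually loaded.
            select('#status').html('done');
        });
    });
}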

How to make Flow understand dynamic code that uses lodash for runtime type-checking?

Flow's dynamic code example indicates that Flow can figure out runtime type-checking:
function foo(x) {
    if (typeof x === 'string') {
        return x.length; // flow is smart enough to see this is safe
    } else {
        return x;
    }
}
var res = foo('Hello') + foo(42);
But in real life, typeof isn't good enough. I usually use lodash's type-checking functions (_.isFunction, _.isString etc), which handle a lot of edge cases.
The problem is, if we change the example to use lodash for the runtime type-checking, Flow no longer understands it:
function foo(x) {
    if (_.isString(x)) {
        return x.length; // warning: `length` property not found in Number
    } else {
        return x;
    }
}
var res = foo('Hello') + foo(42);
I tried using iflow-lodash but it doesn't seem to make a difference here.
What's the best solution to make Flow understand code that uses lodash for runtime type-checking? I'm new to Flow btw.
This would depend on having predicate types in your lodash libdefs.
Predicate types have recently been added to Flow, but they are still in an experimental state, so I would recommend being careful about using them for anything serious for now.
function isString(x): boolean %checks { // << declare that the method is a refinement
    return typeof x === 'string';
}

function method(x: string | number): number {
    if (isString(x)) { // << valid refinement
        return x.charCodeAt(0); // << no errors
    } else {
        return x;
    }
}
Note: This answer may quickly fall out of date in one of the next releases as this is a brand new feature. Check out Flow's changelog for the latest information.
The solution for now, if possible, is to use the built-in refinements.
function method(x: string | number): number {
    if (typeof x === "string") { // << Inline the check
        return x.charCodeAt(0);
    } else {
        return x;
    }
}
The most obvious solution for this specific case is:
if (_.isString(x) && typeof x === 'string') {
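Applied to the earlier foo example, that workaround looks like this; the duplicated typeof check exists only so Flow can refine the type, while lodash still does the real runtime work:

function foo(x) {
    if (_.isString(x) && typeof x === 'string') {
        return x.length; // no warning: the typeof check lets Flow refine x to string
    } else {
        return x;
    }
}
var res = foo('Hello') + foo(42);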
In general, you might be able to overcome Flow errors with creative error suppression, like this:
if (_.isString(x)) {
    // #ManuallyTyped
    var xStr: string = x;
    return xStr.length;
} else { ... }
Make sure to define // #ManuallyTyped as a custom suppress_comment in your flow config file for this to work. You might need an ugly regex for that, see flow docs.
It's been a while since I've last done this, but if I recall correctly Flow will assume that your xStr is a string, while the rest of type checking will work just fine.
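For reference, the suppress_comment entry in .flowconfig might look roughly like this; the exact escaping is an assumption, so check the Flow docs for your version:

[options]
suppress_comment=\\(.\\|\n\\)*#ManuallyTyped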

SQLite: reading from an SQLite database in a plugin

I need to read values from an SQLite database in my plugin. For that I found the Sqlite.jsm module. My problem is that I want to store a row in a global variable, but the code used with Sqlite.jsm (promises and tasks) is asynchronous. Is there a way I can collect information from my database into a global variable?
let iDs = [];

Task.spawn(function* () {
    let db = yield Sqlite.openConnection({ path: permissionFilePath });
    try {
        let row = yield db.execute("SELECT id FROM 'moz_hosts'");
        for (i = 0; i < row.length; i++) {
            console.log("row[" + i + "] :" + row[i].getResultByIndex(0));
            yield iDs.push(row[i].getResultByIndex(0));
        }
    }
    finally {
        yield db.close();
    }
});

// Part of the code that doesn't work, because iDs is not yet assigned any values!
console.log("debug");
for (i = 0; i < iDs.length; i++) {
    yield console.log("iDs [" + i + "] = " + iDs[i]);
}
First of all, for a Task you only have to yield things that return promises and therefore run asynchronously. There is no need to yield iDs.push(row[i].getResultByIndex(0)), because the push operation will synchronously return the new length of the array. This shouldn't be much of an issue for the code itself, though.
Do you really need the ids to be global? Maybe you can refactor your code so that you don't need to save them globally.
If this is not an option, you will have to block all operations that are going to access the ids until the SQL call has completed. You can do this by relying on the fact that Task.spawn() itself will also return a promise. This also has the nice side effect that you don't need the extra global array:
let idsPromise = Task.spawn(function*() {
    let ids = [];
    let db = yield Sqlite.openConnection({ path: permissionFilePath });
    try {
        let row = yield db.execute("SELECT id FROM 'moz_hosts'");
        for (let i = 0, len = row.length; i < len; i++) {
            ids.push(row[i].getResultByIndex(0));
        }
        // Instead of the loop you can also use:
        // ids = row.map(row => row.getResultByIndex(0));
    } finally {
        yield db.close();
    }
    return ids;
});
Then, in other parts of your code, when you want the ids you can use:
idsPromise.then(function(ids) {
    // Do something with ids
});
Alternatively, you can also get them in a Task:
Task.spawn(function*() {
    let ids = yield idsPromise;
});
You can do this multiple times. Once a promise is resolved the then() part will be executed as soon as possible.
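For example, two independent consumers can both wait on the same promise without re-running the query:

// Both callbacks run once idsPromise resolves; the database work happens only once.
idsPromise.then(function(ids) {
    console.log("first consumer sees " + ids.length + " ids");
});
idsPromise.then(function(ids) {
    console.log("second consumer sees " + ids.length + " ids");
});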

How to handle this kind of thing using ASP.NET MVC

I have
public jsonresult update(studentinfo s)
{
    for (i = 0; i > 0; i++)
    {
        var x = // I am getting some x, so I am checking again
        if (x != null)
        {
            var updateuser = student.update(s.student, "", "");
            // If I keep the return here I get an exception saying "not all code paths
            // return a value", but I cannot move it outside the loop because I need to
            // return JSON for each and every updated user.
            return json(updateuser.ToString());
        }
    }
}
How can I overcome this kind of problem?
What language are you using to write your code? What you've posted doesn't look like any of the valid languages I know for .NET. Here's how the controller action might look in C# (assuming this is the language you are using):
public ActionResult Update(StudentInfo s)
{
    // create some collection that will contain all updated users
    var updatedUsers = new List<StudentInfo>();

    // Revise the loop, as it is absolutely not clear from your code
    // what you are trying to do. The way you wrote the loop it will
    // never execute - for(int i=0; i>0; i++)
    for (int i = 0; i < 5; i++)
    {
        var updatedUser = student.Update(s.student, "", "");
        updatedUsers.Add(updatedUser);
    }

    // return the list of updated users outside the loop so that the compiler
    // doesn't complain about paths of the method not returning a value
    return Json(updatedUsers);
}
If I understand correctly, you want to return a collection of users. The 'return' keyword does not work like that. You need to return the entire collection at once.

Can I get some advice on JavaScript delegates?

I'm rusty with delegates and closures in JavaScript, and think I came across a situation where I'd like to try to use one or both.
I have a web app that behaves a lot like a forms app, with fields hitting a server to change data on every onBlur or onChange (depending on the form element). I use ASP.NET 3.5's Web Services and jQuery to do most of the work.
What you need to know for the example:
isBlocking() is a simple mechanism to force some functions to be synchronous (like a mutex)
isDirty(el) checks to make sure the value of the element actually changed before wasting a call to the server
Agent() returns a singleton instance of the WebService proxy class
getApplicationState() passes a base-64 encoded string to the web service. This string represents the state of the application -- the value of the element and the state are passed to a service that does some calculations. The onSuccess function of the web service call returns the new state, which the client processes and updates the entire screen.
waitForCallback() sets a flag that isBlocking() checks for the mutex
Here's an example of one of about 50 very similar functions:
function Field1_Changed(el) {
    if (isBlocking()) return false;
    if (isDirty(el)) {
        Agent().Field1_Changed($j(el).val(), getApplicationState());
        waitForCallback();
    }
}
The big problem is that the Agent().Field_X_Changed methods can accept a different number of parameters, but it's usually just the value and the state. So, writing these functions gets repetitive. I have done this so far to try out using delegates:
function Field_Changed(el, updateFunction, checkForDirty) {
    if (isBlocking()) return false;
    var isDirty = true; // assume true
    if (checkForDirty === true) {
        isDirty = IsDirty(el);
    }
    if (isDirty) {
        updateFunction(el);
        waitForCallback();
    }
}

function Field1_Changed(el) {
    Field_Changed(el, function(el) {
        Agent().Field1_Changed($j(el).val(), getTransactionState());
    }, true);
}
This is ok, but sometimes I could have many parameters:
...
Agent().Field2_Changed($j(el).val(), index, count, getApplicationState());
....
What I'd ultimately like to do is make one-line calls, something like this (notice no getTransactionState() calls -- I would like that automated somehow):
// Typical case: 1 value parameter
function Field1_Changed(el) {
    Field_Changed(el, delegate(Agent().Field1_Changed, $j(el).val()), true);
}

// Rare case: multiple value parameters
function Field2_Changed(el, index, count) {
    Field_Changed(el, delegate(Agent().Field1_Changed, $j(el).val(), index, count), true);
}

function Field_Changed(el, theDelegate, checkIsDirty) {
    ???
}

function delegate(method) {
    /* create the change delegate */
    ???
}
Ok, my first question is: Is this all worth it? Is this harder to read but easier to maintain or the other way around? This is a pretty good undertaking, so I may end up putting a bounty on this one, but I'd appreciate any help you could offer. Thanks!
UPDATE
So, I've accepted an answer based on the fact that it pointed me in the right direction. I thought I'd come back and post my solution so that others who may just be starting out with delegates have something to model from. I'm also posting it to see if anybody wants to try to optimize it or make suggestions. Here's the common Field_Changed() method I came up with, with checkForDirty and omitState being optional parameters:
function Field_Changed(el, args, delegate, checkForDirty, omitState) {
    if (isBlocking()) return false;
    if (!$j.isArray(args) || args.length == 0) {
        alert('The "args" parameter in Field_Changed() must be an array.');
        return false;
    }
    checkForDirty = checkForDirty || true; // assume true if not passed
    var isDirty = true; // assume true for updates that don't require this check
    if (checkForDirty === true) {
        isDirty = fieldIsDirty(el);
    }
    if (isDirty) {
        omitState = omitState || false; // assume false if not passed
        if (!omitState) {
            var state = getTransactionState();
            args.push(state);
        }
        delegate.apply(this, args);
        waitForCallback();
    }
}
It handles everything I need it to (checking for dirty, applying the application state when I need it to, and forcing synchronous web service calls). I use it like this:
function TransactionAmount_Changed(el) {
    Field_Changed(el, [cleanDigits($j(el).val())], Agent().TransactionAmount_Changed, true);
}
cleanDigits strips out junk characters the user may have tried to type in. So, thanks to everyone, and happy coding!
OK, a few things:
Delegates are extremely simple in JavaScript since functions are first-class members.
Function.apply lets you call a function with an array of arguments.
So you can write it this way:
function Field_Changed(delegate, args)
{
    if (isBlocking()) return false;
    if (isDirty(args[0])) { // args[0] is el
        delegate.apply(this, args);
        waitForCallback();
    }
}
And call it as:
Field_Changed(Agent().Field2_Changed, [el, getApplicationState(), whatever...]);
I have been using the following utility function that I wrote a long time ago:
/**
 * @classDescription This class contains different utility functions
 */
function Utils()
{}

/**
 * This method returns a delegate function closure that will call
 * targetMethod on targetObject with specified arguments and with
 * arguments specified by the caller of this delegate
 *
 * @param {Object} targetObj - the object to call the method on
 * @param {Object} targetMethod - the method to call on the object
 * @param {Object} [arg1] - optional argument 1
 * @param {Object} [arg2] - optional argument 2
 * @param {Object} [arg3] - optional argument 3
 */
Utils.createDelegate = function( targetObj, targetMethod, arg1, arg2, arg3 )
{
    // Create an array containing the arguments
    var initArgs = new Array();

    // Skip the first two arguments as they are the target object and method
    for( var i = 2; i < arguments.length; ++i )
    {
        initArgs.push( arguments[i] );
    }

    // Return the closure
    return function()
    {
        // Add the initial arguments of the delegate
        var args = initArgs.slice(0);

        // Add the actual arguments specified by the call to this list
        for( var i = 0; i < arguments.length; ++i )
        {
            args.push( arguments[i] );
        }

        return targetMethod.apply( targetObj, args );
    };
}
So, in your example, I would replace
function Field1_Changed(el) {
    Field_Changed(el, delegate(Agent().Field1_Changed, $j(el).val()), true);
}
With something along the lines of:
function Field1_Changed(el) {
    Field_Changed(el, Utils.createDelegate(Agent(), Agent().Field1_Changed, $j(el).val()), true);
}
Then, inside of Agent().FieldX_Changed I would manually call getApplicationState() (and encapsulate that logic into a generic method to process field changes that all of the Agent().FieldX_Changed methods would internally call).
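As a rough sketch of that encapsulation (processFieldChange is a hypothetical helper name, not something in the original code):

function processFieldChange(serviceMethod /*, value1, value2, ... */) {
    // Collect the field-specific values, append the application state once,
    // and invoke the proxy method, so no call site repeats getApplicationState().
    var values = Array.prototype.slice.call(arguments, 1);
    values.push(getApplicationState());
    return serviceMethod.apply(Agent(), values);
}

// A field handler then only supplies its own values:
function Field2_Changed(el, index, count) {
    processFieldChange(Agent().Field2_Changed, $j(el).val(), index, count);
}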
Closures and delegates in JavaScript:
http://www.terrainformatica.com/2006/08/delegates-in-javascript/
http://www.terrainformatica.com/2006/08/delegates-in-javascript-now-with-parameters/
