This Meteor server code (part of an app) running on my local machine downloads a file from the web and saves it to AWS S3.
The Meteor app also runs in a Docker container on EC2, but with the modifications below it fails to start: docker ps does not show a running container.
The modifications run fine on the local machine, which downloads a file from the web and uploads it to AWS S3.
Any ideas how to fix this so that, when run in the EC2 Docker container, it downloads the file and saves it to AWS S3? Thanks
// server
let AWS = require('aws-sdk');
fs = require('fs');
let request = Npm.require('request');
Meteor.startup(() => {
AWS.config.update({
accessKeyId: 'abc',
secretAccessKey: 'xyz'
});
let url = "http://techslides.com/demos/sample-videos/small.mp4";
let fileArray = url.split("/");
let file = fileArray[fileArray.length - 1];
// (((it would be good if copying locally is avoided)))
// let localFilePath = "/home/ec2-user/"+file; // <=== fails on EC2
let localFilePath = "/local/path/" + file; // <=== works locally
request(url).pipe(fs.createWriteStream(localFilePath)).on("finish", function() {
fs.readFile(localFilePath, function(err, data) {
if (err) {
console.log("file does not exist");
throw err;
}
let base64data = new Buffer(data, 'binary');
let s3 = new AWS.S3();
s3.putObject({
Bucket: 'myBucket',
Key: file,
Body: base64data,
}, function(resp) {
console.log(arguments);
console.log('Successfully uploaded package.');
fs.unlink(localFilePath);
});
})
});
});
The reason is that the local Docker file system is read-only, so you can't save a file locally. See this answer to a similar question: Allow user to download file from public folder Meteor.js
There are several Meteor packages to help you with this, such as https://atmospherejs.com/ostrio/files. You can do a search on Atmosphere to find a suitable package.
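Alternatively, since the question notes that copying locally would best be avoided, you can stream the download straight to S3 so nothing is ever written to the container's filesystem. A minimal sketch, assuming aws-sdk v2 and the same request package used in the question; the bucket name and credentials are placeholders:

// server: stream the HTTP response directly into S3, no local file needed
let AWS = require('aws-sdk');
let request = Npm.require('request');

Meteor.startup(() => {
  AWS.config.update({
    accessKeyId: 'abc',
    secretAccessKey: 'xyz'
  });

  let url = "http://techslides.com/demos/sample-videos/small.mp4";
  let file = url.split("/").pop();
  let s3 = new AWS.S3();

  // s3.upload() accepts a readable stream as Body, so the request stream
  // is piped to S3 without touching the (read-only) local filesystem.
  s3.upload({
    Bucket: 'myBucket',
    Key: file,
    Body: request(url)
  }, function (err, data) {
    if (err) {
      console.log('Upload failed', err);
    } else {
      console.log('Successfully uploaded to', data.Location);
    }
  });
});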
I've built a new Next.js app. When I upload images from the browser I store them in public/Img/products using multer, and it works on localhost, but when I deploy it throws:
ENOENT: no such file or directory, open './public/Img/products/image.png'
Here's my code:
const storage = multer.diskStorage({
  destination: function (req, file, cb) {
    cb(null, './public/Img/products')
  },
  filename: function (req, file, cb) {
    cb(null, file.originalname);
  }
})

const upload = multer({
  storage
})

apiRoute.post(upload.single('image'), (req, res) => {
  let sql = "INSERT INTO `Produit`( `Nom`, `image`, `prix`, `Description`, `Quantite`, `promo`, `id_cat`) VALUES (?,?,?,?,?,?,?)";
  connection.query(sql, [req.body.name, `/Img/products/${req.file.originalname}`, req.body.price, req.body.description, req.body.qty, req.body.promo, req.body.categorie], (err, result) => {
    if (err) return res.status(500).send(err);
    res.statusCode = 200;
    res.send(result);
  });
  console.log(`${process.env.NEXTAUTH_URL}/Img/products/${req.file.originalname}`, req.body)
});
If anyone knows what's going on, please let me know!
The contract deploys to address 0x47c5e40890bcE4a473A49D7501808b9633F29782.
It looks like many other contracts were deployed to the same address by other people.
Shouldn't the contract address be unique, or is it deterministically generated or cached somehow by Hardhat?
Why would other people have deployed to the same address?
I am wondering if this is some bug with the Polygon/Mumbai testnet.
const { ethers } = require("hardhat");
async function main() {
const SuperMarioWorld = await ethers.getContractFactory("Rilu");
const superMarioWorld = await SuperMarioWorld.deploy("Rilu", "RILU");
await superMarioWorld.deployed();
console.log("Success contract was deployed to: ", superMarioWorld.address)
await superMarioWorld.mint("https://ipfs.io/ipfs/QmZkUCDt5CVRWQjLDyRS4c8kU6UxRNdpsjMf6vomDcd7ep")
}
// We recommend this pattern to be able to use async/await everywhere
// and properly handle errors.
main()
.then(() => process.exit(0))
.catch((error) => {
console.error(error);
process.exit(1);
});
Hardhat config (hardhat.config.js)
module.exports = {
solidity: '0.8.4',
networks: {
mumbai: {
url: process.env.MUMBAI_RPC,
accounts: [process.env.PRIVATE_KEY],
},
},
};
.env file (no problem with sharing the private key, just one I got from vanity-eth.tk and used for testing)
PRIVATE_KEY=84874e85685c95440e51d5edacf767f952f596cca6fd3da19b90035a20f57e37
MUMBAI_RPC=https://rpc-mumbai.maticvigil.com
Output
~/g/s/b/r/nft ❯❯❯ npx hardhat run scripts/deploy.js --network mumbai ✘ 1
Compiling 12 files with 0.8.4
Compilation finished successfully
Success contract was deployed to: 0x47c5e40890bcE4a473A49D7501808b9633F29782
This Meteor server code gets called in various places; running on my local development server it saves a file to the hard drive.
But now it is running in an AWS EC2 Docker container. How do I go about writing the file to an S3 bucket? Thanks
'saveToFile': function (fileName, text) {
if (env != 'development') return;
const playPath = '/Users/localPath/' + fileName + '.html';
fs.writeFile(playPath, text, (err) => {
if (err) throw err;
console.log(`Saved to ` + fileName);
});
}
You need to use the Node.js SDK for AWS:
https://aws.amazon.com/sdk-for-node-js/
Direct example:
https://gist.github.com/homam/8646090
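As a rough sketch, the saveToFile method above could write to S3 instead of the local path with something like the following. This assumes the aws-sdk v2 npm package; the bucket name is a placeholder, and credentials are expected to come from the environment or the EC2 instance role:

const AWS = require('aws-sdk');
const s3 = new AWS.S3();

Meteor.methods({
  'saveToFile': function (fileName, text) {
    // Upload the text directly as an S3 object instead of writing it to disk.
    s3.putObject({
      Bucket: 'myBucket',           // placeholder bucket name
      Key: fileName + '.html',
      Body: text,
      ContentType: 'text/html'
    }, function (err) {
      if (err) {
        console.log('S3 upload failed', err);
      } else {
        console.log('Saved ' + fileName + ' to S3');
      }
    });
  }
});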
Is it possible to copy an existing file in Firebase Storage without needing to upload it again?
I need it for a published/working version setup of my app.
There is no method in the Firebase Storage API to make a copy of a file that you've already uploaded.
But Firebase Storage is built on top of Google Cloud Storage, which means that you can use the latter's API too. It looks like gsutil cp is what you're looking for. From the docs:
The gsutil cp command allows you to copy data between your local file system and the cloud, copy data within the cloud, and copy data between cloud storage providers.
Keep in mind that gsutil has full access to your storage bucket. So it is meant to be run on devices you fully trust (such as a server or your own development machine).
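For example, copying an object from one path to another within the same bucket looks like this (bucket and object names are placeholders):

gsutil cp gs://my-bucket/images/photo.png gs://my-bucket/backup/photo.png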
Here is an approach I ended up with for my project.
While it covers a broader case and copies all files under the folder fromFolder to toFolder, it can easily be adapted to the case from the question (to copy only the files directly under the folder, you can pass delimiter = "/"; refer to the docs for more details).
const {Storage} = require('@google-cloud/storage');

module.exports = class StorageManager {
  constructor() {
    this.storage = new Storage();
    this.bucket = this.storage.bucket(<bucket-name-here>);
  }

  listFiles(prefix, delimiter) {
    return this.bucket.getFiles({prefix, delimiter});
  }

  deleteFiles(prefix, delimiter) {
    return this.bucket.deleteFiles({prefix, delimiter, force: true});
  }

  copyFilesInFolder(fromFolder, toFolder) {
    return this.listFiles(fromFolder)
      .then(([files]) => {
        let promiseArray = files.map(file => {
          let fileName = file.name;
          let destination = fileName.replace(fromFolder, toFolder);
          console.log("fileName = ", fileName, ", destination = ", destination);
          return file.copy(destination);
        });
        return Promise.all(promiseArray);
      });
  }
}
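For reference, a hypothetical usage of the class above (with the bucket placeholder filled in and the class exported from ./StorageManager; the folder names here are placeholders):

const StorageManager = require('./StorageManager');

const manager = new StorageManager();
manager.copyFilesInFolder('drafts/', 'published/')
  .then(() => console.log('copy finished'))
  .catch((err) => console.error('copy failed', err));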
I needed to copy folders (including all their descendants) in Google Cloud Storage. Here is the solution proposed by @vir-us, simplified and for Node.js.
import type { Bucket } from '@google-cloud/storage';

/** Copy Cloud Storage resources from one folder to another. */
export const copyStorageFiles = async (input: {
  bucket: Bucket;
  fromFolder: string;
  toFolder: string;
  logger?: Console['info'];
}) => {
  const { bucket, fromFolder, toFolder, logger = console.info } = input;

  const [files] = await bucket.getFiles({ prefix: fromFolder });

  const promiseArray = files.map((file) => {
    const destination = file.name.replace(fromFolder, toFolder);
    logger("fileName = ", file.name, ", destination = ", destination);
    return file.copy(destination);
  });

  return Promise.all(promiseArray);
};
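A hypothetical call site, assuming a bucket named 'my-bucket' and placeholder folder names, inside an ES module (or wrap it in an async function):

import { Storage } from '@google-cloud/storage';

const bucket = new Storage().bucket('my-bucket');
await copyStorageFiles({ bucket, fromFolder: 'drafts/', toFolder: 'published/' });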
In my Meteor app, the server tries to download some files and store them on the filesystem.
I use the Meteor.http package to do that, but the downloaded files seem to be corrupted.
var fileUrl = 'http://cdn.sstatic.net/stackoverflow/img/sprites.png?v=5'; //for example

Meteor.http.call("GET", fileUrl, function funcStoreFile(error, result) {
  "use strict";
  if (!error) {
    var fstream = Npm.require('fs'),
        filename = './.meteor/public/storage/' + collectionId;
    fstream.writeFile(filename, result.content, function funcStoreFileWriteFS(err) {
      if (!err) {
        var Fiber = Npm.require('fibers');
        Fiber(function funcStoreImageSaveDb() {
          MyfileCollection.update({_id: collectionId}, {$set: {fileFsPath: filename}});
        }).run();
      } else {
        console.log('error during writing file', err);
      }
    });
  } else {
    console.log('dl file FAIL');
  }
});
I made a symlink from public/storage to ../.meteor/public/storage to enable direct download from a URL (http://localhost:3000/storage/myfileId).
When I compare a file downloaded with this system to the same file downloaded directly in a browser, they are different. What's wrong with my approach?
I had a similar problem and built a solution based on the discussion at https://github.com/meteor/meteor/issues/905.
By using the request library, which Meteor uses under the hood as well, you can avoid the problem with binary downloads. Besides that, I would recommend not saving small files to the filesystem but storing them base64-encoded directly in MongoDB. This is the easiest solution if you plan to deploy to meteor.com or other cloud services.
Another glitch I found when saving files to the public dir in development is that Meteor reloads the app for every change in the public dir; this can lead to data corruption while chunks of the file are still being downloaded. Here is some code I am using, based on the discussion above.
Future = Npm.require("fibers/future")
request = Npm.require 'request'

Meteor.methods
  downloadImage: (url) ->
    if url
      fut = new Future()
      options =
        url: url
        encoding: null
      # Get raw image binaries
      request.get options, (error, result, body) ->
        if error then return console.error error
        base64prefix = "data:" + result.headers["content-type"] + ";base64,"
        image = base64prefix + body.toString("base64")
        fut.ret image
      # pause until binaries are fully loaded
      return fut.wait()
    else false

Meteor.call 'downloadImage', url, (err, res) ->
  if res
    Movies.update({_id: id}, {$set: {image: res}})
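For reference, a rough plain-JavaScript equivalent of the CoffeeScript above, under the same assumptions (the request and fibers packages bundled with Meteor at the time; Movies, url and id come from the surrounding code):

var Future = Npm.require('fibers/future');
var request = Npm.require('request');

Meteor.methods({
  downloadImage: function (url) {
    if (!url) return false;
    var fut = new Future();
    // encoding: null makes request return the body as a raw Buffer,
    // which avoids the binary-corruption problem described above.
    request.get({ url: url, encoding: null }, function (error, result, body) {
      if (error) return console.error(error);
      var base64prefix = 'data:' + result.headers['content-type'] + ';base64,';
      fut.ret(base64prefix + body.toString('base64'));
    });
    // Pause the fiber until the download has finished.
    return fut.wait();
  }
});

// Client-side usage, mirroring the CoffeeScript call:
Meteor.call('downloadImage', url, function (err, res) {
  if (res) {
    Movies.update({_id: id}, {$set: {image: res}});
  }
});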
Hope this is helpful.