EmberJS - error thrown on `firebase` implementation

In my EmberJS app I am implementing the Firebase database. For that, I have changed the environment settings like this (please see my comments):
module.exports = function(environment) {
  var ENV = {
    modulePrefix: 'ember-simple-blog-eee6c',
    environment: environment,
    rootURL: '/',
    contentSecurityPolicy: {
      'script-src': "'self' 'unsafe-eval' apis.google.com",
      'frame-src': "'self' https://*.firebaseapp.com",
      'connect-src': "'self' wss://*.firebaseio.com https://*.googleapis.com",
      'font-src': "'self' data: fonts.gstatic.com",
      'style-src': "'self' 'unsafe-inline' fonts.googleapis.com"
    },
    firebase: {
      authDomain: 'ember-simple-blog-eee6c.firebaseapp.com',
      databaseURL: 'https://ember-simple-blog-eee6c.firebaseio.com/',
      projectId: 'ember-simple-blog-eee6c',
      storageBucket: '',
      messagingSenderId: '731960884482' // not sure, taken from a previous app
    },
    locationType: 'auto',
    EmberENV: {
      FEATURES: {
        // Here you can enable experimental features on an ember canary build
        // e.g. 'with-controller': true
      },
      EXTEND_PROTOTYPES: {
        // Prevent Ember Data from overriding Date.parse.
        Date: false
      }
    },
    APP: {
      // Here you can pass flags/options to your application instance
      // when it is created
    }
  };

  if (environment === 'development') {
    // ENV.APP.LOG_RESOLVER = true;
    // ENV.APP.LOG_ACTIVE_GENERATION = true;
    // ENV.APP.LOG_TRANSITIONS = true;
    // ENV.APP.LOG_TRANSITIONS_INTERNAL = true;
    // ENV.APP.LOG_VIEW_LOOKUPS = true;
  }

  if (environment === 'test') {
    // Testem prefers this...
    ENV.locationType = 'none';

    // keep test console output quieter
    ENV.APP.LOG_ACTIVE_GENERATION = false;
    ENV.APP.LOG_VIEW_LOOKUPS = false;

    ENV.APP.rootElement = '#ember-testing';
  }

  if (environment === 'production') {
  }

  return ENV;
};
After applying this config, I am getting this error:
Uncaught Error: Could not find module `ember-simple-blog-eee6c/app` imported from `(require)`
What is wrong here, and how can I solve it? Can anyone please help me?
Thanks in advance.

I suspect that your Firebase config has nothing to do with your error. I'm able to recreate your error in my own app by giving modulePrefix an incorrect name. Is it possible you named your modulePrefix after your Firebase project instead of your Ember project? If so, I think that is the reason: modulePrefix needs to match the name of your Ember app (the name in package.json, which is usually the folder your Ember project is inside of).
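For example, if the Ember app had been generated with the (hypothetical) name my-blog, the two names would have to line up like this:

// package.json
"name": "my-blog"

// config/environment.js
var ENV = {
  modulePrefix: 'my-blog', // must match the app name, not the Firebase project ID
  ...
};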

Related

Next.js: reach .env from /lib without exposing it to the client

In my Next.js blog app I have been trying to set up a global API call:
import { createClient } from "contentful";

const client = createClient({
  space: process.env.CONTENTFUL_SPACE_ID,
  accessToken: process.env.CONTENTFUL_ACCESS_KEY,
});

export async function getAuthors() {
  // fetch the entries when the function is called
  const auth_data = await client.getEntries({ content_type: "author" });
  var authors = [];
  for (var i = 0; i < auth_data.items.length; i++) {
    authors.push({
      authorSlug: auth_data.items[i].fields.name
        .toString()
        .replace(/ /g, "-")
        .toLowerCase(),
      authorContent: auth_data.items[i].fields.description,
      authorFrontMatter: {
        title: auth_data.items[i].fields.name,
        image: "https:" + auth_data.items[i].fields.image.fields.file.url,
      },
    });
  }
  return authors;
}
I keep getting TypeError: Expected parameter accessToken because the environment variable is not available in the /lib folder where this getAuthors() function is located. If I prefix the variables with NEXT_PUBLIC_ I can reach them from /lib, but then I would expose them to the browser.
Is there a way to reach the environment variables from /lib WITHOUT exposing them to the browser?
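In case it helps, here is a minimal hedged sketch of the usual pattern (the file names and paths are assumptions, not from the question): keep the non-prefixed variable and only call the /lib helper from server-side code such as getStaticProps, which runs on the server and never ships the key to the browser.

// pages/authors.js (hypothetical page)
import { getAuthors } from "../lib/authors"; // assumed path to the helper above

export async function getStaticProps() {
  // Runs only on the server, so CONTENTFUL_ACCESS_KEY is available here
  // without the NEXT_PUBLIC_ prefix and is never exposed to the client.
  const authors = await getAuthors();
  return { props: { authors } };
}

export default function AuthorsPage({ authors }) {
  return <pre>{JSON.stringify(authors, null, 2)}</pre>;
}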

Why doesn't my custom Next.js server work in my Vercel deployment?

Hi, I have this repository for testing. On localhost, when I run "npm run dev", I can see my custom server working: server.js at the root performs the redirect if my custom cookie exists.
But in production on Vercel I cannot see the custom server working. I can tell because server.js adds a session cookie named "n-session" with value "1", which never appears in production; the log traces in server.js are not displayed either.
In my package.json I have:
"scripts": {
"dev": "node server.js",
"build": "next build",
"start": "NODE_ENV=production node server.js",
"export": "next export",
},
My server.js has this code:
// server.js
const { createServer } = require('http')
const { parse } = require('url')
const next = require('next')
const app = next({})
const handle = app.getRequestHandler()
var Cookies = require('cookies');
const { localeLanguages } = require('next/config').default().publicRuntimeConfig;

app.prepare().then(() => {
  createServer((req, res) => {
    // Be sure to pass `true` as the second argument to `url.parse`.
    // This tells it to parse the query portion of the URL.
    const parsedUrl = parse(req.url, true);
    const { pathname } = parsedUrl;
    console.log("pathname", pathname)

    if (pathname === '/') {
      console.log("pathname is root..................")
      const mainLang = process.env.NEXT_PUBLIC_MAIN_LANG;
      let uriRedirect = null;

      if (req && req.headers) {
        console.log("req.headers", req.headers)
        const cookies = new Cookies(req, res);
        let userLang = mainLang;
        let userLangCookie = cookies.get(process.env.NEXT_PUBLIC_USER_LANGUAGE_COOKIE);
        let initSession = cookies.get(process.env.NEXT_PUBLIC_INIT_SESION_COOKIE);
        console.log("userLangCookie", userLangCookie)
        console.log("initSession", initSession)

        let acceptLanguage = req.headers['accept-language'];
        if (acceptLanguage) {
          acceptLanguage = (acceptLanguage.split(',')[0]).split('-')[0];
          let found = localeLanguages.filter(function (e) {
            return e.label == acceptLanguage;
          });
          if (found.length > 0) {
            userLang = acceptLanguage;
          }

          if (typeof initSession === "undefined" || initSession === null) {
            if (typeof userLangCookie === "undefined" ||
                (userLangCookie === null && userLang !== mainLang)) {
              uriRedirect = `/${userLang}`;
            } else if (userLangCookie !== mainLang) {
              uriRedirect = `/${userLangCookie}`;
            }
            cookies.set(process.env.NEXT_PUBLIC_INIT_SESION_COOKIE, 1, {
              httpOnly: true // true by default
            })
          }
        }
      }

      console.log("uriRedirect", uriRedirect)
      if (uriRedirect !== null) {
        res.writeHead(302, { Location: `${uriRedirect}` }).end();
      } else {
        handle(req, res, parsedUrl);
      }
    } else {
      handle(req, res, parsedUrl);
    }
  }).listen(3000, (err) => {
    if (err) throw err
    console.log('> Ready on http://localhost:3000')
  })
})
I upload to Vercel from my GitHub repository using the git commands add/commit/push.
My repository is this: https://github.com/anialamo/nootric-next10
Can you help me, please? What is wrong with my deployment?
Where should my server.js file be hosted? Also on Vercel?
Thanks a lot!
Before using a custom server, you should read the docs on its disadvantages. They state this:
"A custom server can not be deployed on Vercel, the platform Next.js was made for.
Before deciding to use a custom server please keep in mind that it should only be used when the integrated router of Next.js can't meet your app requirements. A custom server will remove important performance optimizations, like serverless functions and Automatic Static Optimization."
Reference
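If the goal is just the cookie-based redirect on /, a hedged alternative that does run on Vercel is the built-in redirects() config in next.config.js (cookie matching requires Next.js 10.2 or later; the cookie name and value below, and the destination path, are hypothetical placeholders, not taken from the question):

// next.config.js
module.exports = {
  async redirects() {
    return [
      {
        source: '/',
        // only redirect when the user-language cookie is present;
        // 'user-lang' and '/es' are placeholders for your own values
        has: [{ type: 'cookie', key: 'user-lang', value: 'es' }],
        destination: '/es',
        permanent: false,
      },
    ];
  },
};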

Next.js returns 500: internal server error in Production

I created a Next.js full-stack application. After the production build, when I run next start it returns 500: Internal Server Error. I'm using environment variables for hitting the API.
.env.development file:
BASE_URL=http://localhost:3000
It was working fine in development.
service.ts
import axios from 'axios';

const axiosDefaultConfig = {
  baseURL: process.env.BASE_URL, // is this line the reason for the error?
  headers: {
    'Access-Control-Allow-Origin': '*'
  }
};

const axio = axios.create(axiosDefaultConfig);

export class Steam {
  static getGames = async () => {
    return await axio.get('/api/getAppList');
  };
}
Do you have a next.config.js file?
To add runtime configuration to your app, open next.config.js and add the publicRuntimeConfig and serverRuntimeConfig configs:
module.exports = {
  serverRuntimeConfig: {
    // Will only be available on the server side
    mySecret: 'secret',
    secondSecret: process.env.SECOND_SECRET, // Pass through env variables
  },
  publicRuntimeConfig: {
    // Will be available on both server and client
    staticFolder: '/static',
  },
}
To get access to the runtime configs in your app use next/config, like so:
import getConfig from 'next/config'

// Only holds serverRuntimeConfig and publicRuntimeConfig
const { serverRuntimeConfig, publicRuntimeConfig } = getConfig()

// Will only be available on the server-side
console.log(serverRuntimeConfig.mySecret)

// Will be available on both server-side and client-side
console.log(publicRuntimeConfig.staticFolder)

function MyImage() {
  return (
    <div>
      <img src={`${publicRuntimeConfig.staticFolder}/logo.png`} alt="logo" />
    </div>
  )
}

export default MyImage
I hope this helps.
I don't think you have set up your env configuration. You need to configure it for it to work. Try it without the env variable, and it should work fine!
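If you are on a Next.js version that doesn't load .env files on its own, a hedged sketch is to inline the variable at build time via the env key in next.config.js (the fallback value here is an assumption for local use):

// next.config.js
module.exports = {
  env: {
    // Inlined into the bundle at build time
    BASE_URL: process.env.BASE_URL || 'http://localhost:3000',
  },
};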

S3 bucket with credentials error

I'm having trouble using the Meteor Slingshot package with the S3-with-temporary-AWS-credentials setup. I keep getting the error Exception while invoking method 'slingshot/uploadRequest' InvalidClientTokenId: The security token included in the request is invalid.
I have absolutely no idea what I'm doing wrong. If I use Slingshot normally, without temporary credentials, it works fine.
import { Meteor } from 'meteor/meteor';
import moment from 'moment';

const cryptoRandomString = require('crypto-random-string');
var AWS = require('aws-sdk');
var sts = new AWS.STS();

Slingshot.createDirective('UserProfileResumeUpload', Slingshot.S3Storage.TempCredentials, {
  bucket: 'mybuckname', // change this to your s3's bucket name
  region: 'ap-southeast-2',
  acl: 'private',

  temporaryCredentials: Meteor.wrapAsync(function (expire, callback) {
    // AWS dictates that the minimum duration must be 900 seconds:
    var duration = Math.max(Math.round(expire / 1000), 900);
    sts.getSessionToken({
      DurationSeconds: duration
    }, function (error, result) {
      callback(error, result && result.Credentials);
    });
  }),

  authorize: function () {
    // Deny uploads if user is not logged in.
    if (!this.userId) {
      const message = 'Please login before posting files';
      throw new Meteor.Error('Login Required', message);
    }
    return true;
  },

  key: function () {
    return 'mydirectory' + '/' + cryptoRandomString(10) + moment().valueOf();
  }
});
Path: settings.json
{
  "AWSAccessKeyId": "myAWSKEYID",
  "AWSSecretAccessKey": "MyAWSSeceretAccessKey"
}
I've done it on the server side like this:
Slingshot.createDirective("UserProfileResumeUpload", Slingshot.S3Storage, {
  AWSAccessKeyId: Meteor.settings.AWS.AccessKeyId,
  AWSSecretAccessKey: Meteor.settings.AWS.SecretAccessKey,
  bucket: 'mybuckname', // change this to your s3's bucket name
  region: 'ap-southeast-2',
  acl: 'private',
  ...
});
and in settings.json:
{
  "AWS": {
    "AccessKeyId": "myAWSKEYID",
    "SecretAccessKey": "MyAWSSeceretAccessKey"
  }
}
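If you want to keep the temporary-credentials flow instead, a hedged sketch (assuming the same settings.json layout as above) is to hand the long-lived keys to the STS client explicitly, since InvalidClientTokenId usually means the getSessionToken call itself was signed with missing or invalid keys:

var AWS = require('aws-sdk');

// Credentials for the STS call itself; sts.getSessionToken() then returns
// the temporary credentials that Slingshot passes on to the browser.
var sts = new AWS.STS({
  accessKeyId: Meteor.settings.AWS.AccessKeyId,
  secretAccessKey: Meteor.settings.AWS.SecretAccessKey,
  region: 'ap-southeast-2'
});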

Firebase Storage & Cloud Functions - ECONNRESET

I developed a Firebase Cloud Function that performs several manipulations on uploaded images.
My code is based on this documentation article and this Cloud Function example. Hence, it is using the Google Cloud Storage package.
It works fine almost all the time, but sometimes I get this error when uploading to or deleting from Storage:
Error: read ECONNRESET
at exports._errnoException (util.js:1026:11)
at TLSWrap.onread (net.js:569:26)
I am using the default bucket of my application, referenced by event.data.bucket.
Let me know if you need additional information or code snippets, even if my code is really close to the Function example I linked before.
I found this GitHub issue, but I checked that I am returning a promise every time. For example, here is the deletion part that triggers the error:
index.js
exports.exampleFunction = functions.storage.object().onChange(event => {
  return f_thumbnails.exampleFunction(event);
});
example_function.js
module.exports = exports = function (_admin, _config) {
  admin = _admin;
  config = _config;
  return {
    "exampleFunction": function (event) {
      return exampleFunction(event);
    }
  };
};

const exampleFunction = function (event) {
  const gcsSourceFilePath = event.data.name;
  const gcsSourceFilePathSplit = gcsSourceFilePath.split('/');
  const gcsBaseFolder = gcsSourceFilePathSplit.length > 0 ? gcsSourceFilePathSplit[0] : '';
  const gcsSourceFileName = gcsSourceFilePathSplit.pop();
  const gceSourceFileDir = gcsSourceFilePathSplit.join('/') + (gcsSourceFilePathSplit.length > 0 ? '/' : '');

  // Not an image
  if (!event.data.contentType.startsWith('image/')) {
    console.log('Not an image !');
    return;
  }

  // Thumbnail
  if (gcsSourceFileName.startsWith(config.IMAGES_THUMBNAIL_PREFIX)) {
    console.log('Thumbnail !');
    return;
  }

  const bucket = gcs.bucket(event.data.bucket);
  const gcsThumbnailFilePath = gceSourceFileDir + config.IMAGES_THUMBNAIL_PREFIX + gcsSourceFileName;

  // File deletion
  if (event.data.resourceState === 'not_exists') {
    console.log('Thumbnail deletion : ' + gcsThumbnailFilePath);
    return bucket.file(gcsThumbnailFilePath).delete().then(() => {
      console.log('Deleted thumbnail ' + gcsThumbnailFilePath);
    });
  }
  ...
This seems to be related to the google-cloud-node library's handling of sockets and the default socket timeout in the Cloud Functions environment.
One solution, verified by a user, is to modify the way the library invokes requests so that it does not keep the socket open forever, by specifying forever: false, e.g.:
var request = require('request').defaults({
  timeout: 60000,
  gzip: true,
  forever: false,
  pool: {
    maxSockets: Infinity
  }
});
This is hardcoded in packages/common/src/utils.js, so you'll need to vendor a copy of the modified library into your project rather than include it as an NPM dependency. See the related public issue for more details on the issue and a link to a fork with the patch applied.
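As a hedged illustration of vendoring the patched library (the path below is a placeholder, and this assumes the storage client comes from @google-cloud/storage): copy the modified package into your repo and point package.json at it with a file: dependency instead of the registry version:

// package.json
"dependencies": {
  "@google-cloud/storage": "file:./vendor/google-cloud-storage"
}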
