I am trying to ship all Airflow logs to Kafka by attaching a new handler to the root logger, but not all logs are being published. Do I need to configure something else here?
This is what I'm doing:
custom_log_config.py
from copy import deepcopy

from airflow.config_templates.airflow_local_settings import DEFAULT_LOGGING_CONFIG

LOGGING_CONFIG = deepcopy(DEFAULT_LOGGING_CONFIG)

# Configure a new handler for publishing logs to Kafka
# (get_app_env() and log_file are defined elsewhere in my setup)
environment = get_app_env()
LOGGING_CONFIG["handlers"]["kafka_handler"] = {
    "class": "com.test.log_handler.KafkaHandler",
    "formatter": "airflow",
    "version": environment.version,
    "log_file": log_file,
    "filters": ["mask_secrets"],
}

# Attach the handler to Airflow's root logger
LOGGING_CONFIG["root"]["handlers"].append("kafka_handler")
Finally, I'm setting the Airflow config to use the new logging config class described above:
airflow.logging__logging_config_class=com.test.log_handler.custom_log_config.LOGGING_CONFIG
While some logs do flow to Kafka, I'm missing the task run logs (e.g. records emitted from taskinstance.py, standard_task_runner.py, and cli_action_loggers.py).
Look at the DEFAULT_LOGGING_CONFIG:
'loggers': {
    'airflow.processor': {
        'handlers': ['processor'],
        'level': LOG_LEVEL,
        'propagate': False,
    },
    'airflow.task': {
        'handlers': ['task'],
        'level': LOG_LEVEL,
        'propagate': False,
        'filters': ['mask_secrets'],
    },
    'flask_appbuilder': {
        'handlers': ['console'],
        'level': FAB_LOG_LEVEL,
        'propagate': True,
    },
},
You will find that tasks use a separate logger, "airflow.task", which has propagate set to False, so task-run log records stop at that logger and never reach the root logger (and therefore never reach your kafka_handler). To ship task logs to Kafka, attach the handler to that logger as well.
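A minimal sketch of what that could look like in your custom_log_config.py, reusing the kafka_handler you already define there (appending to "airflow.processor" as well is optional, in case you also want scheduler/processor logs in Kafka):

# Attach the Kafka handler to the non-propagating loggers as well
LOGGING_CONFIG["loggers"]["airflow.task"]["handlers"].append("kafka_handler")
LOGGING_CONFIG["loggers"]["airflow.processor"]["handlers"].append("kafka_handler")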
Related
I am using RTK Query and Redux Toolkit for this app, and I created an API slice with createApi, as per the docs.
When I run a request to the backend, I get a "FETCH_ERROR"; however, when I run the same request using Axios, I get the data correctly from the backend, which leads me to believe I have an error in my code. I am just not sure where exactly it is.
Here is the error:
Object {
  "api": Object {
    "config": Object {
      "focused": true,
      "keepUnusedDataFor": 60,
      "middlewareRegistered": true,
      "online": true,
      "reducerPath": "api",
      "refetchOnFocus": false,
      "refetchOnMountOrArgChange": false,
      "refetchOnReconnect": false,
    },
    "mutations": Object {},
    "provided": Object {},
    "queries": Object {
      "test(undefined)": Object {
        "endpointName": "test",
        "error": Object {
          "error": "TypeError: Network request failed",
          "status": "FETCH_ERROR",
        },
        "requestId": "BWOuLpOxoDKTzlUYFLW4x",
        "startedTimeStamp": 1643667104869,
        "status": "rejected",
      },
    },
    "subscriptions": Object {
      "test(undefined)": Object {
        "QJSCV641RznGWyudGWuMb": Object {
          "pollingInterval": 0,
          "refetchOnFocus": undefined,
          "refetchOnReconnect": undefined,
        },
      },
    },
  },
  "test": Object {
    "data": Array [],
  },
}
Here is the test slice:
import { createSlice } from "@reduxjs/toolkit";

const testSlice = createSlice({
  name: "test",
  initialState: {
    data: [],
  },
  reducers: {
    // Note: this reducer is currently a no-op; it never modifies state.
    getData: (state) => {
      state;
    },
  },
});

export const { getData } = testSlice.actions;
export default testSlice.reducer;
Here is the apiSlice:
import { createApi, fetchBaseQuery } from "@reduxjs/toolkit/query/react";

export const apiSlice = createApi({
  reducerPath: "test",
  baseQuery: fetchBaseQuery({ baseUrl: process.env.REACT_APP_backend_url }),
  endpoints: (builder) => ({
    test: builder.query({
      query: () => "/test",
    }),
  }),
});

export const { useTestQuery } = apiSlice;
I solved it by changing the backend URL in my .env file to my machine's current IPv4 address (needed for Expo development; otherwise use whatever your backend URL is), then clearing the cache and restarting the app. In my case I was using Expo, so expo r -c did the trick.
I have started Alexa development very recently. Today I suddenly started getting an error ("The requested skill did not provide a valid response", shown below) which I did not encounter before.
The lambda function (index.js):
"use strict";
const Alexa = require("ask-sdk-core");
const http = require("http");
exports.handler = async (event, context, callback) => {
try {
if (event.request.type === "LaunchRequest") {
var welcomeMessage = '<speak>Hi</speak>';
callback(null, buildResponse(welcomeMessage, false));
}
else if (event.request.type === "AMAZON.CancelIntent") {
var msg2 = "<speak>Stopped!</speak>";
callback(null, buildResponse(msg2, true));
}
} catch (e) {
context.fail("Exception: ${e}");
}
};
function buildResponse(response, shouldEndSession) {
return {
version: "1.0",
response: {
outputSpeech: {
type: "SSML",
ssml: response
},
shouldEndSession: shouldEndSession
},
sessionAttributes: {}
};
}
package.json file:
{
    "name": "third-test-skill",
    "version": "1.0.0",
    "description": "...",
    "main": "index.js",
    "scripts": {
        "test": "echo \"Error: no test specified\" && exit 1"
    },
    "author": "Subrata Sarkar",
    "license": "ISC",
    "dependencies": {
        "ask-sdk-core": "^2.5.2",
        "ask-sdk-model": "^1.15.1"
    }
}
Steps I followed to create the skill:
Created a skill from the AWS Alexa console
Created the skill
Added sample utterances
Selected Lambda as the endpoint and created a function called movieFacts
Uploaded a .zip file containing the following file structure:
node_modules
|- ask-sdk-core
|- ask-sdk-model
|- ask-sdk-runtime
index.js
package.json
package.json.lock
When I say "movie facts", I get the following message:
The requested skill did not provide a valid response
And this is the JSON output I am receiving:
{
"version": "1.0",
"session": {
"new": false,
"sessionId": "amzn1.echo-api.session.beca8832-50fe-4d17-96a4-30c855b18a4f",
"application": {
"applicationId": "amzn1.ask.skill.bdb88b1b-5a4a-4b37-9b63-71e78337bbca"
},
"user": {
"userId": "amzn1.ask.account.AEG2YALM6KQANVKR3YSUWKVN5DCKE66NJKN23SZIKRKZCVTU67E2JBZ5STPFIN325WNGGO5Z73FMVVL5X2SVEM27YEPD5VFNMPVDQSQK5XYW3NXOXSEIK6YPHE5HTZLGLCWW4VVQHLYECL6YBLG4XOTM2HTV5VCCQMPLVCIATFRSNS4DLHJFLY32JHD5N5MAPFBNRVN3YV7B53A"
}
},
"context": {
"System": {
"application": {
"applicationId": "amzn1.ask.skill.bdb88b1b-5a4a-4b37-9b63-71e78337bbca"
},
"user": {
"userId": "amzn1.ask.account.AEG2YALM6KQANVKR3YSUWKVN5DCKE66NJKN23SZIKRKZCVTU67E2JBZ5STPFIN325WNGGO5Z73FMVVL5X2SVEM27YEPD5VFNMPVDQSQK5XYW3NXOXSEIK6YPHE5HTZLGLCWW4VVQHLYECL6YBLG4XOTM2HTV5VCCQMPLVCIATFRSNS4DLHJFLY32JHD5N5MAPFBNRVN3YV7B53A"
},
"device": {
"deviceId": "amzn1.ask.device.AFXLD474IMHMD5V35NT2ZNUD5YLK2LTEJZUMO6DS2MY7ANONMZDZ67C3MU44OBJ6B5N4TPOXIJ64FBEFEOVOB2K4SSYEN3VTRSIHZETNTBNCDYUG6RGFIOKH7S7OBID6CG3WIHB774LNO4CFKWFUXYSNHD5HIAAXCEDKZ3U4EN7QB6EN4RRHQ",
"supportedInterfaces": {}
},
"apiEndpoint": "https://api.amazonalexa.com",
"apiAccessToken": "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsImtpZCI6IjEifQ.eyJhdWQiOiJodHRwczovL2FwaS5hbWF6b25hbGV4YS5jb20iLCJpc3MiOiJBbGV4YVNraWxsS2l0Iiwic3ViIjoiYW16bjEuYXNrLnNraWxsLmJkYjg4YjFiLTVhNGEtNGIzNy05YjYzLTcxZTc4MzM3YmJjYSIsImV4cCI6MTU1NzI0MTIxMCwiaWF0IjoxNTU3MjQwOTEwLCJuYmYiOjE1NTcyNDA5MTAsInByaXZhdGVDbGFpbXMiOnsiY29uc2VudFRva2VuIjpudWxsLCJkZXZpY2VJZCI6ImFtem4xLmFzay5kZXZpY2UuQUZYTEQ0NzRJTUhNRDVWMzVOVDJaTlVENVlMSzJMVEVKWlVNTzZEUzJNWTdBTk9OTVpEWjY3QzNNVTQ0T0JKNkI1TjRUUE9YSUo2NEZCRUZFT1ZPQjJLNFNTWUVOM1ZUUlNJSFpFVE5UQk5DRFlVRzZSR0ZJT0tIN1M3T0JJRDZDRzNXSUhCNzc0TE5PNENGS1dGVVhZU05IRDVISUFBWENFREtaM1U0RU43UUI2RU40UlJIUSIsInVzZXJJZCI6ImFtem4xLmFzay5hY2NvdW50LkFFRzJZQUxNNktRQU5WS1IzWVNVV0tWTjVEQ0tFNjZOSktOMjNTWklLUktaQ1ZUVTY3RTJKQlo1U1RQRklOMzI1V05HR081WjczRk1WVkw1WDJTVkVNMjdZRVBENVZGTk1QVkRRU1FLNVhZVzNOWE9YU0VJSzZZUEhFNUhUWkxHTENXVzRWVlFITFlFQ0w2WUJMRzRYT1RNMkhUVjVWQ0NRTVBMVkNJQVRGUlNOUzRETEhKRkxZMzJKSEQ1TjVNQVBGQk5SVk4zWVY3QjUzQSJ9fQ.UyCg4MXlOe16SlOyJnjAIiHzVpdLkRjd-izoKkUnGqiyZ0L_5eUpg8tKvVrCvTLNMtJS6ElksxgVfuLcNeOIwSbXtYCOXcSLRYbpcpgFI6oeamOZ2Yo-UMDEjzYi75fABuJyUJyZxp-Pieer8PMZO4G9-5zJXCVY2x3M_dmlpX23UBJDpW0DKddvAOzConmwgdaf3v_EWfc2q8BaCQIM950rEUbejOa08_AwE5CsqjNA9sD22QduE5hs09RV4-F-kU1zKvwwyDVDKyOkdFZQFEmCTC11_jI64re9c22e-hYR4leIE5XntNApMgtwaL-tHyjsJzVDVDfZd2q3w6wxYA"
},
"Viewport": {
"experiences": [
{
"arcMinuteWidth": 246,
"arcMinuteHeight": 144,
"canRotate": false,
"canResize": false
}
],
"shape": "RECTANGLE",
"pixelWidth": 1024,
"pixelHeight": 600,
"dpi": 160,
"currentPixelWidth": 1024,
"currentPixelHeight": 600,
"touch": [
"SINGLE"
],
"video": {
"codecs": [
"H_264_42",
"H_264_41"
]
}
}
},
"request": {
"type": "SessionEndedRequest",
"requestId": "amzn1.echo-api.request.c7b1b910-6309-48aa-af35-10ac0a20b5da",
"timestamp": "2019-05-07T14:55:10Z",
"locale": "en-US",
"reason": "ERROR",
"error": {
"type": "INVALID_RESPONSE",
"message": "An exception occurred while dispatching the request to the skill."
}
}
}
I think removing the <speak> and </speak> tags around the welcome message should do it. They are generated by Alexa by default, so you don't need to provide them in your response.
What happened in my case (and it is definitely worth checking) is that I was using the Serverless Framework to deploy the Lambda; I changed the configuration in serverless.yml but forgot to update the Lambda ARN in the Alexa endpoint.
Updating the endpoint to the correct ARN and rebuilding solved the issue.
I tried the Airflow tutorial DAG and it works with the scheduler; I can see the logs generated by the scheduled runs. But if I test it from the command line, I don't see the output:
airflow test my_tutorial_2 templated 2018-09-08
[2018-09-10 15:41:43,121] {__init__.py:51} INFO - Using executor SequentialExecutor
[2018-09-10 15:41:43,281] {models.py:258} INFO - Filling up the DagBag from /Users/xiang/Documents/BigData/airflow/dags
[2018-09-10 15:41:43,338] {example_kubernetes_operator.py:54} WARNING - Could not import KubernetesPodOperator: No module named 'kubernetes'
[2018-09-10 15:41:43,339] {example_kubernetes_operator.py:55} WARNING - Install kubernetes dependencies with: pip install airflow['kubernetes']
That is all the output; the output of my task is not there.
The airflow version is:
▶ pip list
Package Version
---------------- ---------
alembic 0.8.10
apache-airflow 1.10.0
If you use Airflow v1.10, you can set the propagate attribute of the task instance logger to True; the log records will then be propagated to the root logger, which uses the console handler and prints to sys.stdout.
Adding ti.log.propagate = True after line 589 of site-packages/airflow/bin/cli.py does the trick.
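For context, this is plain Python logging propagation at work; here is a minimal, self-contained sketch (not Airflow-specific) of what flipping propagate changes:

import logging
import sys

# Root logger with a stdout handler, similar in spirit to Airflow's 'console' handler.
logging.basicConfig(stream=sys.stdout, level=logging.INFO)

task_log = logging.getLogger("airflow.task")

task_log.propagate = False
task_log.info("not printed: the record stops at this handler-less logger")

task_log.propagate = True
task_log.info("printed: the record bubbles up to the root logger's stdout handler")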
I've since found that whilst setting 'console' as a handler for the airflow.task logger lets you see the output of 'airflow test' commands, it also seems to cause 'airflow run' commands to enter an infinite loop and run out of memory. I would therefore only do this in an environment where you only run 'airflow test' commands.
Why it does this I don't know yet, and whether there's a way to accomplish this without breaking 'airflow run' is unclear to me.
The default logging config for Airflow 1.10.0 has the following loggers available:
'loggers': {
    'airflow.processor': {
        'handlers': ['processor'],
        'level': LOG_LEVEL,
        'propagate': False,
    },
    'airflow.task': {
        'handlers': ['task'],
        'level': LOG_LEVEL,
        'propagate': False,
    },
    'flask_appbuilder': {
        'handlers': ['console'],
        'level': FAB_LOG_LEVEL,
        'propagate': True,
    }
},
and the airflow.task logger (which is the logger used when running your task) uses the 'task' handler:
'handlers': {
    'console': {
        'class': 'airflow.utils.log.logging_mixin.RedirectStdHandler',
        'formatter': 'airflow',
        'stream': 'sys.stdout'
    },
    'task': {
        'class': 'airflow.utils.log.file_task_handler.FileTaskHandler',
        'formatter': 'airflow',
        'base_log_folder': os.path.expanduser(BASE_LOG_FOLDER),
        'filename_template': FILENAME_TEMPLATE,
    },
    'processor': {
        'class': 'airflow.utils.log.file_processor_handler.FileProcessorHandler',
        'formatter': 'airflow',
        'base_log_folder': os.path.expanduser(PROCESSOR_LOG_FOLDER),
        'filename_template': PROCESSOR_FILENAME_TEMPLATE,
    },
},
which (unless changed) will only write the output of the task to a log file. If you want to see the output on stdout as well, then you need to add the console handler to the list of handlers used by the airflow.task logger:
'airflow.task': {
    'handlers': ['task', 'console'],
    'level': LOG_LEVEL,
    'propagate': False,
},
This can be done either by setting up a custom logging configuration class, which overrides the default configuration, or by editing the default settings file:
wherever_you_installed_airflow/site-packages/airflow/config_templates/airflow_local_settings.py
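If you go the custom logging configuration class route, a minimal sketch could look like the following (the module name log_config.py and its location on your PYTHONPATH are assumptions, not something Airflow prescribes):

# log_config.py -- assumed to live somewhere on PYTHONPATH
from copy import deepcopy

from airflow.config_templates.airflow_local_settings import DEFAULT_LOGGING_CONFIG

LOGGING_CONFIG = deepcopy(DEFAULT_LOGGING_CONFIG)

# Add the console handler to airflow.task so task output also reaches stdout
LOGGING_CONFIG["loggers"]["airflow.task"]["handlers"].append("console")

and then point Airflow at it in airflow.cfg (in 1.10 the option lives under [core]):

logging_config_class = log_config.LOGGING_CONFIG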
I ran into this problem as well with Airflow 1.10.0. As Louis Genasi mentioned, airflow run would go into a death spiral with the default settings and console handler. I suspect there may be a bug with the default logging class in 1.10.0.
I got around the issue by changing the logging handler to Python's logging.StreamHandler (which appears to be the default in Airflow < 1.10.0):
'handlers': {
    'console': {
        'class': 'logging.StreamHandler',
        'formatter': 'airflow',
        'stream': 'ext://sys.stdout'
    },
},
'loggers': {
    'airflow.processor': {
        'handlers': ['console'],
        'level': LOG_LEVEL,
        'propagate': False,
    },
    'airflow.task': {
        'handlers': ['console'],
        'level': LOG_LEVEL,
        'propagate': False,
    },
    'flask_appbuilder': {
        'handlers': ['console'],
        'level': FAB_LOG_LEVEL,
        'propagate': True,
    }
},
'root': {
    'handlers': ['console'],
    'level': LOG_LEVEL,
}
I am trying to setup VSCode 1.6.1 to be able to debug Meteor 1.4.1.2 apps written in CoffeeScript 1.11.1_1.
In the VSCode launch.json file, I have defined two attach configurations:
- one to debug the server-side code using the built-in node debugger
- one to debug the client-side code using the chrome debugger extension
After a debugger statement, I have put breakpoints in all 3 parts of the code of the Meteor leaderboard example: in the isomorphic section running on both the client and the server, as well as in the client-only and server-only sections of the code.
When I attach either debugger while the app is paused on the debugger statement, the breakpoints turn grey with the warning: "breakpoints ignored because generated code not found (source map problem?)"
The launch.json file is the following:
{
    "version": "0.2.0",
    "configurations": [
        {
            "name": "Launch",
            "type": "node",
            "request": "launch",
            "program": "${workspaceRoot}/leaderboard.coffee",
            "stopOnEntry": false,
            "args": [],
            "cwd": "${workspaceRoot}",
            "preLaunchTask": null,
            "runtimeExecutable": null,
            "runtimeArgs": [
                "--nolazy"
            ],
            "env": {
                "NODE_ENV": "development"
            },
            "console": "internalConsole",
            "sourceMaps": true,
            "outFiles": [
                "${workspaceRoot}/.meteor/local/build/programs/server/app/app.js",
                "${workspaceRoot}/.meteor/local/build/programs/web.browser/app/app.js"
            ]
        },
        {
            "name": "Attach Server",
            "type": "node",
            "request": "attach",
            "port": 5858,
            "address": "localhost",
            "restart": false,
            "sourceMaps": true,
            "outFiles": ["${workspaceRoot}/.meteor/local/build/programs/server/app/app.js"],
            "localRoot": "${workspaceRoot}",
            "remoteRoot": null
        },
        {
            "name": "Attach Client",
            "type": "chrome",
            "request": "attach",
            "port": 9222,
            "address": "localhost",
            "sourceMaps": true
            // "url": "//localhost:9222/"
        },
        {
            "name": "Attach to Process",
            "type": "node",
            "request": "attach",
            "processId": "${command.PickProcess}",
            "port": 5858,
            "sourceMaps": false,
            "outFiles": []
        }
    ]
}
The source leaderboard.coffee file is the following:
class Player
  constructor: (@name, @score = Math.floor(Math.random() * 10)) ->
  inc: -> @score = @score + 1

class Leaderboard extends Mongo.Collection
  insertNewPlayer: (playerName) ->
    player = new Player(playerName)
    @insert player
  insertPlayers: (playerNames) ->
    # @remove({})
    @insertNewPlayer(playerName) for playerName in playerNames
    self = @
  insertRank: =>
    sortedPlayers = @find {}, {sort: {score: -1, name: 1}}, {reactive: false}
      .fetch()
    previousPlayer = sortedPlayers[0]
    rank = 1
    @update {_id: previousPlayer._id}, {$set: {rank: rank, rankStr: "#{rank}"}}
    tiedCounter = 1
    for player in sortedPlayers[1..]
      do (player) =>
        if player.score is previousPlayer.score
          tiedCounter = tiedCounter + 1
          @update {_id: player._id}, {$set: {rank: rank, rankStr: "-"}}
        else
          rank = rank + tiedCounter
          @update {_id: player._id}, {$set: {rank: rank, rankStr: "#{rank}"}}
        previousPlayer = player
    self = @

debugger
aflLeaderboard = new Leaderboard("aflLeaderboard") unless aflLeaderboard?

# debugger
if Meteor.isClient
  Template.leaderboard.helpers
    players: -> aflLeaderboard.find {}, {sort: {score: -1, name: 1}}
  #Template.leaderboard.events
  #  'click tr#sortButtonRow>th#sortButtonCell>button': leaderboard = players()
  Template.player.events
    'click tr>td>button': ->
      aflLeaderboard.update {_id: player._id}, {$inc: {score: 1}}

# debugger
if Meteor.isServer
  Meteor.startup ->
    if not (aflLeaderboard? and aflLeaderboard.findOne({})?)
      aflLeaderboard.insertPlayers [
        "Heath Shaw"
        "Alex Rance"
        "Easton Wood"
      ]
    aflLeaderboard.insertRank()
    self = @
Is it really a source map problem?
If it is, what's wrong with my launch.json file?
Thanks
I think you need to check these two properties:
"program": "${file}", // important: make sure you are debugging the current file
"outFiles": [ // important: where to find the source-mapped js files
    "${workspaceFolder}/dist/api/api.js"
]
I've set up a Gruntfile that looks like the following, the aim being to perform e2e tests using Protractor for my AngularJS project. When I run this using mochaProtractor, Chrome fires up as expected, but the lint is telling me that I'm missing the dependencies for the expect statement, which should reference the assertion library chai. How do I include the chai dependency to get this to work correctly?
Thanks
module.exports = function(grunt) {

    // Project configuration.
    grunt.initConfig({
        watch: {
            scripts: {
                files: ['public/specs/e2e/*.js'],
                tasks: ['mochaProtractor', 'jshint'],
                options: {
                    spawn: false,
                },
            },
        },
        jshint: {
            all: [
                'Gruntfile.js',
                'public/app/js/*.js'
            ],
            options: {
                curly: true,
                eqeqeq: true,
                immed: true,
                latedef: true,
                newcap: true,
                noarg: true,
                sub: true,
                undef: true,
                unused: true,
                boss: true,
                eqnull: true,
                node: true
            }
        },
        mochaProtractor: {
            options: {
                browsers: ['Chrome']
            },
            files: ['public/specs/e2e/*.js'],
            baseUrl: 'http://localhost:3000/'
        },
    });

    // These plugins provide necessary tasks.
    grunt.loadNpmTasks('grunt-contrib-jshint');
    grunt.loadNpmTasks('grunt-contrib-watch');
    grunt.loadNpmTasks('grunt-mocha-protractor');
};
Below is the spec I'm trying to test against. Note: I added the require statement at the top in an attempt to get it to work. Any thoughts?
var chai = require('chai');
var expect = chai.expect;

describe("Given a task entry screen", function() {
    ptor = protractor.getInstance();

    beforeEach(function() {
        ptor.get('#/');
        button = ptor.findElement(protractor.By.className('btn-say-hello'));
        button.click();
    });

    it('says hello', function() {
        message = ptor.findElement(protractor.By.className('message'));
        expect(message.getText()).toEqual('Hello!');
    });
});
You have to add chai as a dev dependency in your package.json.
{
    "name": "yourProject",
    "devDependencies": {
        "chai": "~1.8.1"
    }
}
then install the dependency via
npm install
and then you should be able to write a spec:
var chai = require('chai');
var expect = chai.expect;

describe("Given a task entry screen", function() {
    ptor = protractor.getInstance();

    beforeEach(function() {
        ptor.get('#/');
        button = ptor.findElement(protractor.By.className('btn-say-hello'));
        button.click();
    });

    it('says hello', function() {
        message = ptor.findElement(protractor.By.className('message'));
        expect(message.getText()).toEqual('Hello!');
    });
});