How to communicate between a Python server and a PHP client using gRPC?
I can communicate using a Python client and server, but I am confused about how to do the same with a PHP client, and about protocol buffers in general; a small program explaining the entire process would be very helpful.
I have gone through a lot of documentation but am still very confused about the actual flow.
calculator.proto
syntax = "proto3";
message Request {
    int32 num1 = 1;
    int32 num2 = 2;
}

message Response {
    int32 result = 1;
}

service Calculator {
    rpc Sum(Request) returns (Response) {}
}
calculator.py
def sum(x1, x2):
    y = x1 + x2
    return y
server.py
import grpc
from concurrent import futures
import time

# import the generated classes
import calculator_pb2
import calculator_pb2_grpc

# import the original calculator.py
import calculator


# create a class to define the server functions, derived from
# calculator_pb2_grpc.CalculatorServicer
class CalculatorServicer(calculator_pb2_grpc.CalculatorServicer):

    # calculator.sum is exposed here
    def Sum(self, request, context):
        response = calculator_pb2.Response()
        response.result = calculator.sum(request.num1, request.num2)
        print('Result:', response.result)
        return response


# create a gRPC server
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))

# use the generated function `add_CalculatorServicer_to_server`
# to add the defined class to the server
calculator_pb2_grpc.add_CalculatorServicer_to_server(
    CalculatorServicer(), server)

# listen on port 50051
print('Starting server. Listening on port 50051.')
server.add_insecure_port('[::]:50051')
server.start()

# since server.start() will not block,
# a sleep-loop is added to keep alive
try:
    while True:
        time.sleep(86400)
except KeyboardInterrupt:
    server.stop(0)
client.py
import grpc

# import the generated classes
import calculator_pb2
import calculator_pb2_grpc

# open a gRPC channel
channel = grpc.insecure_channel('localhost:50051')

# create a stub (client)
stub = calculator_pb2_grpc.CalculatorStub(channel)

while True:
    try:
        # create a valid request message
        numbers = calculator_pb2.Request(num1=int(input("Enter number1: ")),
                                         num2=int(input("Enter number2: ")))
        # make the call
        response = stub.Sum(numbers)
        print('Result:', response.result)
    except KeyboardInterrupt:
        print("KeyboardInterrupt")
        channel.close()
        exit()
This setup returns the sum of the two numbers, computed on the Python server.
I want the same functionality with Python as the server and PHP as the client.
I was able to communicate between a PHP client and a Python server. Below are the steps and an example:
Steps:
--------------------------------
Python
--------------------------------
1. Make a folder named grpc
2. Install pip for Python
3. Install virtualenv
python -m pip install virtualenv
4. Create and activate a virtual environment, then upgrade pip
virtualenv venv
source venv/bin/activate
python -m pip install --upgrade pip
5. Install grpcio
python -m pip install grpcio
6. Install the gRPC tools
python -m pip install grpcio-tools
7. Generate the protobuf classes from calculator.proto (a quick import check is shown right after this list)
python -m grpc_tools.protoc -I./proto --python_out=. --grpc_python_out=. ./proto/calculator.proto
8. Make server.py (shown below)
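Step 7 should leave calculator_pb2.py and calculator_pb2_grpc.py next to your code. As a quick sanity check, a minimal sketch (the file name check_gen.py is just an example, and it assumes the calculator.proto shown at the end of this answer, i.e. package Calculator with SumRequest/SumResponse):
check_gen.py
# hypothetical helper, not part of the original steps:
# confirms the generated modules import and can build a message
import calculator_pb2
import calculator_pb2_grpc

# build a request message and print it in protobuf text format
request = calculator_pb2.SumRequest(num1=3, num2=4)
print(request)

# the client stub class generated for the Calculator service
print(calculator_pb2_grpc.CalculatorStub)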
---------------------------------
PHP
---------------------------------
1. Install Composer and pull in the PHP dependencies (your composer.json will typically need the grpc/grpc and google/protobuf packages)
composer install
2. Install the gRPC extension with pecl (on Debian/Ubuntu the pecl command comes with php-pear); after installation, enable the extension by adding extension=grpc.so to your php.ini
sudo apt-get install php-dev
sudo pecl install grpc
3. Download protoc (version 3.7 or newer)
commands:
PROTOC_ZIP=protoc-3.7.1-linux-x86_64.zip
curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v3.7.1/$PROTOC_ZIP
sudo unzip -o $PROTOC_ZIP -d /usr/local bin/protoc
sudo unzip -o $PROTOC_ZIP -d /usr/local 'include/*'
rm -f $PROTOC_ZIP
4. Clone the gRPC repo
git clone -b v1.27.0 https://github.com/grpc/grpc
5. Build the PHP plugin and generate the route_guide example
cd grpc && git submodule update --init && make grpc_php_plugin
cd examples/php/route_guide
./route_guide_proto_gen.sh
6. Move back to the repository root, then run: make grpc_php_plugin
7. Generate the PHP classes (copy calculator.proto into examples/protos first)
protoc --proto_path=examples/protos --php_out=examples/php/route_guide --grpc_out=examples/php/route_guide --plugin=protoc-gen-grpc=bins/opt/grpc_php_plugin ./examples/protos/calculator.proto
8. Make index.php (shown below)
index.php
<?php
include __DIR__ . '/vendor/autoload.php';
include __DIR__ . '/grpc/examples/php/route_guide/Calculator/CalculatorClient.php';
include __DIR__ . '/grpc/examples/php/route_guide/Calculator/SumRequest.php';
include __DIR__ . '/grpc/examples/php/route_guide/Calculator/SumResponse.php';
include __DIR__ . '/grpc/examples/php/route_guide/GPBMetadata/Calculator.php';

function add()
{
    // connect to the Python server
    $client = new \Calculator\CalculatorClient('localhost:6000', [
        'credentials' => Grpc\ChannelCredentials::createInsecure(),
    ]);

    while (true) {
        try {
            // request object
            $request = new \Calculator\SumRequest();
            echo " Num1: ";
            // num1
            $request->setNum1((int) rtrim(fgets(STDIN)));
            echo " Num2: ";
            // num2
            $request->setNum2((int) rtrim(fgets(STDIN)));

            list($res, $status) = $client->Sum($request)->wait();
            // result
            echo $res->getResult() . "\n";
        } catch (Exception $error) {
            echo $error;
        }
    }
}

try {
    add();
    // phpinfo();
} catch (Exception $e) {
    echo $e;
}
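With the Python server below running on port 6000, starting the client from the command line (for example, php index.php) should prompt for Num1 and Num2 and print their sum.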
calculator.py
def sum(x1, x2):
    y = x1 + x2
    return y
server.py
import grpc
from concurrent import futures
import time

# import the generated classes
import calculator_pb2
import calculator_pb2_grpc

# import the original calculator.py
import calculator


# create a class to define the server functions, derived from
# calculator_pb2_grpc.CalculatorServicer
class CalculatorServicer(calculator_pb2_grpc.CalculatorServicer):

    # calculator.sum is exposed here
    def Sum(self, request, context):
        response = calculator_pb2.SumResponse()
        response.result = calculator.sum(request.num1, request.num2)
        print('Result:', response.result)
        return response


# create a gRPC server
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))

# use the generated function `add_CalculatorServicer_to_server`
# to add the defined class to the server
calculator_pb2_grpc.add_CalculatorServicer_to_server(
    CalculatorServicer(), server)

# listen on port 6000
print('Starting server. Listening on port 6000.')
server.add_insecure_port('[::]:6000')
server.start()

# since server.start() will not block,
# a sleep-loop is added to keep alive
try:
    while True:
        time.sleep(86400)
except KeyboardInterrupt:
    server.stop(0)
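Before moving on to the PHP client, it can help to confirm the server answers on port 6000 from Python. Below is a minimal sketch (the file name test_client.py is just an example), assuming the stubs were generated from the calculator.proto shown next:
test_client.py
import grpc

# import the generated classes
import calculator_pb2
import calculator_pb2_grpc

# open a channel to the server started above
channel = grpc.insecure_channel('localhost:6000')
stub = calculator_pb2_grpc.CalculatorStub(channel)

# build a SumRequest and call the Sum RPC once
request = calculator_pb2.SumRequest(num1=3, num2=4)
response = stub.Sum(request)
print('Result:', response.result)  # expected: Result: 7

channel.close()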
calculator.proto
syntax = "proto3";
package Calculator;
message SumRequest {
int64 num1 = 1;
int64 num2 = 2;
}
message SumResponse{
int64 result = 1;
}
service Calculator {
rpc Sum(SumRequest) returns (SumResponse) {}
}
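Note that the same calculator.proto is used on both sides: the Python stubs from step 7 of the Python section and the PHP classes from step 7 of the PHP section are both generated from it, so the package, message, and service names have to match.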
Also go through these links for a better understanding:
PHP - https://grpc.io/docs/tutorials/basic/php/
Python - https://grpc.io/docs/tutorials/basic/python/