Httpbeat Metrics not showing up in Kibana

Dear all,
I'm new to Kibana/Elasticsearch/Httpbeat, and setting it up is causing me a bit of a headache...
Httpbeat runs and pumps data into Elasticsearch:
However, when I try to create a visualization I run into trouble;
somehow the data is not there...
This might also be useful:
And the template JSON:
{
  "mappings": {
    "_default_": {
      "_meta": {
        "version": "5.4.0"
      },
      "dynamic_templates": [
        {
          "strings_as_keyword": {
            "mapping": {
              "ignore_above": 1024,
              "type": "keyword"
            },
            "match_mapping_type": "string"
          }
        }
      ],
      "properties": {
        "@timestamp": {
          "type": "date"
        },
        "beat": {
          "properties": {
            "hostname": {
              "ignore_above": 1024,
              "type": "keyword"
            },
            "name": {
              "ignore_above": 1024,
              "type": "keyword"
            },
            "version": {
              "ignore_above": 1024,
              "type": "keyword"
            }
          }
        },
        "meta": {
          "properties": {
            "cloud": {
              "properties": {
                "availability_zone": {
                  "ignore_above": 1024,
                  "type": "keyword"
                },
                "instance_id": {
                  "ignore_above": 1024,
                  "type": "keyword"
                },
                "machine_type": {
                  "ignore_above": 1024,
                  "type": "keyword"
                },
                "project_id": {
                  "ignore_above": 1024,
                  "type": "keyword"
                },
                "provider": {
                  "ignore_above": 1024,
                  "type": "keyword"
                },
                "region": {
                  "ignore_above": 1024,
                  "type": "keyword"
                }
              }
            }
          }
        },
        "request": {
          "properties": {
            "body": {
              "ignore_above": 1024,
              "type": "keyword"
            },
            "headers": {
              "properties": {},
              "type": "nested"
            },
            "method": {
              "ignore_above": 1024,
              "type": "keyword"
            },
            "url": {
              "ignore_above": 1024,
              "type": "keyword"
            }
          }
        },
        "response": {
          "properties": {
            "body": {
              "ignore_above": 1024,
              "type": "keyword"
            },
            "code": {
              "ignore_above": 1024,
              "type": "keyword"
            },
            "headers": {
              "properties": {},
              "type": "nested"
            },
            "jsonBody": {
              "properties": {
                "globalTime": {
                  "type": "long"
                }
              }
            }
          }
        },
        "tags": {
          "ignore_above": 1024,
          "type": "keyword"
        }
      }
    }
  },
  "order": 0,
  "settings": {
    "index.mapping.total_fields.limit": 10000,
    "index.refresh_interval": "1m"
  },
  "template": "httpbeat-*"
}
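A quick way to check whether the template is installed, whether an index was created, and how the fields were actually mapped (assuming the default localhost:9200 endpoint from the config below):

# check the installed template
curl 'http://localhost:9200/_template/httpbeat?pretty'
# list the httpbeat indices that exist
curl 'http://localhost:9200/_cat/indices/httpbeat-*?v'
# compare the live mapping against the template
curl 'http://localhost:9200/httpbeat-*/_mapping?pretty'
# fetch one document to confirm data is arriving
curl 'http://localhost:9200/httpbeat-*/_search?size=1&pretty'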
The httpbeat.yml:
######################## Httpbeat Configuration Example ########################
############################## Httpbeat ########################################
httpbeat:
hosts:
# Each - Host endpoints to call. Below are the host endpoint specific configurations
-
# Optional cron expression, defines when to poll the host endpoint.
# Default is every 1 minute.
schedule: "#every 1m"
# The URL endpoint to call by Httpbeat
url: (a correct url)
# HTTP method to use.
# Possible options are:
# * get
# * delete
# * head
# * patch
# * post
# * put
method: get
# Optional additional headers to send to the endpoint
#headers:
#Accept: application/json
# Optional basic authentication
basic_auth:
# Basic authentication username
username: theetsa
# Basic authentication password
password: (a very secret password)
# Type to be published in the 'type' field. For Elasticsearch output,
# the type defines the document type these entries should be stored
# in. Default: httpbeat
#document_type:
# Optional output format for the response body.
# Possible options are:
# * string
# * json
# Default output format is 'string'
output_format: json
# Optional convertion of dots in keys in JSON response body. By default is off.
# Possible options are:
# * replace - replaces dots with a different character. The default value is `_`.
# * unflatten - converts {"foo.bar":false} to {"foo":{"bar":false}}
#json_dot_mode: replace
# Optional additional headers to send to the endpoint
#headers:
#Accept: application/json
# Enable SSL support. SSL is automatically enabled, if any SSL setting is set.
#ssl.enabled: true
# Configure SSL verification mode. If `none` is configured, all server hosts
# and certificates will be accepted. In this mode, SSL based connections are
# susceptible to man-in-the-middle attacks. Use only for testing. Default is
# `full`.
#ssl.verification_mode: full
# List of supported/valid TLS versions. By default all TLS versions 1.0 up to
# 1.2 are enabled.
#ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
# Optional SSL configuration options. SSL is off by default.
# List of root certificates for HTTPS server verifications
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
# Certificate for SSL client authentication
#ssl.certificate: "/etc/pki/client/cert.pem"
# Client Certificate Key
#ssl.key: "/etc/pki/client/cert.key"
# Optional passphrase for decrypting the Certificate Key.
#ssl.key_passphrase: ''
# Configure cipher suites to be used for SSL connections
#ssl.cipher_suites: []
# Configure curve types for ECDHE based cipher suites
#ssl.curve_types: []
#================================ General =====================================
# The name of the shipper that publishes the network data. It can be used to group
# all the transactions sent by a single shipper in the web interface.
#name:
# The tags of the shipper are included in their own field with each
# transaction published.
#tags: ["service-X", "web-tier"]
# Optional fields that you can specify to add additional information to the
# output.
#fields:
# env: staging
#================================ Outputs =====================================
# Configure what outputs to use when sending the data collected by the beat.
# Multiple outputs may be used.
#-------------------------- Elasticsearch output ------------------------------
output.elasticsearch:
# Array of hosts to connect to.
hosts: ["localhost:9200"]
# Optional protocol and basic auth credentials.
#protocol: "https"
#username: "elastic"
#password: "changeme"
#----------------------------- Logstash output --------------------------------
#output.logstash:
# The Logstash hosts
#hosts: ["localhost:5044"]
# Optional SSL. By default is off.
# List of root certificates for HTTPS server verifications
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
# Certificate for SSL client authentication
#ssl.certificate: "/etc/pki/client/cert.pem"
# Client Certificate Key
#ssl.key: "/etc/pki/client/cert.key"
#================================ Logging =====================================
# Sets log level. The default log level is info.
# Available log levels are: critical, error, warning, info, debug
#logging.level: debug
# At debug level, you can selectively enable logging only for some components.
# To enable all selectors use ["*"]. Examples of other selectors are "beat",
# "publish", "service".
#logging.selectors: ["*"]
I really don't know what I'm doing wrong :-/
I tried to use the same settings as in Metricbeat, where the graphs do work. I also looked inside the logs but couldn't find anything useful there...
I noticed that the beat version is 4.0.0, which might be the issue, I really don't know :-/
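For reference, the beat can be run in the foreground with debug logging to see exactly what it publishes; these are the standard libbeat flags, so they should apply to Httpbeat as well:

# run in the foreground, log to stderr (-e), enable all debug selectors (-d "*")
./httpbeat -e -d "*" -c httpbeat.yml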
Thanks for any help or pointers...
S.

I'm not sure what did the trick, but I
stopped Httpbeat,
stopped Elasticsearch,
deleted all indexes,
ran rm -Rf data/nodes/0/*,
restarted Elasticsearch,
and used this template,
httpbeat.template-es2x.json:
{
  "mappings": {
    "my_type": {
      "_meta": {
        "version": "5.4.0"
      },
      "dynamic_templates": [
        {
          "integers": {
            "match_mapping_type": "long",
            "mapping": {
              "type": "integer"
            }
          }
        }
      ],
      "properties": {
        "@timestamp": {
          "type": "date"
        },
        "response": {
          "properties": {
            "jsonBody": {
              "properties": {
                "globalTime": {
                  "type": "long"
                }
              }
            }
          }
        }
      },
      "fields": {
        "properties": {}
      }
    }
  },
  "order": 0,
  "settings": {
    "index.mapping.total_fields.limit": 10000,
    "index.refresh_interval": "1m"
  },
  "template": "httpbeat-*"
}
(the "fields" entry near the end is the part I write more about below)
and restarted everything
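Roughly, the reset sequence above as shell commands (a sketch assuming a local single-node setup; service names and the data path depend on your installation):

# stop the shipper and the cluster first
sudo service httpbeat stop
sudo service elasticsearch stop
# drop the old indices; the data path is relative to the Elasticsearch home
rm -Rf data/nodes/0/*
sudo service elasticsearch start
# install the fixed template before the beat starts writing again
curl -XPUT 'http://localhost:9200/_template/httpbeat' -d @httpbeat.template-es2x.json
sudo service httpbeat start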
I think mostly the 'fields' entry was important; when I used the template without it, I got an error in Kibana about something with 'fields', and:
"fields": {
  "properties": {}
}
was something that was present inside metricbeat.template-es2x.json but not in httpbeat.template-es2x.json. It seems to work with that entry inside httpbeat.template-es2x.json, and not with httpbeat.template.json...
Grtz,
S.
ps: if you have an answer that is not based on trial and error I'll accept that instead of this one...

Related

Integromat app {{connection.param}} not working

I am trying to set up my own App in Integromat.
What my App requires is a URL (and later a Bearer Token), entered manually by the user who wants to use my App.
I have the App's Base:
{
  "baseUrl": "{{connection.url}}",
  "log": {
    "sanitize": ["request.headers.authorization"]
  }
}
a Connection:
Parameters:
[
  {
    "name": "url",
    "label": "url",
    "type": "text",
    "required": true,
    "value": "https://my-server"
  }
]
and the Scenario:
{
  "url": "/api/endpoint",
  "method": "GET",
  "qs": {},
  "headers": "{{connection.headers}}",
  "response": {
    "output": "{{body}}"
  }
}
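With the connection parameter above, the module's relative url should be appended to the baseUrl, so the resolved call would look roughly like this (my understanding of how Integromat combines them; https://my-server is just the parameter's default value):

# the scenario's GET with {{connection.url}} resolved into the baseUrl
curl -X GET 'https://my-server/api/endpoint'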
When I execute the scenario from my App, the URL does not seem to be taken over correctly from the one configured inside the connection parameters.
Can someone help?
Everything was right. I had to delete the old Connection and create a new one.

Logic App workflow creating or updating documents in CosmosDB fails with conflict

I'm in the process of creating a Logic App which should copy all documents from specific containers in the database to a different database and its corresponding collections.
For this, I've created a workflow which retrieves all documents, loops through them and tries to do a Create or Update document on the target location.
As you can see on the image, this looks fairly straightforward.
I've specified the PartitionKey in the header and Upsert is set to true, so if a document already exists it'll be updated, otherwise created.
Originally, the body was filled with @items('[blurred]_-_For_each_document').
However, I got an error stating there was a conflict on the id.
I've also tried to remove all 'system' properties like so:
@removeProperty(removeProperty(removeProperty(removeProperty(removeProperty(items('[blurred]_-_For_each_document'), 'id'), '_rid'), '_self'), '_ts'), '_etag')
but it appears not having an id in place isn't valid, so now I've got this as I'm only interested in having the contents of the 'actual' document:
@removeProperty(removeProperty(removeProperty(removeProperty(items('[blurred]_-_For_each_document'), '_rid'), '_self'), '_ts'), '_etag')
This still fails.
The raw input looks a bit like this:
{
  "method": "post",
  "headers": {
    "x-ms-documentdb-raw-partitionkey": "1050",
    "x-ms-documentdb-is-upsert": "True"
  },
  "path": "/dbs/[myDatabase]/colls/[myCollection]/docs",
  "host": {
    "connection": {
      "name": "/subscriptions/[subscriptionGuid]/resourceGroups/[resourcegroup]/providers/Microsoft.Web/connections/[connectionName]"
    }
  },
  "body": {
    "id": "2faee185-4a51-4797-bff9-3ce23a603690",
    "MyPartitionKeyNumber": 1050,
    "SomeValue": false
  }
}
With the following response:
{
  "code": "Conflict",
  "message": "Entity with the specified id already exists in the system., ... ResourceType: Document, OperationType: Upsert ..."
}
I know for a fact the id doesn't exist, but I changed the step to do a PUT anyway.
{
  "method": "put",
  "headers": {
    "x-ms-documentdb-raw-partitionkey": "1050",
    "x-ms-documentdb-is-upsert": "True"
  },
  "path": "/dbs/[myDatabase]/colls/[myCollection]/docs",
  "host": {
    "connection": {
      "name": "/subscriptions/[subscriptionGuid]/resourceGroups/[resourcegroup]/providers/Microsoft.Web/connections/[connectionName]"
    }
  },
  "body": {
    "id": "2faee185-4a51-4797-bff9-3ce23a603690",
    "MyPartitionKeyNumber": 1050,
    "SomeValue": false
  }
}
With a response I'm expecting to see:
{
  "statusCode": 404,
  "message": "Resource not found"
}
I've also tried to do a Delete document before running the Create or Update document step, but got the same error(s).
There's some post here on Stack Overflow stating the x-ms-documentdb-raw-partitionkey should be x-ms-documentdb-partitionkey (without the raw-part), but doing so results in the following error message:
PartitionKey extracted from document doesn't match the one specified in the header
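One thing worth double-checking (an assumption on my part, not verified against this connector): in the underlying Cosmos DB REST API, the partition-key header carries the value as a JSON array, and its element type has to match the document property, so a numeric MyPartitionKeyNumber should be sent as 1050, not "1050". A raw sketch of the request shape (auth headers omitted; [myAccount] is a placeholder):

# sketch only: a real call also needs the Cosmos DB authorization headers
# note the partition key is a JSON array and 1050 is a number, not a string
curl -X POST "https://[myAccount].documents.azure.com/dbs/[myDatabase]/colls/[myCollection]/docs" \
  -H 'x-ms-documentdb-partitionkey: [1050]' \
  -H 'x-ms-documentdb-is-upsert: true' \
  -d '{"id": "2faee185-4a51-4797-bff9-3ce23a603690", "MyPartitionKeyNumber": 1050, "SomeValue": false}'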
For completeness' sake, these are the relevant steps of my workflow:
"[blurred]_-_Get_all_documents": {
"type": "ApiConnection",
"inputs": {
"host": {
"connection": {
"name": "#parameters('$connections')['documentdb_3']['connectionId']"
}
},
"method": "get",
"path": "/v2/dbs/#{encodeURIComponent('myDatabase')}/colls/#{encodeURIComponent(items('[blurred]_-_For_each_collection'))}/docs"
},
"runAfter": {}
},
"[blurred]_-_For_each_document": {
"type": "Foreach",
"foreach": "#body('[blurred]_-_Get_all_documents')?['value']",
"actions": {
"[blurred]_-_Create_or_Update_document_on_blurred2": {
"type": "ApiConnection",
"inputs": {
"host": {
"connection": {
"name": "#parameters('$connections')['documentdb_4']['connectionId']"
}
},
"method": "post",
"body": "#removeProperty(removeProperty(removeProperty(removeProperty(items('[blurred]_-_For_each_document'), '_rid'), '_self'), '_ts'), '_etag')",
"headers": {
"x-ms-documentdb-raw-partitionkey": "#{items('[blurred]_-_For_each_document')['MyPartitionKeyNumber']}",
"x-ms-documentdb-is-upsert": true
},
"path": "/dbs/#{encodeURIComponent('myDatabase')}/colls/#{encodeURIComponent(items('[blurred]_-_For_each_collection'))}/docs"
},
"runAfter": {}
}
},
"runAfter": {
"[blurred]_-_Get_all_documents": [
"Succeeded"
]
}
}

Google Cloud Endpoints: Transcoding messages with protobuf.Any fields

I have a gRPC service that takes as input a message that contains fields of type protobuf.Any, and I can't figure out how to write correct JSON input for it. I am running on GKE, with the Cloud Endpoints ESP and my service running in the same pod.
The protos look like:
message AnyArray {
  repeated google.protobuf.Any value = 1;
}

message Metric {
  string metric = 1;
  int64 timestamp = 2;
  double value = 3;
  map<string, AnyArray> metadata = 4;
}
I have tried multiple combinations for the input JSON with no luck; most of the time Cloud Endpoints returns "Proto field is not repeating, cannot start list." Failed examples:
{
  "metadata": {
    "sample-key": {
      "value": [1, "one"]
    }
  },
  "metric": "request-count",
  "timestamp": 1528425789,
  "value": 0
}
{
  "metadata": {
    "sample-key": {
      "value": [{
        "@type": "type.googleapis.com/google.protobuf.Duration",
        "value": "1.212s"
      }, {
        "@type": "type.googleapis.com/google.protobuf.Duration",
        "value": "1.212s"
      }]
    }
  },
  "metric": "request-count",
  "timestamp": 1528425789,
  "value": 0
}
Response from ESP
{
  "code": 3,
  "message": "metadata[0].value: Proto field is not repeating, cannot start list.",
  "details": [{
    "@type": "type.googleapis.com/google.rpc.DebugInfo",
    "stackEntries": [],
    "detail": "internal"
  }]
}
Any help would be greatly appreciated.
Thanks!
google.protobuf.Any means any proto message type, so your first example doesn't work. For the second example, google.protobuf.Duration has a special JSON mapping, so the following example should work:
{
  "metadata": {
    "sample-key": {
      "value": [{
        "@type": "type.googleapis.com/google.protobuf.Duration",
        "value": "1.212s"
      }, {
        "@type": "type.googleapis.com/google.protobuf.Duration",
        "value": "1.212s"
      }]
    }
  },
  "metric": "request-count",
  "timestamp": 1528425789,
  "value": 0
}
Note you will have to include all possible proto message types for the google.protobuf.Any in the proto descriptor set provided to the Cloud Endpoints service, otherwise it will fail to translate unknown types.
Another possible approach is to use google.protobuf.Struct for your metadata, which eliminates the limitation above, but you will have to convert it to your expected types in your gRPC service.
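For illustration, the Struct-based variant could look roughly like this (a sketch of the suggestion above, not the poster's actual proto; with Struct each metadata value is a free-form JSON object rather than a typed Any list):

// sketch only: Metric with Struct-typed metadata instead of AnyArray
import "google/protobuf/struct.proto";

message Metric {
  string metric = 1;
  int64 timestamp = 2;
  double value = 3;
  // Struct accepts arbitrary JSON objects during transcoding, so no
  // descriptor registration is needed for the embedded values; the gRPC
  // service has to convert them to the expected types itself.
  map<string, google.protobuf.Struct> metadata = 4;
}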
Using protobuf.Struct instead of protobuf.Any solved my problem, as @lizan recommended. Thanks!

"AnonymousModel" Embedded Models in "Application Model" with Push Component and Postgre

How is it possible to use the Application Model with APNS settings and Postgres?
The Application Model has embedded models.
Am I right that in traditional databases the embedded models are simply saved as objects?
The string field of pushSettings is a varchar(1024).
The Push Example does this:
pushSettings: {
  apns: {
    certData: config.push.apnsCertData,
    keyData: config.push.apnsKeyData,
    feedbackOptions: {
      batchFeedback: true,
      interval: 300
    }
  },
  gcm: {
    serverApiKey: config.push.gcmServerApiKey
  }
}
The certData and keyData are too long for the 1024 chars.
So how to use this correctly with Postgres?
Right now the only thing I see is to extend the application model and set the pushSettings field to a larger value, but I am not able to get this to work either.
Please, please help me.
Regards
I managed to get it to work like I thought in the first place.
The extended application model JSON:
{
  "name": "application",
  "base": "Application",
  "properties": {
    "pushSettings": {
      "postgresql": {
        "dataType": "text",
        "dataLength": null,
        "nullable": "YES"
      },
      "apns": {
        "production": {
          "type": "boolean",
          "description": [
            "Production or development mode. It denotes what default APNS",
            "servers to be used to send notifications.",
            "See API documentation for more details."
          ]
        },
        "certData": {
          "type": "string",
          "description": "The certificate data loaded from the cert.pem file"
        },
        "keyData": {
          "type": "string",
          "description": "The key data loaded from the key.pem file"
        },
        "pushOptions": {
          "type": {
            "gateway": "string",
            "port": "number"
          }
        },
        "feedbackOptions": {
          "type": {
            "gateway": "string",
            "port": "number",
            "batchFeedback": "boolean",
            "interval": "number"
          }
        }
      },
      "gcm": {
        "serverApiKey": "string"
      }
    }
  }
}
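The "postgresql" block above is what turns the column into text. If the table already exists, the column can presumably also be widened directly (an assumption based on LoopBack's default lowercase table/column naming for PostgreSQL; mydb is a placeholder, adjust names to what autoupdate actually created):

# hypothetical: widen the existing varchar(1024) column in place
psql -d mydb -c 'ALTER TABLE application ALTER COLUMN pushsettings TYPE text;'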

Azure Resource Manager - Multiple VM NAT Rules

I am trying to create an ARM template that will provision multiple webservers with directly accessible ports. For instance, I want a VM to have either port 9001 or 9002 open based on the index of the VM.
I am struggling to get the frontendPort parameter to accept a function. Here is the documentation that I have used.
Here is what the relevant portion of my template looks like:
"inboundNatRules": [
{
"copy": {
"name": "natCopy",
"count": "[parameters('numberOfVms')]"
},
"name": "[concat('directHttps-', copyIndex())]",
"properties": {
"frontendIPConfiguration": {
"id": "[concat(variables('lbID'),'/frontendIPConfigurations/LoadBalancerFrontEnd')]"
},
"frontendPort": "[add(9001, copyIndex())]",
"backendPort": 9001,
"enableFloatingIP": false,
"idleTimeoutInMinutes": 4,
"protocol": "Tcp",
"backendIPConfiguration": {
"id": "[resourceId('Microsoft.Network/networkInterfaces', concat(variables('vmNicName'), copyIndex()), 'ipconfig')]"
}
}
}
]
I was hoping that this particular expression would result in either 9001 or 9002:
"frontendPort": "[add(9001, copyIndex())]"
Instead, I see an error in Visual Studio's Intellisense, and when I try to deploy the solution.
Create template deployment 'deploymenttemplate-0107-1555'.
New-AzureRmResourceGroupDeployment : Resource Microsoft.Network/loadBalancers 'webserverLb'
failed with message 'Unable to process template language expressions for resource
'/subscriptions/some random guid/resourceGroups/webservers/providers/Microsoft.Network/loadBalancers/webserverLb'
at line '102' and column '10'. 'The template function 'copyIndex' is not expected at this location.
The function can only be used in a resource with copy specified.
Long story short, I'm simply trying to have the same number of NAT rules as I have VMs in the template, and dynamically assign the external port number.
Please let me know if I can provide any more information. Thank you.
Try:
[concat(900, copyIndex(1))]
which will offset the index (0 based) and give you the number you want.
This is the syntax that works for copying the NAT rules (I am adding an RDP rule on the standard back-end port):
"copy": [
{
"name": "inboundNatRules",
"count": "[parameters('numberOfWebInstances')]",
"input": {
"name": "[concat(parameters('lbNatRulePrefix'), copyindex('inboundNatRules'))]",
"properties": {
"frontendIPConfiguration": {
"id": "[variables('lbFrontEndIpId')]"
},
"frontendPort": "[add(50001, copyIndex('inboundNatRules'))]",
"backendPort": 3389,
"enableFloatingIP": false,
"idleTimeoutInMinutes": 4,
"protocol": "tcp"
}
}
}
],
And then to apply the rules to the NIC, you actually need to add some code on the NIC itself. The following is for both LB rules and NAT rules:
"loadBalancerBackendAddressPools": [
{
"id": "[concat(variables('lbID'), '/backendAddressPools/', parameters('lbPoolName'))]"
}
],
"loadBalancerInboundNatRules": [
{
"id": "[concat(variables('lbID'),'/inboundNatRules/' , parameters('lbNatRulePrefix'), copyindex())]"
}
]
Your script is wrong: you are writing copyIndex(), but you need to pass the name of the copy loop; then it should work.
"inboundNatRules": [
{
"copy": {
"name": "natCopy",
"count": "[parameters('numberOfVms')]"
},
"name": "[concat('directHttps-', copyIndex(natCopy,1))]",
"properties": {
"frontendIPConfiguration": {
"id": "[concat(variables('lbID'),'/frontendIPConfigurations/LoadBalancerFrontEnd')]"
},
"frontendPort": "[add(9001, copyIndex(natCopy,1))]",
"backendPort": 9001,
"enableFloatingIP": false,
"idleTimeoutInMinutes": 4,
"protocol": "Tcp",
"backendIPConfiguration": {
"id": "[resourceId('Microsoft.Network/networkInterfaces', concat(variables('vmNicName'), copyIndex(natCopy,1)), 'ipconfig')]"
}
}
}
$LoadBalancer = Get-AzureRmLoadBalancer -ResourceGroupName $ResourceGroupName -Name $LoadBalancerName
$publicIP1 = Get-AzureRmPublicIpAddress -name $pipName -resourcegroupname $ResourceGroupName
$frontendIP1 = Get-AzureRmLoadBalancerFrontendIpConfig -LoadBalancer $LoadBalancer -Name $FrontendIpConfigName
$LoadBalancer | Add-AzureRmLoadBalancerInboundNatRuleConfig -Name "nat_rule_tcp_IP1_49157" -FrontendIpConfiguration $frontendIP1 -IdleTimeoutInMinutes 4 -Protocol TCP -FrontendPort 49157 -BackendPort 49157 | Set-AzureRmLoadBalancer
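To confirm the rules actually landed on the load balancer after deployment, something like this should work (a sketch using the Azure CLI; resource names taken from the error message above):

# list the inbound NAT rules on the load balancer from the question
az network lb inbound-nat-rule list --resource-group webservers --lb-name webserverLb --output table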
