For the JSON below, I need result.id and result.name output using jq, but only for the entries having
authorization.roles[].name == "Supervisor"
What is the jq command to do that? For the JSON below, we expect only id 1231 and name AAAA as output, since that is the only entry with the Supervisor role.
{
"results": [{
"id": "1231",
"name": "AAAA",
"div": {
"id": "AAA",
"name": "DDSAA",
"selfUri": ""
},
"chat": {
"jabberId": "nn"
},
"department": "Shared Services Organization",
"email": "Test#gmail.com",
"primaryContactInfo": [{
"address": "Test#gmail.com",
"mediaType": "EMAIL",
"type": "PRIMARY"
}],
"addresses": [],
"state": "active",
"title": "AAA",
"username": "Test#gmail.com",
"version": 27,
"authorization": {
"roles": [{
"id": "01256689-c5ed-43a5-b370-58522402830d",
"name": "AA"
}, {
"id": "1e65b009-9f8f-4eef-9844-83944002c095",
"name": "BBB"
}, {
"id": "8a19f1ff-40e5-45d2-b758-14550a173323",
"name": "CCC"
}, {
"id": "d02250e2-7071-46bf-885b-43edff2d88a6",
"name": "Supervisor"
}]
}
}, {
"id": "1255",
"name": "BBBB",
"div": {
"id": "AAA",
"name": "DDSAA",
"selfUri": ""
},
"chat": {
"jabberId": "nn"
},
"department": "Shared Services Organization",
"email": "Test#gmail.com",
"primaryContactInfo": [{
"address": "Test#gmail.com",
"mediaType": "EMAIL",
"type": "PRIMARY"
}],
"addresses": [],
"state": "active",
"title": "AAA",
"username": "Test#gmail.com",
"version": 27,
"authorization": {
"roles": [{
"id": "01256689-c5ed-43a5-b370-58522402830d",
"name": "AA"
}, {
"id": "1e65b009-9f8f-4eef-9844-83944002c095",
"name": "BBB"
}, {
"id": "8a19f1ff-40e5-45d2-b758-14550a173323",
"name": "CCC"
}, {
"id": "d02250e2-7071-46bf-885b-43edff2d88a6",
"name": "Tester"
}]
}
}]
}
Don't put commas before closing brackets or curly braces (it's not valid JSON). Your input should look like this:
{
"results": [
{
"id": "1231",
"name": "AAAA",
"div": {
"id": "AAA",
"name": "DDSAA",
"selfUri": ""
},
"chat": {
"jabberId": "nn"
},
"department": "Shared Services Organization",
"email": "Test#gmail.com",
"primaryContactInfo": [
{
"address": "Test#gmail.com",
"mediaType": "EMAIL",
"type": "PRIMARY"
}
],
"addresses": [],
"state": "active",
"title": "AAA",
"username": "Test#gmail.com",
"version": 27,
"authorization": {
"roles": [
{
"id": "01256689-c5ed-43a5-b370-58522402830d",
"name": "AA"
},
{
"id": "1e65b009-9f8f-4eef-9844-83944002c095",
"name": "BBB"
},
{
"id": "8a19f1ff-40e5-45d2-b758-14550a173323",
"name": "CCC"
},
{
"id": "d02250e2-7071-46bf-885b-43edff2d88a6",
"name": "Supervisor"
}
]
}
},
{
"id": "1255",
"name": "BBBB",
"div": {
"id": "AAA",
"name": "DDSAA",
"selfUri": ""
},
"chat": {
"jabberId": "nn"
},
"department": "Shared Services Organization",
"email": "Test#gmail.com",
"primaryContactInfo": [
{
"address": "Test#gmail.com",
"mediaType": "EMAIL",
"type": "PRIMARY"
}
],
"addresses": [],
"state": "active",
"title": "AAA",
"username": "Test#gmail.com",
"version": 27,
"authorization": {
"roles": [
{
"id": "01256689-c5ed-43a5-b370-58522402830d",
"name": "AA"
},
{
"id": "1e65b009-9f8f-4eef-9844-83944002c095",
"name": "BBB"
},
{
"id": "8a19f1ff-40e5-45d2-b758-14550a173323",
"name": "CCC"
},
{
"id": "d02250e2-7071-46bf-885b-43edff2d88a6",
"name": "Tester"
}
]
}
}
]
}
Then you can use select to narrow down your target objects (here using any to check whether at least one of the role names matches your string -- thx @ikegami), then output any part of the resulting object(s):
jq '
.results[]
| select(any(.authorization.roles[]; .name == "Supervisor"))
| {id, name}
'
{
"id": "1231",
"name": "AAAA"
}
If instead of a JSON output you need raw text, use the -r (or --raw-output) flag, and provide the fields you are interested in:
jq -r '
.results[]
| select(any(.authorization.roles[]; .name == "Supervisor"))
| .id, .name
'
1231
AAAA
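If you want both fields on one line per match, jq string interpolation should also work (a variant of the filter above; a sketch, not from the original answer):
jq -r '
.results[]
| select(any(.authorization.roles[]; .name == "Supervisor"))
| "\(.id) \(.name)"
'
1231 AAAA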
I have a dictionary dct with two lists, set1 and set2, of different lengths.
dct = {
"id": "1234",
"set1": [
{
"sub_id": "1234a",
"details": [
{
"sum": "10",
"label": "pattern1"
}
],
},
{
"sub_id": "1234b",
"details": [
{
"sum": "10",
"label": "pattern3"
}
],
}
],
"set2": [
{
"sub_id": "3463a",
"details": [
{
"sum": "20",
"label": "pattern1"
}
],
},
{
"sub_id": "3463b",
"details": [
{
"sum": "100",
"label": "pattern2"
}
],
},
{
"sub_id": "3463c",
"details": [
{
"sum": "100",
"label": "pattern3"
}
],
}
]
}
I need to check, for each label, whether the corresponding sum has changed, and if so, subtract one from the other.
pairs1 = []
pairs2 = []

# Collect (label, sum) pairs from the details of each set
for d in dct['set1']:
    for dd in d['details']:
        pairs1.append((dd['label'], dd['sum']))
for d in dct['set2']:
    for dd in d['details']:
        pairs2.append((dd['label'], dd['sum']))

# For labels present in both sets, store the difference of the sums
result = {}
for p in pairs1:
    for pp in pairs2:
        if p[0] == pp[0]:
            result[p[0]] = int(pp[1]) - int(p[1])
result
Output something like:
{'pattern1': 10, 'pattern3': 90}
Is there a better way to iterate through the nested dictionary?
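One flatter alternative (a sketch, assuming each label occurs at most once per set, as in the sample) is to build {label: sum} lookups first and then compare them with a dict comprehension:

def flatten(entries):
    # {label: sum} across all details entries in one set
    return {d['label']: int(d['sum'])
            for e in entries
            for d in e['details']}

s1 = flatten(dct['set1'])
s2 = flatten(dct['set2'])

# Only labels present in both sets; keep those whose sum changed
result = {label: s2[label] - s1[label]
          for label in s1.keys() & s2.keys()
          if s1[label] != s2[label]}
result
# {'pattern1': 10, 'pattern3': 90}  (key order may vary)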
I have never worked with JSON data in R and, unfortunately, I was sent a sample of data as:
{
"task_id": "104",
"status": "succeeded",
"metrics": {
"requests_made": 2,
"network_errors": 0,
"unique_locations_visited": 0,
"requests_queued": 0,
"queue_items_completed": 2,
"queue_items_waiting": 0,
"issue_events": 9,
"caption": "",
"progress": 100
},
"message": "",
"issue_events": [
{
"id": "1234",
"type": "issue_found",
"issue": {
"name": "policy not enforced",
"type_index": 123456789,
"serial_number": "123456789183923712",
"origin": "https://test.com",
"path": "/robots.txt",
"severity": "low",
"confidence": "certain",
"caption": "/robots.txt",
"evidence": [
{
"type": "FirstOrderEvidence",
"detail": {
"band_flags": [
"in_band"
]
},
"request_response": {
"url": "https://test.com/robots.txt",
"request": [
{
"type": "DataSegment",
"data": "jaghsdjgasdgaskjdgasdgashdgsahdgasjkdgh==",
"length": 313
}
],
"response": [
{
"type": "DataSegment",
"data": "asudasjdgasaaasgdasgaksjdhgasjdgkjghKGKGgKJgKJgKJGKgh==",
"length": 303
}
],
"was_redirect_followed": false,
"request_time": "1234567890"
}
}
],
"internal_data": "jdfhgjhJHkjhdskfhkjhjs0sajkdfhKHKhkj=="
}
},
{
"id": "1235",
"type": "issue_found",
"issue": {
"name": "certificate",
"type_index": 12345845684,
"serial_number": "123456789165637150",
"origin": "https://test.com",
"path": "/",
"severity": "info",
"confidence": "certain",
"description": "The server description a valid, trusted certificate. This issue is purely informational.<br><br>The server presented the following certificates:<br><br><h4>Server certificate</h4><table><tr><td><b>Issued to:</b> </td><td>test.ie, test.com, www.test.com, www.test.ie</td></tr><tr><td><b>Issued by:</b> </td><td>GeoTrust EV RSA CA 2018</td></tr><tr><td><b>Valid from:</b> </td><td>Tue May 12 00:00:00 UTC 2020</td></tr><tr><td><b>Valid to:</b> </td><td>Tue May 17 12:00:00 UTC 2022</td></tr></table><h4>Certificate chain #1</h4><table><tr><td><b>Issued to:</b> </td><td>GeoTrust EV RSA CA 2018</td></tr><tr><td><b>Issued by:</b> </td><td> High Assurance EV Root CA</td></tr><tr><td><b>Valid from:</b> </td><td>Mon Nov 06 12:22:46 UTC 2017</td></tr><tr><td><b>Valid to:</b> </td><td>Sat Nov 06 12:22:46 UTC 2027</td></tr></table><h4>Certificate chain #2</h4><table><tr><td><b>Issued to:</b> </td><td> High Assurance EV Root CA</td></tr><tr><td><b>Issued by:</b> </td><td> High Assurance EV Root CA</td></tr><tr><td><b>Valid from:</b> </td><td>Fri Nov 10 00:00:00 UTC 2006</td></tr><tr><td><b>Valid to:</b> </td><td>Mon Nov 10 00:00:00 UTC 2031</td></tr></table>",
"caption": "/",
"evidence": [],
"internal_data": "sjhdgsajdggJGJHgjfgjhGJHgjhsdgfgjhGJHGjhsdgfjhsgfdsjfg098867hjhgJHGJHG=="
}
},
{
"id": "1236",
"type": "issue_found",
"issue": {
"name": "without flag set",
"type_index": 1254392,
"serial_number": "12345678965616",
"origin": "https://test.com",
"path": "/robots.txt",
"severity": "info",
"confidence": "certain",
"description": "my description text here....",
"caption": "/robots.txt",
"evidence": [
{
"type": "InformationListEvidence",
"request_response": {
"url": "https://test.com/robots.txt",
"request": [
{
"type": "DataSegment",
"data": "adjkhajksdhaskjdhkjHKJHjkhaskjdhkjasdhKHKJHkjsdhfkjsdhfkjsdhKHJKHjksdfhsdjkfhksdjhKHKJHJKhsdkfjhsdkfjhKHJKHjksdkfjhsdkjfhKHKJHjkhsdkfjhsdkjfhsdjkfhksdjfhKJHKjksdhfsdjkfhksdjfhsdkjhKHJKhsdkfhsdkjfhsdkfhdskjhKHKjhsdfkjhsdjkfh==",
"length": 313
}
],
"response": [
{
"type": "DataSegment",
"data": "adjkhajksdhaskjdhkjHKJHjkhaskjdhkjasdhKHKJHkjsdhfkjsdhfkjsdhKHJKHjksdfhsdjkfhksdjhKHKJHJKhsdkfjhsdkfjhKHJKHjksdkfjhsdkjfhKHKJHjkhsdkfjhsdkjfhsdjkfhksdjfhKJHKjksdhfsdjkfhksdjfhsdkjhKHJKhsdkfhsdkjfhsdkfhdskjhKHKjhsdfkjhsdjkfh=",
"length": 161
},
{
"type": "HighlightSegment",
"data": "adjkhajksdhaskjdhkjHKJHjkhaskjdhkjasdhKHKJHkjsdhfkjsdhfkjsdhKHJKHjksdfhsdjkfhksdjhKHKJHJKhsdkfjhsdkfjhKHJKHjksdkfjhsdkjfhKHKJHjkhsdkfjhsdkjfhsdjkfhksdjfhKJHKjksdhfsdjkfhksdjfhsdkjhKHJKhsdkfhsdkjfhsdkfhdskjhKHKjhsdf=",
"length": 119
},
{
"type": "DataSegment",
"data": "AasjkdhasjkhkjHKJSDHFJKSDFHKhjkHSKADJFHKhjkhjkh=",
"length": 23
}
],
"was_redirect_followed": false,
"request_time": "178454751191465"
},
"information_items": [
"Other: user_id"
]
}
],
"internal_data": "adjkhajksdhaskjdhkjHKJHjkhaskjdhkjasdhKHKJHkjsdhfkjsdhfkjsdhKHJKHjksdfhsdjkfhksdjhKHKJHJKhsdkfjhsdkfjhKHJKHjksdkfjhsdkjfhKHKJHjkhsdkfjhsdkjfhsdjkfhksdjfhKJHKjksdhfsdjkfhksdjfhsdkjhKHJKhsdkfhsdkjfhsdkfhdskjhKH=="
}
},
{
"id": "1237",
"type": "issue_found",
"issue": {
"name": "without flag set",
"type_index": 1234567,
"serial_number": "123456789056704",
"origin": "https://test.com",
"path": "/",
"severity": "info",
"confidence": "certain",
"description": "long description here zjkhasdjkh hsajkdhsajkd hasjkdhbsjkdash d",
"caption": "/",
"evidence": [
{
"type": "InformationListEvidence",
"request_response": {
"url": "https://test.com/",
"request": [
{
"type": "DataSegment",
"data": "adjkhajksdhaskjdhkjHKJHjkhaskjdhkjasdhKHKJHkjsdhfkjsdhfkjsdhKHJKHjksdfhsdjkfhksdjhKHKJHJKhsdkfjhsdkfjhKHJKHjksdkfjhsdkjfhKHKJHjkhsdkfjhsdkjfhsdjkfhksdjfhKJHKjksdhfsdjkfhksdjfhsdkjhKHJKhsdkfhsdkjfhsdkfhdskjhKHKjhsdfkjhsdjkfhsfdsfdsfdsfdsfdsfsdfdsf",
"length": 303
}
],
"response": [
{
"type": "DataSegment",
"data": "adjkhajksdhaskjdhkjHKJHjkhaskjdhkjasdhKHKJHkjsdhfkjsdhfkjsdhKHJKHjksdfhsdjkfhksdjhKHKJHJKhsdkfjhsdkfjhKHJKHjksdkfjhsdkjfhKHKJHjkhsdkfjhsdkjfhsdjkfhksdjfhKJHKjksdhfsdjkfhksdjfhsdkjhKHJKhsdkfhsdkjfhsdkfhdskjhKHKjhsdfkjhsdjkfh==",
"length": 151
},
{
"type": "HighlightSegment",
"data": "adjkhajksdhaskjdhkjHKJHjkhaskjdhkjasdhKHKJHkjsdhfkjsdhfkjsdhKHJKHjksdfhsdjkfhksdjhKHKJHJKhsdkfjhsdkfjhKHJKHjksdkfjhsdkjfhKHKJHjkhsdkfjhsdkjfhsdjkfhksdjfhKJHKjksdhfsdjkfhksdjfhsdkjhKHJKhsdkfhsdkjfhsdkfhdskjhKHKjhsdfkjhsdjkfh=",
"length": 119
},
{
"type": "DataSegment",
"data": "sdfdsfsdfSDFSDFdSFDS546SDFSDFDSFG657=",
"length": 23
}
],
"was_redirect_followed": false,
"request_time": "123541191466"
},
"information_items": [
"Other: user_id"
]
}
],
"internal_data": "adjkhajksdhaskjdhkjHKJHjkhaskjdhkjasdhKHKJHkjsdhfkjsdhfkjsdhKHJKHjksdfhsdjkfhksdjhKHKJHJKhsdkfjhsdkfjhKHJKHjksdkfjhsdkjfhKHKJHjkhsdkfjhsdkjfhsdjkfhksdjfhKJHKjksdhfsdjkfhksdjfhsd=="
}
},
{
"id": "1238",
"type": "issue_found",
"issue": {
"name": "parameter pollution",
"type_index": 4137000,
"serial_number": "123456789810290176",
"origin": "https://test.com",
"path": "/robots.txt",
"severity": "low",
"confidence": "firm",
"description": "very long description text here...",
"caption": "/robots.txt [URL path filename]",
"evidence": [
{
"type": "FirstOrderEvidence",
"detail": {
"payload": {
"bytes": "Q3jkeiZkcmg8MQ==",
"flags": 0
},
"band_flags": [
"in_band"
]
},
"request_response": {
"url": "https://test.com/%3fhdz%26drh%3d1",
"request": [
{
"type": "DataSegment",
"data": "W1QOIC8=",
"length": 5
},
{
"type": "HighlightSegment",
"data": "WRMnBGR6JTI2ZHJoJTNkMQ==",
"length": 16
},
{
"type": "DataSegment",
"data": "adjkhajksdhaskjdhkjHKJHjkhaskjdhkjasdhKHKJHkjsdhfkjsdhfkjsdhKHJKHjksdfhsdjkfhksdjhKHKJHJKhsdkfjhsdkfjhKHJKHjksdkfjhsdkjfhKHKJHjkhsdkfjhsdkjfhsdjkfhksdjfhKJHKjksdhfsdjkfhksdjfhsdkjhKHJKhsdkfhsdkjfhsdkfhdskjhKHKjhsdfkjhsdjkfhcvxxcvklxcvjkxclvjxclkvjxcklvjlxckjvlxckjvklxcjvxcklvjxcklvjxckljvlxckjvxcklvjxckljvxcklvjcklxjvcxkl==",
"length": 298
}
],
"response": [
{
"type": "DataSegment",
"data": "adjkhajksdhaskjdhkjHKJHjkhaskjdhkjasdhKHKJHkjsdhfkjsdhfkjsdhKHJKHjksdfhsdjkfhksdjhKHKJHJKhsdkfjhsdkfjhKHJKHjksdkfjhsdkjfhKHKJHjkhsdkfjhsdkjfhsdjkfhksdjfhKJHKjksdhfsdjkfhksdjfhsdkjhKHJKhsdkfhsdkjfhsdkfhdskjhKHKjhsdfkjhsdjkfh==",
"length": 130
},
{
"type": "HighlightSegment",
"data": "Q4jleiZkcmg9MQ==",
"length": 10
},
{
"type": "DataSegment",
"data": "adjkhajksdhaskjdhkjHKJHjkhaskjdhkjasdhKHKJHkjsdhfkjsdhfkjsdhKHJKHjksdfhsdjkfhksdjhKHKJHJKhsdkfjhsdkfjhKHJKHjksdkfjhsdkjfhKHKJHjkhsdkfjhsdkjfhsdjkfhksdjfhKJHKjksdhfsdjkfhksdjfhsdkjhKHJKhsdkfhsdkjfhsdkfhdskjhKHKjhsdfkjhsdjkfh==",
"length": 163
}
],
"was_redirect_followed": false,
"request_time": "51"
}
}
],
"internal_data": "adjkhajksdhaskjdhkjHKJHjkhaskjdhkjasdhKHKJHkjsdhfkjsdhfkjsdhKHJKHjksdfhsdjkfhksdjhKHKJHJKhsdkfjhsdkfjhKHJKHjksdkfjhsdkjfhKHKJHjkhsdkfjhsdkjfhsdjkfhksdjfhKJHKjksdhfsdjkfhksdjfhsdkjhKHJKhsdkfhsdkjfhsdkfhdskjhKHKjhsdfkjhsdjkfh="
}
}
],
"event_logs": [],
"audit_items": []
}
I read it in R using jsonlite:
df_orig <- fromJSON('dast_sample_output.json', flatten = TRUE)
This gives a nested list-type R object. I wish to convert this list to a data frame in a tidy format, with all the arrays and sub-arrays unnested.
If you run str(df_orig), you can see the nested data frames in there.
How do I convert it to a tidy format?
I tried unnest() and purrr but am struggling to get it into a tidy format for analysis. Any pointers would be highly appreciated.
Cheers,
Use the jsonlite package function fromJSON().
Edit: set the option flatten = TRUE.
Edit 2: use content(x, 'text') before flattening.
Here is a full example converting to a data.table:
library(httr)
library(jsonlite)
library(data.table)

get.json <- GET(apicall.text)                      # apicall.text holds the request URL
get.json.text <- content(get.json, 'text')         # extract the body as a JSON string
get.json.flat <- fromJSON(get.json.text, flatten = TRUE)
dt <- as.data.table(get.json.flat)
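For the tidyverse route the question asks about, something like this may get you started (a sketch, untested against the full file; names taken from the sample above):
library(jsonlite)
library(tidyr)
library(dplyr)

x <- fromJSON('dast_sample_output.json', simplifyVector = FALSE)

# One row per issue event; unnest_wider() spreads each named list
# into columns, and names_sep prefixes the nested issue fields
events <- tibble(event = x$issue_events) %>%
  unnest_wider(event) %>%
  unnest_wider(issue, names_sep = '_')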
I am new to NoSQL and MongoDB, so please don't bash. I have used SQL databases in the past, but am now looking to leverage the scalability of NoSQL. One application that comes to mind is the collection of experimental results, where they are serialized in some manner with a start date, end date, part number, serial number, etc. Along with each experiment, there are many "measurements" collected, but the list of measurements may be unique in each experiment.
I am looking for ideas in how to structure the document to achieve the follow tasks:
1) Query based on date ranges, part numbers, serial numbers
2) View the results in a "spreadsheet"-style table
3) Perform statistical calculations, perhaps with R, on the different "measurements"
An example might look like:
[
{
"_id": {
"$oid": "5e680d6063cb144f9d1be261"
},
"StartDate": {
"$date": {
"$numberLong": "1583841600000"
}
},
"EndDate": {
"$date": {
"$numberLong": "1583842007000"
}
},
"PartNumber": "1Z45NP7X",
"SerialNumber": "U84A3102",
"Status": "Acceptable",
"Results": [
{
"Sensor": "Pressure",
"Value": "14.68453",
"Units": "PSIA",
"Flag": "1"
},
{
"Sensor": "Temperature",
"Value": {
"$numberDouble": "68.43"
},
"Units": "DegF",
"Flag": {
"$numberInt": "1"
}
},
{
"Sensor": "Velocity",
"Value": {
"$numberDouble": "12.4"
},
"Units": "ft/s",
"Flag": {
"$numberInt": "1"
}
}
]
},
{
"_id": {
"$oid": "5e68114763cb144f9d1be263"
},
"StartDate": {
"$date": {
"$numberLong": "1583842033000"
}
},
"EndDate": {
"$date": {
"$numberLong": "1583842434000"
}
},
"PartNumber": "1Z45NP7X",
"SerialNumber": "U84A3103",
"Status": "Acceptable",
"Results": [
{
"Sensor": "Pressure",
"Value": "14.70153",
"Units": "PSIA",
"Flag": "1"
},
{
"Sensor": "Temperature",
"Value": {
"$numberDouble": "68.55"
},
"Units": "DegF",
"Flag": {
"$numberInt": "1"
}
},
{
"Sensor": "Velocity",
"Value": {
"$numberDouble": "12.7"
},
"Units": "ft/s",
"Flag": {
"$numberInt": "1"
}
}
]
},
{
"_id": {
"$oid": "5e68115f63cb144f9d1be264"
},
"StartDate": {
"$date": {
"$numberLong": "1583842464000"
}
},
"EndDate": {
"$date": {
"$numberLong": "1583842434000"
}
},
"PartNumber": "1Z45NP7X",
"SerialNumber": "U84A3104",
"Status": "Acceptable",
"Results": [
{
"Sensor": "Pressure",
"Value": "14.59243",
"Units": "PSIA",
"Flag": "1"
},
{
"Sensor": "Weight",
"Value": {
"$numberDouble": "67.93"
},
"Units": "lbf",
"Flag": {
"$numberInt": "1"
}
},
{
"Sensor": "Torque",
"Value": {
"$numberDouble": "122.33"
},
"Units": "ft-lbf",
"Flag": {
"$numberInt": "1"
}
}
]
}
]
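For task 1, the first (array-based) structure is straightforward to query on the top-level fields. A sketch with pymongo (connection, database, and collection names here are assumptions, not from the question):

from datetime import datetime
from pymongo import MongoClient

coll = MongoClient()['lab']['experiments']  # hypothetical database/collection

# Date-range plus part-number query against top-level fields
cursor = coll.find({
    'PartNumber': '1Z45NP7X',
    'StartDate': {'$gte': datetime(2020, 3, 10),
                  '$lt': datetime(2020, 3, 11)}
})
for doc in cursor:
    # Results is an array of {Sensor, Value, Units, Flag} subdocuments
    print(doc['SerialNumber'], [(r['Sensor'], r['Value']) for r in doc['Results']])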
Another approach might be:
[
{
"_id": {
"$oid": "5e680d6063cb144f9d1be261"
},
"StartDate": {
"$date": {
"$numberLong": "1583841600000"
}
},
"EndDate": {
"$date": {
"$numberLong": "1583842007000"
}
},
"PartNumber": "1Z45NP7X",
"SerialNumber": "U84A3102",
"Status": "Acceptable",
"Pressure (PSIA)" : "14.68453",
"Pressure - Flag": "1",
"Temperature (degF)": "68.43",
"Temperature - Flag": "1",
"Velocity (ft/s)": "12.4",
"Velocity Flag": "1"
},
{
"_id": {
"$oid": "5e68114763cb144f9d1be263"
},
"StartDate": {
"$date": {
"$numberLong": "1583842033000"
}
},
"EndDate": {
"$date": {
"$numberLong": "1583842434000"
}
},
"PartNumber": "1Z45NP7X",
"SerialNumber": "U84A3103",
"Status": "Acceptable",
"Pressure (PSIA)" : "14.70153",
"Pressure - Flag": "1",
"Temperature (degF)": "68.55",
"Temperature - Flag": "1",
"Velocity (ft/s)": "12.7",
"Velocity Flag": "1"
},
{
"_id": {
"$oid": "5e68115f63cb144f9d1be264"
},
"StartDate": {
"$date": {
"$numberLong": "1583842464000"
}
},
"EndDate": {
"$date": {
"$numberLong": "1583842434000"
}
},
"PartNumber": "1Z45NP7X",
"SerialNumber": "U84A3104",
"Status": "Acceptable",
"Pressure (PSIA)" : "14.59243",
"Pressure - Flag": "1",
"Weight (lbf)": "67.93",
"Weight - Flag": "1",
"Torque (ft-lbf)": "122.33",
"Torque - Flag": : "1"
}
]
An example table might look like this (N/A where a sensor was not measured in that run):

StartDate             EndDate               PartNumber  SerialNumber  Pressure  Pressure-Flag  Temperature  Temperature-Flag  Velocity  Velocity-Flag  Torque  Torque-Flag  Weight  Weight-Flag
2020-03-10T12:00:00Z  2020-03-10T12:06:47Z  1Z45NP7X    U84A3102      14.68453  1              68.43        1                 12.4      1              N/A     N/A          N/A     N/A
2020-03-10T12:07:13Z  2020-03-10T12:13:54Z  1Z45NP7X    U84A3103      14.70153  1              68.55        1                 12.7      1              N/A     N/A          N/A     N/A
2020-03-10T12:07:13Z  2020-03-10T12:13:54Z  1Z45NP7X    U84A3104      14.59243  1              N/A          N/A               N/A       N/A            122.33  1            67.93   1
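If you keep the array-based structure, that spreadsheet view (task 2) can be produced client-side. A sketch with pandas, assuming docs is a list of such documents with Value already normalized to plain numbers:

import pandas as pd

# One row per sensor reading, carrying the parent document's metadata
flat = pd.json_normalize(docs, record_path='Results',
                         meta=['PartNumber', 'SerialNumber', 'Status'])

# Pivot so each Sensor becomes a column; missing sensors come out as NaN
table = flat.pivot_table(index=['PartNumber', 'SerialNumber'],
                         columns='Sensor', values='Value', aggfunc='first')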
Any thoughts on the best structure? In reality, there might be 200+ "sensor values".
Thanks,
DG
Reporting API v4
I am a developer and I manage my clients' Google AdWords and Analytics accounts. I have been using the AdWords and Analytics reporting APIs for almost a year now.
I am also using the query builder at https://ga-dev-tools.appspot.com/query-explorer/ to compare whether I have retrieved the right amount of data.
I don't know if it's an error or not, but it's acting weird.
Try number 1, using https://ga-dev-tools.appspot.com/query-explorer/:
I tried to add 2 metrics and 7 dimensions. This account ID contains 1 million rows in only 1 month; I know this because I retrieved 1 million rows for the range July 25, 2018 - August 16, 2018.
Then, here's the interesting part: I ran the query again with the same parameters and it retrieved 5999 results. I ran it again and it returned 1 million. The results keep changing. I thought it was an error in my code, but it is also happening in the query builder.
What do you guys think? Is it a bug or not?
You can try this yourself if you have more than a million rows of data.
I know it's not strictly about coding, but Google Analytics doesn't have its own forum the way AdWords does.
Try number 2, using this link: https://developers.google.com/analytics/devguides/reporting/core/v4/rest/v4/reports/batchGet
This is my request:
{
"reportRequests": [
{
"dateRanges": [
{
"endDate": "2018-08-16",
"startDate": "2018-07-16"
}
],
"dimensions": [
{
"name": "ga:dimension2"
},
{
"name": "ga:dimension3"
},
{
"name": "ga:dimension1"
},
{
"name": "ga:adPlacementDomain"
}
],
"pageSize": 5,
"viewId": "********",
"samplingLevel": "LARGE",
"metrics": [
{
"expression": "ga:entrances"
},
{
"expression": "ga:newUsers"
}
],
"includeEmptyRows": true
}
]
}
The returned rowCount is sometimes 2111 and sometimes 1000000.
This is my response JSON for the 1,000,000-row result:
{
"reports": [
{
"columnHeader": {
"dimensions": [
"ga:dimension2",
"ga:dimension3",
"ga:dimension1",
"ga:adPlacementDomain"
],
"metricHeader": {
"metricHeaderEntries": [
{
"name": "ga:entrances",
"type": "INTEGER"
},
{
"name": "ga:newUsers",
"type": "INTEGER"
}
]
}
},
"data": {
"rows": [
{
"dimensions": [
"(other)",
"(other)",
"(other)",
"(other)"
],
"metrics": [
{
"values": [
"120834",
"68730"
]
}
]
},
{
"dimensions": [
"1000025873.1532426892",
"1532426891790.o9z84x",
"2018-07-24T11:08:15.449+01:00",
"unknown"
],
"metrics": [
{
"values": [
"0",
"0"
]
}
]
},
{
"dimensions": [
"1000025873.1532426892",
"1532426891790.o9z84x",
"2018-07-24T11:08:17.589+01:00",
"unknown"
],
"metrics": [
{
"values": [
"0",
"0"
]
}
]
},
{
"dimensions": [
"1000025873.1532426892",
"1532426891790.o9z84x",
"2018-07-24T11:08:31.809+01:00",
"unknown"
],
"metrics": [
{
"values": [
"0",
"0"
]
}
]
},
{
"dimensions": [
"1000025873.1532426892",
"1532427045552.p38pk78",
"2018-07-24T11:09:06.43+01:00",
"unknown"
],
"metrics": [
{
"values": [
"0",
"0"
]
}
]
}
],
"totals": [
{
"values": [
"158626",
"90225"
]
}
],
"rowCount": 1000000,
"minimums": [
{
"values": [
"0",
"0"
]
}
],
"maximums": [
{
"values": [
"120834",
"68730"
]
}
],
"isDataGolden": true
},
"nextPageToken": "5"
}
]
}
Another example response, when I have fewer than 1 million results:
{
"reports": [
{
"columnHeader": {
"dimensions": [
"ga:dimension2",
"ga:dimension3",
"ga:dimension1",
"ga:adPlacementDomain"
],
"metricHeader": {
"metricHeaderEntries": [
{
"name": "ga:entrances",
"type": "INTEGER"
},
{
"name": "ga:newUsers",
"type": "INTEGER"
}
]
}
},
"data": {
"rows": [
{
"dimensions": [
"1002211166.1531434756",
"1531762918308.fjnj7pa6",
"2018-07-16T18:41:58.307+01:00",
"mobileapp::2-com.forsbit.spider"
],
"metrics": [
{
"values": [
"1",
"0"
]
}
]
},
{
"dimensions": [
"1002211166.1531434756",
"1531771001486.jawfrpz8",
"2018-07-16T20:56:41.482+01:00",
"mobileapp::2-com.forsbit.spider"
],
"metrics": [
{
"values": [
"1",
"0"
]
}
]
},
{
"dimensions": [
"1002211166.1531434756",
"1531772475507.7n4w2qzb",
"2018-07-16T21:21:15.503+01:00",
"mobileapp::2-com.forsbit.spider"
],
"metrics": [
{
"values": [
"1",
"0"
]
}
]
},
{
"dimensions": [
"1002211166.1531434756",
"1531859165986.zl7we6a5",
"2018-07-17T21:26:05.977+01:00",
"mobileapp::2-com.forsbit.spider"
],
"metrics": [
{
"values": [
"1",
"0"
]
}
]
},
{
"dimensions": [
"1002211166.1531434756",
"1531859632678.dz7hccsa",
"2018-07-17T21:33:52.673+01:00",
"mobileapp::2-com.forsbit.spider"
],
"metrics": [
{
"values": [
"1",
"0"
]
}
]
},
{
"dimensions": [
"1002211166.1531434756",
"1531861026792.kw71ngx9",
"2018-07-17T21:42:31.667+01:00",
"mobileapp::2-com.forsbit.spider"
],
"metrics": [
{
"values": [
"1",
"0"
]
}
]
}
],
"totals": [
{
"values": [
"2111",
"233"
]
}
],
"rowCount": 2112,
"minimums": [
{
"values": [
"0",
"0"
]
}
],
"maximums": [
{
"values": [
"1",
"1"
]
}
],
"isDataGolden": true
},
"nextPageToken": "6"
}
]
}
I am assuming that you have kept all the queries intact; double-check just to make sure.
The second step would be to check for sampling: look at the fields samplingSpaceSizes and samplesReadCounts in the response. If these fields are not defined, no sampling was introduced.
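As a sketch, that check can be automated in Python (field names as in the v4 reference; response is assumed to be the parsed JSON shown above):

def is_sampled(response):
    # samplingSpaceSizes / samplesReadCounts appear under reports[].data
    for report in response.get('reports', []):
        data = report.get('data', {})
        if 'samplesReadCounts' in data or 'samplingSpaceSizes' in data:
            return True
    return False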