below is my Kusto query, it takes 2+ mins in lens dashboard to show the data, I have optimized my query to have materialize() in let statements and contains with has. is there anyother way to optimize it in a better way.
let C_masfunteams = materialize(find withsource=source in (cluster(X).database('oci-*').['TextFileLogs']) where AttemptedIngestTime > ago(7d)
and FileLineContent has "<li>Build Number:" | summarize min(AttemptedIngestTime) by source, FileLineContent);//, AttemptedIngestTime
let n = C_masfunteams | extend databaseName = extract(#"""(oci-[^""]*)""", 1, source)
| extend BuildNumber = extract(#"([A-Z]\w*\.[0-9]\d*\.[0-9]\d*\.[0-9]\d*)",1,FileLineContent)
| extend StampVersion = extract(#"([0-9]\d*\.[0-9]\d*\.[0-9]\d*\.[0-9]\d*)",1,FileLineContent)
|extend cluster ='masfunteams'
| project BuildNumber , StampVersion , min_AttemptedIngestTime
| summarize NumberOfRuns=count() , ingestedtime = min(min_AttemptedIngestTime) by BuildNumber,StampVersion;
let C_masfun= materialize(find withsource=source in (cluster(Y).database('oci-*').['TextFileLogs']) where AttemptedIngestTime > ago(7d)
and FileLineContent has "<li>Build Number:" | summarize min(AttemptedIngestTime) by source, FileLineContent);//, AttemptedIngestTime
let m = C_masfun | extend databaseName = extract(#"""(oci-[^""]*)""", 1, source)
| extend BuildNumber = extract(#"([A-Z]\w*\.[0-9]\d*\.[0-9]\d*\.[0-9]\d*)",1,FileLineContent)
| extend StampVersion = extract(#"([0-9]\d*\.[0-9]\d*\.[0-9]\d*\.[0-9]\d*)",1,FileLineContent)
|extend cluster ='masfunteams'
| project BuildNumber , StampVersion , min_AttemptedIngestTime
| summarize NumberOfRuns=count() , ingestedtime = min(min_AttemptedIngestTime) by BuildNumber,StampVersion;
let C_masvaas = materialize(find withsource=source in (cluster(z).database('oci-*').['TextFileLogs']) where AttemptedIngestTime > ago(7d)
and FileLineContent has "<li>Build Number:" | summarize min(AttemptedIngestTime) by source, FileLineContent);//, AttemptedIngestTime
let o= C_masvaas | extend databaseName = extract(#"""(oci-[^""]*)""", 1, source)
| extend BuildNumber = extract(#"([A-Z]\w*\.[0-9]\d*\.[0-9]\d*\.[0-9]\d*)",1,FileLineContent)
| extend StampVersion = extract(#"([0-9]\d*\.[0-9]\d*\.[0-9]\d*\.[0-9]\d*)",1,FileLineContent)
|extend cluster ='masfunteams'
| project BuildNumber , StampVersion , min_AttemptedIngestTime
| summarize NumberOfRuns=count() , ingestedtime = min(min_AttemptedIngestTime) by BuildNumber,StampVersion;
union isfuzzy=true m,n,o
| summarize Ingestedtime =min(ingestedtime) by BuildNumber,StampVersion
Hi the query is quite complex and without running it on the actual cluster it is hard to figure out what is the expected results. So here are a few tips:
Consider starting the union operator as the first operator with a uniform logic for the filtering, parsing and summarize operations
Consider removing the materialize() if you are only using each dataset only once
Consider removing the 'find' as you are not doing search across multiple columns, If you are using it to get the source table in your output records set, consider adding "withsource" to the union statement
If possible consider using the 'parse' operator instead of the regular expression
Hope this helps!
Related
I am new to Kusto Query language. Requirement is to alert when the continuous 15 minute value of machine status is 1.
I have two columns with column1:(timestamp in every second) and column2:machine status(values 1 and 0).How can I use a sliding window to find if the machine is 1 for continuous 15 minutes.
Currently I have used the bin function, but it does not seem to be the proper one.
summarize avg_value = avg(status) by customer, machine,bin(timestamp,15m)
What could be the better solution for this.
Thanks in advance
Here is another option using time series functions:
let dt = 1s;
let n_bins = tolong(15m/dt);
let coeffs = repeat(1, n_bins);
let T = view(M:string) {
range Timestamp from datetime(2022-01-11) to datetime(2022-01-11 01:00) step dt
| extend machine = M
| extend status = iif(rand()<0.002, 0, 1)
};
union T("A"), T("B")
| make-series status=any(status) on Timestamp step dt by machine
| extend rolling_status = series_fir(status, coeffs, false)
| extend alerts = series_equals(rolling_status, n_bins)
| project machine, Timestamp, alerts
| mv-expand Timestamp to typeof(datetime), alerts to typeof(bool)
| where alerts == 1
You can also do it using the scan operator.
thanks
Here is one way to do it, the example uses generated data, hopefully it fits in your scenario:
let view = range x from datetime(2022-01-10 13:00:10) to datetime(2022-01-10 13:10:10) step 1s
| extend status = iif(rand()<0.01, 0, 1)
| extend current_sum = row_cumsum(status)
| extend prior_sum = prev(current_sum, 15)
| extend should_alert = (current_sum-prior_sum != 15 and isnotempty(prior_sum))
If you have multiple machines you need to sort it first by machines and restart the row_cumsum operation:
let T = view(M:string) {
range Timestamp from datetime(2022-01-10 13:00:10) to datetime(2022-01-10 13:10:10) step 1s
| extend machine = M
| extend status = iif(rand()<0.01, 0, 1)
};
union T("A"), T("B")
| sort by machine asc, Timestamp asc
| extend current_sum = row_cumsum(status, machine != prev(machine))
| extend prior_sum = iif(machine == prev(machine, 15), prev(current_sum, 15), int(null))
| extend should_alert = (current_sum-prior_sum != 15 and isnotempty(prior_sum))
We're running into Kusto has_any limit of 10K.
Sample code
// Query: Get failed operations for migrated apps
let migrationsTimeDiff = 15d;
let operationsDiffTime = 24h + 1m;
let migratedApps = FirstTable
| where TimeStamp >= ago(migrationsTimeDiff)
| where MetricName == "JobSucceeded"
| project
MigrationTime = PreciseTimeStamp,
appName = tostring(parse_json(Annotations).AppName)
| project appName;
SecondTable
| where TimeStamp > ago(operationsDiffTime)
| where Url has_any (appName)
| where Result == "Fail"
Is there a way to restructure the query via joins?
Alternatively is it possible to loop in batches of 10k?
Thanks for reading!
If Url is an exact match to appName, then you should use:
SecondTable
| where TimeStamp > ago(operationsDiffTime)
| where Url in (appName) // 'in' instead of 'has_any'
| where Result == "Fail"
Otherwise, you'll need to extract the application name from the Url using extend, and then use in like I suggested above, so your query will look like this:
SecondTable
| where TimeStamp > ago(operationsDiffTime)
| extend ExtractedAppNameFromUrl = ...
| where ExtractedAppNameFromUrl in (appName) // 'in' instead of 'has_any'
| where Result == "Fail"
I have Sessions table
Sessions
|Timespan|Name |No|
|12:00:00|Start|1 |
|12:01:00|End |2 |
|12:02:00|Start|3 |
|12:04:00|Start|4 |
|12:04:30|Error|5 |
I need to extract from it duration of each session using KQL (but if you could give me suggestion how I can do it with some other query language it would be also very helpful). But if next row after start is also start, it means session was abandoned and we should ignore it.
Expected result:
|Duration|SessionNo|
|00:01:00| 1 |
|00:00:30| 4 |
You can try something like this:
Sessions
| order by No asc
| extend nextName = next(Name), nextTimestamp = next(timestamp)
| where Name == "Start" and nextName != "Start"
| project Duration = nextTimestamp - timestamp, No
When using the operator order by, you are getting a Serialized row set, which then you can use operators such as next and prev. Basically you are seeking rows with No == "Start" and next(Name) == "End", so this is what I did,
You can find this query running at Kusto Samples open database.
let Sessions = datatable(Timestamp: datetime, Name: string, No: long) [
datetime(12:00:00),"Start",1,
datetime(12:01:00),"End",2,
datetime(12:02:00),"Start",3,
datetime(12:04:00),"Start",4,
datetime(12:04:30),"Error",5
];
Sessions
| order by No asc
| extend Duration = iff(Name != "Start" and prev(Name) == "Start", Timestamp - prev(Timestamp), timespan(null)), SessionNo = prev(No)
| where isnotnull(Duration)
| project Duration, SessionNo
I have the following query in Application Insights where I run the parsejson function multiple times in the same query.
Is it possible to reuse the data from the parsejson() function after the first invocation? Right now I call it three times in the query. I am trying to see if calling it just once might be more efficient.
EventLogs
| where Timestamp > ago(1h)
and tostring(parsejson(tostring(Data.JsonLog)).LogId) =~ '567890'
| project Timestamp,
fileSize = toint(parsejson(tostring(Data.JsonLog)).fileSize),
pageCount = tostring(parsejson(tostring(Data.JsonLog)).pageCount)
| limit 10
You can use extend for that:
EventLogs
| where Timestamp > ago(1h)
| extend JsonLog = parsejson(tostring(Data.JsonLog)
| where tostring(JsonLog.LogId) =~ '567890'
| project Timestamp,
fileSize = toint(JsonLog.fileSize),
pageCount = tostring(JsonLog.pageCount)
| limit 10
Assuming I have a definition of a user I can calculate sum of all daily users and all monthly users.
customEvents
| where timestamp > ago(30d)
| where <condition>
| summarize by <user>, bin(timestamp, 1d)
| summarize count() by bin(timestamp, 1d)
| summarize DAU=sum(count_)
customEvents
| where timestamp > ago(30d)
| where <condition>
| summarize by <user>
| MAU=30*count
The question is how to calculate DAU/MAU? Some join magic?
Edit:
There is a much easier way to calculate usage metrics now - "evaluate activity_engagement":
union *
| where timestamp > ago(90d)
| evaluate activity_engagement(user_Id, timestamp, 1d, 28d)
| project timestamp, Dau_Mau=activity_ratio*100
| render timechart
-------
The DAU is really stright forward in Analytics - just use a dcount.
The tricky part of course is calculating the 28-day rolling MAU.
I wrote a post detailing exactly how to calculate stickiness in app analytics a few weeks back - The trick is that you have to use hll() and hll_merge() to calculate the intermediate dcount results for each day, and then merge them together.
The end result looks like this:
let start=ago(60d);
let period=1d;
let RollingDcount = (rolling:timespan)
{
pageViews
| where timestamp > start
| summarize hll(user_Id) by bin(timestamp, period)
| extend periodKey = range(bin(timestamp, period), timestamp+rolling, period)
| mvexpand periodKey
| summarize rollingUsers = dcount_hll(hll_merge(hll_user_Id)) by todatetime(periodKey)
};
RollingDcount(28d)
| join RollingDcount(0d) on periodKey
| where periodKey < now() and periodKey > start + 28d
| project Stickiness = rollingUsers1 *1.0/rollingUsers, periodKey
| render timechart
Looks like this query does it:
let query = customEvents
| where timestamp > datetime("2017-02-01T00:00:00Z") and timestamp < datetime("2017-03-01T00:00:00Z")
| where **<optional condition>**;
let DAU = query
| summarize by **<user>**, bin(timestamp, 1d)
| summarize count() by bin(timestamp, 1d)
| summarize DAU=sum(count_), _id=1;
let MAU = query
| summarize by **<user>**
| summarize MAU=count(), _id=1;
DAU | join (MAU) on _id
| project ["DAU/MAU"] = todouble(DAU)/30/MAU*100, ["Sum DAU"] = DAU, ["MAU"] = MAU
Any suggestions how to calculate it per last few months?
Zaki, your queries calculate a point in time MAU/DAU. If you need a rolling MAU you can use the HLL approach suggested by Asaf. Or the following which is my preferred rolling MAU which is using make-series and fir(). You can play with it hands on using this link to the analytics demo portal.
The two approaches require some time to get used to... and from what I have seen both are blazing fast. One advantage to the make-series and fir() approach is that it is 100% accurate while the HLL approach is heuristic and has some level of error. Another bonus is that it is really easy to configure the level of user engagement that would make the user eligible for the count.
let endtime=endofday(datetime(2017-03-01T00:00:00Z));
let window=60d;
let starttime=endtime-window;
let interval=1d;
let user_bins_to_analyze=28;
let moving_sum_filter=toscalar(range x from 1 to user_bins_to_analyze step 1 | extend v=1 | summarize makelist(v));
let min_activity=1;
customEvents
| where timestamp > starttime
| where customDimensions["sourceapp"]=="ai-loganalyticsui-prod"
| where (name == "Checkout")
| where user_AuthenticatedId <> ""
| make-series UserClicks=count() default=0 on timestamp in range(starttime, endtime-1s, interval) by user_AuthenticatedId
// create a new column containing a sliding sum. Passing 'false' as the last parameter to fir() prevents normalization of the calculation by the size of the window.
| extend RollingUserClicks=fir(UserClicks, moving_sum_filter, false)
| project User_AuthenticatedId=user_AuthenticatedId , RollingUserClicksByDay=zip(timestamp, RollingUserClicks)
| mvexpand RollingUserClicksByDay
| extend Timestamp=todatetime(RollingUserClicksByDay[0])
| extend RollingActiveUsersByDay=iff(toint(RollingUserClicksByDay[1]) >= min_activity, 1, 0)
| summarize sum(RollingActiveUsersByDay) by Timestamp
| where Timestamp > starttime + 28d
| render timechart