Get current CPU usage on Deno

Node.js has a process.cpuUsage() function, and Deno has a Deno.memoryUsage() function for memory usage.
There is also a process module for Deno at https://deno.land/std@0.123.0/node/process.ts,
but it doesn't include anything like .cpuUsage().
So is there a way to get the current CPU usage in Deno?

At the time of writing this answer, it's not natively possible to obtain sampled CPU load data in Deno.
If you want this data now, you can get it in one of two ways:
Using the Foreign Function Interface (FFI) API
Using the subprocess API
I'll provide a code sample below showing how to get the data by installing Node.js and using the second method:
node_eval.ts:
type MaybePromise<T> = T | Promise<T>;
type Decodable = Parameters<TextDecoder['decode']>[0];
const decoder = new TextDecoder();
async function trimDecodable (decodable: MaybePromise<Decodable>): Promise<string> {
  return decoder.decode(await decodable).trim();
}
/**
 * Evaluates the provided script using Node.js (like
 * [`eval`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/eval))
 * and returns `stdout`.
 *
 * Uses the resolved version of `node` according to the host environment.
 *
 * Requires `node` to be available in `$PATH`.
 * Requires permission `--allow-run=node`.
 */
export async function evaluateUsingNodeJS (script: string): Promise<string> {
  const cmd = ['node', '-e', script];
  const proc = Deno.run({cmd, stderr: 'piped', stdout: 'piped'});
  const [{code}, stderr, stdout] = await Promise.all([
    proc.status(),
    trimDecodable(proc.stderrOutput()),
    trimDecodable(proc.output()),
  ]);
  if (code !== 0) {
    const msg = stderr ? `\n${stderr}` : '';
    throw new Error(`The "node" subprocess exited with a non-zero status code (${code}). If output was emitted to stderr, it is included below.${msg}`);
  }
  return stdout;
}
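Note: newer versions of Deno deprecate Deno.run (it is removed in Deno 2) in favor of Deno.Command. Below is a minimal sketch of the same subprocess call using that API (assuming Deno 1.31+), in case Deno.run isn't available in your version:
// Sketch of an alternative to evaluateUsingNodeJS above, using the newer Deno.Command API.
// Requires `node` in $PATH and the permission --allow-run=node.
export async function evaluateUsingNodeJSCommand (script: string): Promise<string> {
  const command = new Deno.Command('node', {
    args: ['-e', script],
    stdout: 'piped',
    stderr: 'piped',
  });
  const {code, stdout, stderr} = await command.output();
  if (code !== 0) {
    const err = new TextDecoder().decode(stderr).trim();
    throw new Error(`The "node" subprocess exited with a non-zero status code (${code}).${err ? `\n${err}` : ''}`);
  }
  return new TextDecoder().decode(stdout).trim();
}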
mod.ts:
import {evaluateUsingNodeJS} from './node_eval.ts';
export const cpuTimesKeys: readonly (keyof CPUTimes)[] =
  ['user', 'nice', 'sys', 'idle', 'irq'];
export type CPUTimes = {
  /** The number of milliseconds the CPU has spent in user mode */
  user: number;
  /**
   * The number of milliseconds the CPU has spent in nice mode
   *
   * `nice` values are POSIX-only.
   * On Windows, the nice values of all processors are always `0`.
   */
  nice: number;
  /** The number of milliseconds the CPU has spent in sys mode */
  sys: number;
  /** The number of milliseconds the CPU has spent in idle mode */
  idle: number;
  /** The number of milliseconds the CPU has spent in irq mode */
  irq: number;
};
export type CPUCoreInfo = {
  model: string;
  /** in MHz */
  speed: number;
  times: CPUTimes;
};
/**
 * Requires `node` to be available in `$PATH`.
 * Requires permission `--allow-run=node`.
 */
export async function sampleCPUsUsingNodeJS (): Promise<CPUCoreInfo[]> {
  const script = `console.log(JSON.stringify(require('os').cpus()));`;
  const stdout = await evaluateUsingNodeJS(script);
  try {
    return JSON.parse(stdout) as CPUCoreInfo[];
  }
  catch (ex) {
    const cause = ex instanceof Error ? ex : new Error(String(ex));
    throw new Error(`The "node" subprocess output couldn't be parsed`, {cause});
  }
}
/**
 * (Same as `CPUCoreInfo`, but) aliased in recognition of the transformation,
 * in order to provide JSDoc info regarding the transformed type
 */
export type TransformedCoreInfo = Omit<CPUCoreInfo, 'times'> & {
  /** Properties are decimal percentages of total time */
  times: Record<keyof CPUCoreInfo['times'], number>;
};
/** Converts each time value (in ms) to a decimal percentage of their sum */
export function coreInfoAsPercentages (coreInfo: CPUCoreInfo): TransformedCoreInfo {
  const timeEntries = Object.entries(coreInfo.times) as [
    name: keyof CPUCoreInfo['times'],
    ms: number,
  ][];
  const sum = timeEntries.reduce((sum, [, ms]) => sum + ms, 0);
  for (const [index, [, ms]] of timeEntries.entries()) {
    timeEntries[index][1] = ms / sum;
  }
  const times = Object.fromEntries(timeEntries) as TransformedCoreInfo['times'];
  return {...coreInfo, times};
}
example.ts:
import {
  coreInfoAsPercentages,
  cpuTimesKeys,
  sampleCPUsUsingNodeJS,
  type CPUCoreInfo,
} from './mod.ts';
function anonymizeProcessorAttributes <T extends CPUCoreInfo>(coreInfoArray: T[]): T[] {
  return coreInfoArray.map(info => ({
    ...info,
    model: 'REDACTED',
    speed: NaN,
  }));
}
// Get the CPU info
const cpuCoreInfoArr = await sampleCPUsUsingNodeJS();
// Anonymizing my personal device details (but you would probably not use this)
const anonymized = anonymizeProcessorAttributes(cpuCoreInfoArr);
// JSON for log data
const jsonLogData = JSON.stringify(anonymized);
console.log(jsonLogData);
// Or, for purely visual inspection,
// round the percentages for greater scannability...
const roundedPercentages = anonymized.map(coreInfo => {
  const asPercentages = coreInfoAsPercentages(coreInfo);
  for (const key of cpuTimesKeys) {
    asPercentages.times[key] = Math.round(asPercentages.times[key] * 100);
  }
  return asPercentages;
});
// and log in tabular format
console.table(roundedPercentages.map(({times}) => times));
In the console:
% deno run --allow-run=node example.ts
[{"model":"REDACTED","speed":null,"times":{"user":2890870,"nice":0,"sys":2290610,"idle":17913530,"irq":0}},{"model":"REDACTED","speed":null,"times":{"user":218270,"nice":0,"sys":188200,"idle":22687790,"irq":0}},{"model":"REDACTED","speed":null,"times":{"user":2509660,"nice":0,"sys":1473010,"idle":19111680,"irq":0}},{"model":"REDACTED","speed":null,"times":{"user":221630,"nice":0,"sys":174140,"idle":22698480,"irq":0}},{"model":"REDACTED","speed":null,"times":{"user":2161140,"nice":0,"sys":1086970,"idle":19846200,"irq":0}},{"model":"REDACTED","speed":null,"times":{"user":221800,"nice":0,"sys":157620,"idle":22714800,"irq":0}},{"model":"REDACTED","speed":null,"times":{"user":1905230,"nice":0,"sys":897140,"idle":20291910,"irq":0}},{"model":"REDACTED","speed":null,"times":{"user":224060,"nice":0,"sys":146460,"idle":22723700,"irq":0}}]
┌───────┬──────┬──────┬─────┬──────┬─────┐
│ (idx) │ user │ nice │ sys │ idle │ irq │
├───────┼──────┼──────┼─────┼──────┼─────┤
│ 0     │ 13   │ 0    │ 10  │ 78   │ 0   │
│ 1     │ 1    │ 0    │ 1   │ 98   │ 0   │
│ 2     │ 11   │ 0    │ 6   │ 83   │ 0   │
│ 3     │ 1    │ 0    │ 1   │ 98   │ 0   │
│ 4     │ 9    │ 0    │ 5   │ 86   │ 0   │
│ 5     │ 1    │ 0    │ 1   │ 98   │ 0   │
│ 6     │ 8    │ 0    │ 4   │ 88   │ 0   │
│ 7     │ 1    │ 0    │ 1   │ 98   │ 0   │
└───────┴──────┴──────┴─────┴──────┴─────┘

You can use https://deno.land/std@0.123.0/node/os.ts, where cpus() gives CPUCoreInfo[].
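For example (a minimal sketch, assuming the std/node os module exports cpus() and implements it as described):
import { cpus } from 'https://deno.land/std@0.123.0/node/os.ts';
// Each entry has the same shape as CPUCoreInfo above: { model, speed, times }
console.log(cpus());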

Related

Terraform public key & private key adding error, as well as file not uploading to the instance

resource "aws_instance" "dove-web" {
ami = var.AMIS[var.REGION]
instance_type = "t2.micro"
subnet_id = aws_subnet.dove-pub-1.id
key_name = aws_key_pair.rsa.key_name
vpc_security_group_ids = [aws_security_group.dove_stack_sg.id]
tags = {
Name = "cool"
}
provisioner "file" {
source = "web.sh"
destination = "/home/ubuntu/web.sh"
}
provisioner "remote-exec" {
inline = [
"chmod +x /home/ubuntu/web.sh",
"sudo /home/ubuntu/web.sh"
]
}
# privat key
resource "tls_private_key" "rsa" {
algorithm = "RSA"
rsa_bits = 4096
}
# public key
resource "aws_key_pair" "rsa" {
key_name = var.PUB_key
public_key = tls_private_key.rsa.public_key_openssh
provisioner "local-exec" { # Generate "terraform-key-pair.pem" in current directory
command = <<-EOT
echo '${tls_private_key.rsa.private_key_pem}' > ./'${var.PUB_key}'.pem
chmod 400 ./'${var.PUB_key}'.pem
EOT
}
}
# store key localy
/*resource "local_file" "TF-key" {
content = tls_private_key.rsa.private_key_pem
filename = "tfkey"
}
*/
}
output "PublicIP" {
value = aws_instance.dove-web.public_ip
}
$ terraform validate
╷
│ Error: Reference to undeclared resource
│
│ on instance.tf line 6, in resource "aws_instance" "dove-web":
│ 6: key_name = aws_key_pair.rsa.key_name
│
│ A managed resource "aws_key_pair" "rsa" has not been declared in the root module.
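The error appears to be caused by the tls_private_key and aws_key_pair blocks being nested inside the aws_instance block above, so Terraform never sees a top-level aws_key_pair.rsa resource. A minimal sketch of the intended top-level layout (same names as the snippet above; tags and provisioners omitted):
resource "tls_private_key" "rsa" {
  algorithm = "RSA"
  rsa_bits  = 4096
}
resource "aws_key_pair" "rsa" {
  key_name   = var.PUB_key
  public_key = tls_private_key.rsa.public_key_openssh
}
resource "aws_instance" "dove-web" {
  ami                    = var.AMIS[var.REGION]
  instance_type          = "t2.micro"
  subnet_id              = aws_subnet.dove-pub-1.id
  key_name               = aws_key_pair.rsa.key_name
  vpc_security_group_ids = [aws_security_group.dove_stack_sg.id]
  # tags and provisioners as in the original snippet
}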

How to set static IP, gateway, DNS 1 and DNS 2 on Android 10?

WifiConfiguration is deprecated and stopped working in API 29
wifiManager.configuredNetworks now returns null
On previous Android versions I was able to use the solution below, using WifiConfiguration:
fun setStaticIpConfiguration(manager: WifiManager, config: WifiConfiguration, ipAddress: InetAddress?, prefixLength: Int, gateway: InetAddress, dns: Array<InetAddress?>) {
    // First set up IpAssignment to STATIC.
    val ipAssignment = getEnumValue("android.net.IpConfiguration\$IpAssignment", "STATIC")
    callMethod(config, "setIpAssignment", arrayOf("android.net.IpConfiguration\$IpAssignment"), arrayOf(ipAssignment))
    // Then set properties in StaticIpConfiguration.
    val staticIpConfig = newInstance("android.net.StaticIpConfiguration")
    val linkAddress = newInstance("android.net.LinkAddress", arrayOf(InetAddress::class.java, Int::class.javaPrimitiveType), arrayOf(ipAddress, prefixLength))
    setField(staticIpConfig, "ipAddress", linkAddress)
    setField(staticIpConfig, "gateway", gateway)
    getField(staticIpConfig, "dnsServers", ArrayList::class.java).clear()
    for (i in dns.indices) getField(staticIpConfig, "dnsServers", ArrayList::class.java).add(dns[i] as Nothing)
    callMethod(config, "setStaticIpConfiguration", arrayOf("android.net.StaticIpConfiguration"), arrayOf(staticIpConfig))
    val netId = manager.updateNetwork(config)
    val result = netId != -1
    if (result) {
        val isDisconnected = manager.disconnect()
        val configSaved = manager.saveConfiguration()
        val isEnabled = manager.enableNetwork(config.networkId, true)
        val isReconnected = manager.reconnect()
    }
}
@Throws(IllegalAccessException::class, IllegalArgumentException::class, NoSuchFieldException::class)
private fun setField(`object`: Any, fieldName: String, value: Any) {
    val field = `object`.javaClass.getDeclaredField(fieldName)
    field[`object`] = value
}
The Android documentation suggests NetworkSpecifier and WifiNetworkSuggestion should be used to replace WifiConfiguration, but there are no methods to set IP addresses in these classes.
From API level 33, Android introduced StaticIpConfiguration, but my device is running Android 10.

aws_wafv2_rule_group Rate based rule

I am struggling to create a rate-based WAFv2 rule group; I am getting the error below when running a plan.
I'm not sure whether this feature is available in Terraform yet. Below are my code and the error.
I have also upgraded Terraform and the AWS provider:
Terraform v1.0.9; on windows_amd64; provider registry.terraform.io/hashicorp/aws v3.63.0
terraform code:
name = "test-rulegroup-ratelimit"
scope = "REGIONAL"
capacity = 5
rule {
name = "test-rulegroup-ratelimit"
priority = 1
action {
count {}
}
statement {
rate_based_statement {
limit = 9999
aggregate_key_type = "IP"
}
}
visibility_config {
cloudwatch_metrics_enabled = false
metric_name = "test-rulegroup-ratelimit"
sampled_requests_enabled = true
}
}
visibility_config {
cloudwatch_metrics_enabled = false
metric_name = "test-rulegroup-ratelimit"
sampled_requests_enabled = true
}
}
Error:
│
│ on r_rulegroup.tf line 15, in resource "aws_wafv2_rule_group" "test-rulegroup-ratelimit":
│ 15: rate_based_statement {
│
│ Blocks of type "rate_based_statement" are not expected here.
I cannot find anything specific for WAFv2 like there is for classic WAF:
https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/waf_rate_based_rule
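As far as I know, WAFv2 only allows rate_based_statement in rules defined directly on a web ACL, not inside a rule group, which is why the provider rejects the block. A sketch of the same rule attached to an aws_wafv2_web_acl instead (resource and metric names are illustrative):
resource "aws_wafv2_web_acl" "ratelimit" {
  name  = "test-webacl-ratelimit"
  scope = "REGIONAL"
  default_action {
    allow {}
  }
  rule {
    name     = "test-ratelimit"
    priority = 1
    action {
      count {}
    }
    statement {
      rate_based_statement {
        limit              = 9999
        aggregate_key_type = "IP"
      }
    }
    visibility_config {
      cloudwatch_metrics_enabled = false
      metric_name                = "test-ratelimit"
      sampled_requests_enabled   = true
    }
  }
  visibility_config {
    cloudwatch_metrics_enabled = false
    metric_name                = "test-webacl-ratelimit"
    sampled_requests_enabled   = true
  }
}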

How can I import a module's type into another module interface declaration?

I'm trying to create a module interface declaration that uses another module's declaration, but it does not work well. Here is an MCVE. I know that I can put type annotations directly on the local modules, but I'm trying to depict what I experience with third-party modules.
.flowconfig:
[ignore]
[include]
[libs]
lib
[options]
[version]
^0.27.0
foo.js:
export class Foo {
  foo() {
    return 'foo';
  }
}
bar.js:
import { Foo } from './foo';
export class Bar extends Foo {
  bar() {
    return 'bar';
  }
  makeFoo() {
    return new Foo();
  }
}
lib/foo.js:
declare module './foo' {
  declare class Foo {
    foo(): string;
  }
}
lib/bar.js:
import { Foo } from './foo';
// Same result if:
// import { Foo } from '../foo';
declare module './bar' {
  declare class Bar extends Foo {
    bar(): string;
    makeFoo(): Foo;
  }
}
index.js:
/* @flow */
import { Foo } from './foo';
import { Bar } from './bar';
const bar = new Bar();
const b: number = bar.bar(); // A. wrong const type
const bf: number = bar.foo(); // B. wrong const type
bar.typo(); // C. no such method
const foo = bar.makeFoo();
foo.foo();
foo.bar(); // D. no such method
And flow's result is:
index.js:6
6: const b: number = bar.bar(); // wrong const type
^^^^^^^^^ call of method `bar`
6: const b: number = bar.bar(); // wrong const type
^^^^^^^^^ string. This type is incompatible with
6: const b: number = bar.bar(); // wrong const type
^^^^^^ number
Found 1 error
I expect 4 errors (A, B, C and D in index.js) but get only A. It seems that I am failing to import Foo properly in lib/bar.js so that Foo becomes something like a wildcard.
Is there any way to properly import a module's type into another interface declaration? Am I missing something? Any help is appreciated.
[EDIT]
I see that some declarations in flow-typed use React$Component, which is declared globally, without importing it. But I'd like to use a type from a module, like Component from the react module.
I don't think you are supposed to create declaration files for your own components, since Flow uses the sources directly. Simply import the classes when you need a reference to a type.
In your current example, you are simply missing the /* @flow */ comment at the top of your files (bar.js, foo.js).
Unfortunately, Flow does not support extending/overriding flow/lib types:
https://github.com/facebook/flow/issues/396
What I've begun to do is:
flow-typed install express@4.x.x
Move express_v4.x.x.js from flow-typed/npm/ to flow-typed/ (outside flow-typed/npm/ so it won't be overwritten by future flow-typed installs, and inside flow-typed/ so flow will automatically make declare blah statements global)
Right below the declare class express$Request... (so it's easy to find, and so it's above where it's used inside declare module...), I put:
declare class express$Request extends express$Request {
  user: any;
  isAuthenticated(): boolean;
  session: {
    loginForwardUrl: ?string;
  };
}
I do this instead of putting my custom props on the original class so that it's easy to see which props are custom.

Wireshark V2 plugin info column resets after applying filter

I have a basic Wireshark plugin that was originally written for Wireshark V1 and I'm currently trying to port it to V2.
The issue I'm currently having is that when the plugin is run in wireshark-qt-release, it starts fine and all the necessary information is displayed, but once a filter is set, the information contained within the info column gets cleared. The info column also stays empty after the filter is cleared.
The packet type variable used to set the string found in the info column is also added to the Header tree of the packet being dissected. This stays set with or without a filter being set.
Built on branch master-2.0, branch is up-to-date.
Built with MSVC 2013 and I get no errors or warnings.
I have also enabled the debug console in Wireshark but get nothing out of it, though that might be because I can't adjust the debug level, which is currently set to 28.
It works fine in the build of wireshark-gtk2 built from the same checkout.
Any help appreciated.
/* packet-trcp.c
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include "config.h"
#include <stdio.h>
#include <epan/dissectors/packet-tcp.h>
#define PROTO_TAG_TRCP "TRCP"
#define MAGIC_NUMBER 0x111111
#define FRAME_HEADER_LEN 8
#define TRCP_PORT 1111
static int proto_trcp = -1;
static dissector_handle_t data_handle = NULL;
static dissector_handle_t trcp_handle = NULL;
static void dissect_trcp(tvbuff_t * tvb, packet_info * pinfo, proto_tree * tree);
static int dissect_trcp_message(tvbuff_t * tvb, packet_info * pinfo, proto_tree * tree, void *);
static const value_string packet_type_names[] =
{
    { 0, "Invalid message" },
    { 1, "Connection Request" },
    { 2, "Connection Response" },
    { 3, "Disconnect" },
    { 4, "File Header" },
    { 5, "File Chunk" },
    { 6, "Cancel File Transfer" },
    { 7, "Firmware Imported" },
    { 8, "Alert Message" },
    { 9, "Restore Factory Settings" },
    { 10, "Format Internal Storage" },
    { 11, "Beacon" },
    { 12, "Shutdown" },
    { 0, NULL }
};
static gint hf_trcp_header = -1;
static gint hf_trcp_magic = -1;
static gint hf_trcp_length = -1;
static gint hf_trcp_type = -1;
static gint hf_trcp_data = -1;
static gint ett_trcp = -1;
static gint ett_trcp_header = -1;
static gint ett_trcp_data = -1;
//-----------------------------------------------------------------------------------------------------------------------
void proto_reg_handoff_trcp(void)
{
    static gboolean initialized = FALSE;
    if (!initialized)
    {
        data_handle = find_dissector("data");
        trcp_handle = create_dissector_handle(dissect_trcp, proto_trcp);
        dissector_add_uint("tcp.port", TRCP_PORT, trcp_handle);
        initialized = TRUE;
    }
}
//-----------------------------------------------------------------------------------------------------------------------
void proto_register_trcp (void)
{
    static hf_register_info hf[] =
    {
        {&hf_trcp_header,
            {"Header", "trcp.header", FT_NONE, BASE_NONE, NULL, 0x0, "TRCP Header", HFILL }},
        {&hf_trcp_magic,
            {"Magic", "trcp.magic", FT_UINT32, BASE_HEX, NULL, 0x0, "Magic Bytes", HFILL }},
        {&hf_trcp_length,
            {"Package Length", "trcp.len", FT_UINT16, BASE_DEC, NULL, 0x0, "Package Length", HFILL }},
        {&hf_trcp_type,
            {"Type", "trcp.type", FT_UINT16, BASE_DEC, VALS(packet_type_names), 0x0, "Package Type", HFILL }},
        {&hf_trcp_data,
            {"Data", "trcp.data", FT_NONE, BASE_NONE, NULL, 0x0, "Data", HFILL }}
    };
    static gint *ett[] =
    {
        &ett_trcp,
        &ett_trcp_header,
        &ett_trcp_data
    };
    proto_trcp = proto_register_protocol ("TRCP Protocol", "TRCP", "trcp");
    proto_register_field_array (proto_trcp, hf, array_length (hf));
    proto_register_subtree_array (ett, array_length (ett));
    register_dissector("trcp", dissect_trcp, proto_trcp);
}
//-----------------------------------------------------------------------------------------------------------------------
static guint get_trcp_message_len(packet_info * pinfo, tvbuff_t * tvb, int offset)
{
    guint plen;
    plen = tvb_get_ntohs(tvb, offset + 6);
    // Add the header length to the data length to get the total packet length
    plen += FRAME_HEADER_LEN;
    return plen;
}
//-----------------------------------------------------------------------------------------------------------------------
static void dissect_trcp(tvbuff_t * tvb, packet_info * pinfo, proto_tree * tree)
{
    // According to - 9.4.2. How to reassemble split TCP Packets
    tcp_dissect_pdus(tvb, pinfo, tree,     // Hand over from above
                     TRUE,                 // Reassemble packet or not
                     FRAME_HEADER_LEN,     // Smallest amount of data required to determine message length (8 bytes)
                     get_trcp_message_len, // Function pointer to a method that returns message length
                     dissect_trcp_message, // Function pointer to real message dissector
                     NULL);
}
//-----------------------------------------------------------------------------------------------------------------------
static int dissect_trcp_message(tvbuff_t * tvb, packet_info * pinfo, proto_tree * tree, void * data)
{
    proto_item * trcp_item = NULL;
    proto_item * trcp_sub_item_header = NULL;
    proto_item * trcp_sub_item_data = NULL;
    proto_tree * trcp_tree = NULL;
    proto_tree * trcp_header_tree = NULL;
    proto_tree * trcp_data_tree = NULL;
    guint32 magic = 0;
    guint32 offset = 0;
    guint32 length_tvb = 0;
    guint16 type = 0;
    guint16 length = 0;
    col_set_str(pinfo->cinfo, COL_PROTOCOL, PROTO_TAG_TRCP);
    col_clear(pinfo->cinfo, COL_INFO);
    if (tree)
    {
        trcp_item = proto_tree_add_item(tree, proto_trcp, tvb, 0, -1, FALSE);
        trcp_tree = proto_item_add_subtree(trcp_item, ett_trcp);
        trcp_sub_item_header = proto_tree_add_item(trcp_tree, hf_trcp_header, tvb, offset, -1, FALSE);
        trcp_header_tree = proto_item_add_subtree(trcp_sub_item_header, ett_trcp_header);
        /*
         4 bytes for magic number
         2 bytes for packet type
         2 bytes for data length
        */
        // Add Magic to header tree
        proto_tree_add_item(trcp_header_tree, hf_trcp_magic, tvb, offset, 4, FALSE);
        offset += 4;
        // Get the type byte
        type = tvb_get_ntohs(tvb, offset);
        // Add Type to header tree
        proto_tree_add_uint(trcp_header_tree, hf_trcp_type, tvb, offset, 2, type);
        offset += 2;
        col_append_fstr(pinfo->cinfo, COL_INFO, "%s", val_to_str(type, packet_type_names, "Unknown Type:0x%02x"));
        col_set_fence(pinfo->cinfo, COL_INFO);
        // Add Length to header tree
        length = tvb_get_ntohs(tvb, offset);
        proto_tree_add_uint(trcp_header_tree, hf_trcp_length, tvb, offset, 2, length);
        offset += 2;
        if (length)
        {
            // Data
            trcp_sub_item_data = proto_tree_add_item(trcp_tree, hf_trcp_data, tvb, offset, -1, FALSE);
            trcp_data_tree = proto_item_add_subtree(trcp_sub_item_data, ett_trcp_data);
        }
    }
    return tvb_captured_length(tvb);
}
You are setting the column only if the tree argument is non-NULL.
DO NOT DO THAT.
Always set it, regardless of whether tree is null or not. There is no guarantee that, if your dissector is called in order to provide column information, tree will be non-null; we have never provided such a guarantee, and we will never provide such a guarantee.
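Concretely, that means moving the column code in dissect_trcp_message() out of the if (tree) branch, along the lines of this sketch (tree building unchanged from the dissector above):
static int dissect_trcp_message(tvbuff_t * tvb, packet_info * pinfo, proto_tree * tree, void * data)
{
    guint16 type;
    col_set_str(pinfo->cinfo, COL_PROTOCOL, PROTO_TAG_TRCP);
    col_clear(pinfo->cinfo, COL_INFO);
    // The type field sits right after the 4-byte magic number, so it can be
    // read and written to the info column whether or not a tree is being built.
    type = tvb_get_ntohs(tvb, 4);
    col_append_fstr(pinfo->cinfo, COL_INFO, "%s", val_to_str(type, packet_type_names, "Unknown Type:0x%02x"));
    col_set_fence(pinfo->cinfo, COL_INFO);
    if (tree)
    {
        // ...build the protocol and header trees exactly as in the original dissector...
    }
    return tvb_captured_length(tvb);
}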
