Best practice to fetch data after the partial update through server streaming on gRPC - grpc

I am making Task List using Go, gRPC-Web and Postgres.
How can I fetch the data with server streaming after updating existing data?
With the following data,
Post data with Task{ id: 1, name: 'abc1' }
Post data with Task{ id: 2, name: 'abc2' }
Post data with Task{ id: 3, name: 'abc3' }
when I call GetTasks, I get all 3 tasks. However, if Task{ id: 2, name: 'abc2' } is updated to Task{ id: 2, name: 'new abc2' } by UpdateTask, how can I get the new data?
Does GetTasks fetch this automatically? or Do I need to do something to get the updated data?
syntax = "proto3";
package tasklist;
import "google/protobuf/empty.proto";

// Task-list service. GetTasks streams one GetTasksResponse per task at the
// time of the call; the server does not push later UpdateTask changes into an
// already-finished stream — the client must call GetTasks again (or the server
// must keep the stream open and re-send) to observe updates.
service TodoList {
  rpc GetTasks(google.protobuf.Empty) returns (stream GetTasksResponse) {}
  // Fixed: was "returns (PostTaskRequest)" — now returns the response message
  // that was already defined below but unused.
  rpc PostTask(PostTaskRequest) returns (PostTaskResponse) {}
  // Fixed: "PostTaskmRequest" was an undefined message (typo); UpdateTask now
  // uses the UpdateTaskRequest/UpdateTaskResponse pair defined below.
  rpc UpdateTask(UpdateTaskRequest) returns (UpdateTaskResponse) {}
}

message Task {
  int64 id = 1;
  string name = 2;
}

message GetTasksResponse {
  Task task = 1;
}

message PostTaskRequest {
  // NOTE(review): field name kept as "Task" for generated-code compatibility,
  // though proto style prefers lower_snake_case ("task").
  Task Task = 1;
}

message PostTaskResponse {
  bool result = 1;
}

message UpdateTaskRequest {
  Task Task = 1;
}

message UpdateTaskResponse {
  bool result = 1;
}

Related

Why does HotelSearch return an ERR.NGHP-DISTRIBUTION.INTERNAL_ERROR?

I am trying to use the HotelSearch REST api. I have tested it on the Dev Studio website by Sabre and while it is slow to respond it works on a Chrome browser. I then tried to get this working in the sample app Rest2SG Sabre provide, as well as Postman. However it returns this error on both platforms:
Here is the method I have added to the sample app:
private void getHotelSearch()
{
Job job = new Job("Getting HotelSearch")
{
#Override
protected IStatus run(IProgressMonitor monitor)
{
setText("Waiting for response...");
toggleAllButtons(false);
try
{
Rest2SgRequest request =
lockId > 0 ? new Rest2SgRequest(lockId) : new Rest2SgRequest();
// for the list of available service action names
// please refer to REST documentation
// this same as action in redapp.xml authorization
request.setUrl("/v2.0.0/hotel/search");
//request.setHeaders(getContentDescription())
// previously generated document, normally developer will
// have to prepare one by himself
String payload = getRequestBody("sample.json"); // we
// preload
request.setPayload(payload);
request.setHttpMethod(HTTPMethod.POST);
request.setContentType("application/json");
request.setAuthTokenType(AuthTokenType.SESSIONLESS);
Rest2SgServiceClient client = new Rest2SgServiceClient(COM);
ClientResponse <Rest2SgResponse> rsp = client.send(request);
LOGGER.info("Rest2Sg request processing success: " + rsp.isSuccess());
if (rsp.isSuccess())
{
// check if processing ended in with success
Rest2SgResponse response = rsp.getPayload();
String responseBody = response.getResponseBody();
response.getResponseCode();
response.getResponseHeaders();
setText(responseBody);
}
else
{
printErrors(rsp.getErrors());
System.out.println(rsp.getErrors().toString());
}
}
catch (Exception e)
{
e.printStackTrace();
}
toggleAllButtons(true);
return Status.OK_STATUS;
}
};
job.schedule();
}
Here is the sample JSON:
{
"HotelSearchRQ": {
"POS": {
"Source": {
"PseudoCityCode": "43X5"
}
},
"SearchCriteria": {
"MaxResults": 20,
"SortBy": "DistanceFrom",
"SortOrder": "ASC",
"TierLabels": false,
"GeoSearch": {
"GeoRef": {
"Radius": 2,
"UOM": "MI",
"RefPoint": {
"Value": "DFW",
"ValueContext": "CODE",
"RefPointType": "6",
"StateProv": "TX",
"CountryCode": "US"
}
}
}
}
}
}
Does anyone know why I am getting the error below?
[Error [code=400, description={"errorCode":"ERR.NGHP-DISTRIBUTION.INTERNAL_ERROR","message":"Error occurred while invoking service restish:convertToOutputFormat:1.71.3","status":"Incomplete","type":"Application","timeStamp":"2022-06-20T21:00:51-05"}, type=HTTP]]
As I mentioned this JSON works on their website.
This problem is caused by missing header information. The following line needs to be added to the request:
request.setHeaders("{\"Accept\": \"application/json\"}");

Azure Bicep template deployment failure with error: "The value of parameter linuxConfiguration.ssh.publicKeys.path is invalid.\"

I created a Bicep template to deploy on Azure (using bash) a Linux VM with the associated resources (NIC, VNet, subnet, public IP). Part of the deployment fails; all the associated resources are deployed but the VM itself fails to deploy.
The error is as follows:
{"status":"Failed","error":{"code":"DeploymentFailed","message":"At least one resource deployment operation failed. Please list deployment operations for details. Please see https://aka.ms/DeployOperations for usage details.","details":[{"code":"BadRequest","message":"{\r\n "error": {\r\n "code": "InvalidParameter",\r\n "message": "Destination path for SSH public keys is currently limited to its default value /home/user/.ssh/authorized_keys due to a known issue in Linux provisioning agent.",\r\n "target": "linuxConfiguration.ssh.publicKeys.path"\r\n }\r\n}"}]}}
The bicep template provided by microsoft uses the path: '/home/${adminUsername}/.ssh/authorized_keys'
I can't seem to figure out a way for it to deploy. Any assistance would be greatly appreciated.
Here is the bicep file that causes the error:
// NOTE(review): the original paste garbled every Bicep decorator's "@" into
// "#" ("#description", "#allowed", "#secure"); restored below.
@description('Name of the VM')
param vmName string = 'stagingLinuxVM'

@description('location for all resources')
param location string = resourceGroup().location

@description('vm sizes allowed RAM & temp storage in GiB per tier (respectively): 0.5/4; 1/4; 2/4; 4/8; 8/16')
@allowed([
  'Standard_B1s'
  'Standard_B1ms'
  'Standard_B2s'
  'Standard_B2ms'
])
param vmSize string = 'Standard_B1s'

@description('Username for the VM')
param adminUsername string

@description('SSH Key for the Virtual Machine')
@secure()
param adminPasswordKey string

@description('name of VNET')
param virtualNetworkName string = 'vnet'

@description('name of the subnet in the virtual network')
param subnetName string = 'Subnet'

// Globally-unique DNS label derived from the VM name and resource-group id.
param dnsLabelPrefix string = toLower('${vmName}-${uniqueString(resourceGroup().id)}')
// Disk/network naming and address-space constants used by the resources below.
var osDiskType = 'Standard_LRS'
var networkInterfaceName = '${vmName}nic'
var addressPrefix = '10.1.0.0/16'
var publicIPAddressName = '${vmName}PublicIP'
var subnetAddressPrefix = '10.1.0.0/24'
// SSH-key-only login configuration attached to the VM's osProfile.
// NOTE(review): the publicKeys path interpolates adminUsername, which is the
// only value the platform accepts ('/home/<adminUsername>/.ssh/authorized_keys');
// the reported deployment error suggests the provisioning agent rejected the
// resolved path — confirm the adminUsername value supplied at deploy time
// contains no path characters, and that adminPasswordKey is an SSH public key.
var linuxConfiguration = {
disablePasswordAuthentication: true
provisionVMAgent: true
ssh: {
publicKeys: [
{
path: '/home/${adminUsername}/.ssh/authorized_keys'
keyData: adminPasswordKey
}
]
}
}
// Network interface joining the subnet and the public IP.
// Fixed: API version separator garbled from "@" to "#" in the original paste.
resource nic 'Microsoft.Network/networkInterfaces@2021-08-01' = {
  name: networkInterfaceName
  location: location
  properties: {
    ipConfigurations: [
      {
        name: 'ipconfig1'
        properties: {
          subnet: {
            id: subnet.id
          }
          privateIPAllocationMethod: 'Dynamic'
          publicIPAddress: {
            id: publicIP.id
          }
        }
      }
    ]
  }
}
// Virtual network holding the VM subnet.
// Fixed: API version separator garbled from "@" to "#" in the original paste.
resource vnet 'Microsoft.Network/virtualNetworks@2021-08-01' = {
  name: virtualNetworkName
  location: location
  properties: {
    addressSpace: {
      addressPrefixes: [
        addressPrefix
      ]
    }
  }
}
// Subnet declared as a child of the VNet (parent syntax avoids name nesting).
// Fixed: API version separator garbled from "@" to "#" in the original paste.
resource subnet 'Microsoft.Network/virtualNetworks/subnets@2021-08-01' = {
  parent: vnet
  name: subnetName
  properties: {
    addressPrefix: subnetAddressPrefix
    privateEndpointNetworkPolicies: 'Enabled'
    privateLinkServiceNetworkPolicies: 'Enabled'
  }
}
// Dynamic Basic-SKU public IP with a DNS label for SSH access.
// Fixed: API version separator garbled from "@" to "#" in the original paste.
resource publicIP 'Microsoft.Network/publicIPAddresses@2021-08-01' = {
  name: publicIPAddressName
  location: location
  sku: {
    name: 'Basic'
  }
  properties: {
    publicIPAllocationMethod: 'Dynamic'
    publicIPAddressVersion: 'IPv4'
    dnsSettings: {
      domainNameLabel: dnsLabelPrefix
    }
    idleTimeoutInMinutes: 4
  }
}
// The Linux VM itself.
// Fixed: API version separator garbled from "@" to "#" in the original paste.
resource vm 'Microsoft.Compute/virtualMachines@2021-11-01' = {
  name: vmName
  location: location
  properties: {
    hardwareProfile: {
      vmSize: vmSize
    }
    osProfile: {
      // FIX: adminPassword removed. linuxConfiguration sets
      // disablePasswordAuthentication: true, and supplying a password (here it
      // was even the SSH key, adminPasswordKey) alongside that is contradictory
      // and rejected by the platform. SSH-key auth comes from linuxConfiguration.
      adminUsername: adminUsername
      computerName: vmName
      linuxConfiguration: linuxConfiguration
    }
    storageProfile: {
      imageReference: {
        offer: 'UbuntuServer'
        publisher: 'Canonical'
        sku: '18.04-LTS'
        version: 'latest'
      }
      osDisk: {
        createOption: 'FromImage'
        deleteOption: 'Delete'
        diskSizeGB: 32
        osType: 'Linux'
        managedDisk: {
          storageAccountType: osDiskType
        }
      }
    }
    networkProfile: {
      networkInterfaces: [
        {
          id: nic.id
        }
      ]
    }
  }
}
output adminUsername string = adminUsername
output hostname string = publicIP.properties.dnsSettings.fqdn
// Fixed: "@" in the ssh command was garbled to "#" in the original paste.
// NOTE(review): output name "sshComand" (typo) kept for compatibility with
// any scripts already reading this output.
output sshComand string = 'ssh ${adminUsername}@${publicIP.properties.dnsSettings.fqdn}'

Repeated messages in chatView. how to clear view?

I have a chatView with a list of chatRow Views (messages)
each chatView has a snapshot listener with firebase, so I should get real time updates if I add a new message to the conversation
The problem I have is: when I add a new message my chatView shows ALL the messages I added before plus the new message, PLUS the same list again....if I add another message then the list repeats again
I assume I need to drop/refresh the previous views shown in the Foreach loop...how can I drop/refresh the view so it can receive refreshed NON repeated data?
/// Chat screen: a scrolling list of message rows plus an input bar for
/// composing and sending a new message.
/// Fixed: every property-wrapper "@" was garbled to "#" in the original paste
/// ("#EnvironmentObject", "#State").
struct ChatView: View {
    @EnvironmentObject var chatModel: ChatsViewModel
    let chat: Conversation
    let user = UserService.shared.user
    @State var messagesSnapshot = [Message]()
    @State var newMessageInput = ""
    var body: some View {
        NavigationView {
            VStack {
                ScrollViewReader { scrollView in
                    ScrollView {
                        ForEach(chat.messages, id: \.id) { message in
                            // Style the row differently for the current user's own messages.
                            if user.name == message.createdBy {
                                ChatRow(message: message, isMe: true)
                            } else {
                                ChatRow(message: message, isMe: false)
                            }
                        }
                        // Jump to the newest message when the list appears.
                        .onAppear(perform: { scrollView.scrollTo(chat.messages.count - 1) })
                    }
                }
                Spacer()
                // Send a new message
                ZStack {
                    Rectangle()
                        .foregroundColor(.white)
                    RoundedRectangle(cornerRadius: 20)
                        .stroke(Color("LightGrayColor"), lineWidth: 2)
                        .padding()
                    HStack {
                        TextField("New message...", text: $newMessageInput, onCommit: {
                            print("Send Message")
                        })
                        .padding(30)
                        Button(action: {
                            chatModel.sendMessageChat(newMessageInput, in: chat, chatid: chat.id ?? "")
                            print("Send message.")
                        }) {
                            Image(systemName: "paperplane")
                                .imageScale(.large)
                                .padding(30)
                        }
                    }
                }
                .frame(height: 70)
            }
            .navigationTitle("Chat")
        }
    }
}
function to add message to the conversation
/// Subscribes to the conversation's "messages" subcollection (ordered by date)
/// and replaces chats[index].messages wholesale on every snapshot.
/// FIX for the duplicated-rows bug: `mensajesTotal` is now declared INSIDE the
/// snapshot listener, so each update rebuilds the list from scratch instead of
/// appending the full document set to the accumulated previous snapshots.
func addMessagesToConv(conversation: Conversation, index: Int) {
    let ref = self.db.collection("conversations").document(conversation.id!).collection("messages")
        .order(by: "date")
        .addSnapshotListener { querySnapshotmsg, error in
            if error == nil {
                // Fresh accumulator per snapshot (was previously outside the
                // closure, which caused the repeated messages).
                var mensajesTotal = [Message]()
                // Loop through the message documents in this snapshot.
                for msgDoc in querySnapshotmsg!.documents {
                    var m = Message() // empty Message struct
                    m.createdBy = msgDoc["created_by"] as? String ?? ""
                    m.date = msgDoc["date"] as? Timestamp ?? Timestamp()
                    m.msg = msgDoc["msg"] as? String ?? ""
                    m.id = msgDoc.documentID // Firebase auto id
                    mensajesTotal.append(m)
                }
                // Replace the list once per snapshot, after the loop (the old
                // code reassigned it inside the loop on every document).
                self.chats[index].messages = mensajesTotal
            } else {
                print("error: \(error!.localizedDescription)")
            }
        }
}
You've defined mensajesTotal outside of your snapshot listener. So, it's getting appended to every time.
To fix this, move this line:
var mensajesTotal = [Message]()
to inside the addSnapshotListener closure.
You have two options:
Clear mensajesTotal each time you get an update from the database, as @jnpdx's answer shows.
Process the more granular updates in querySnapshotmsg.documentChanges to perform increment updates in your UI, as also shown in the documentation on detecting changes between snapshots.
There is no difference in the data transferred between client and server between these approaches, so use whatever is easiest (that'd typically be #1) or most efficient on the UI (that's usually #2).

Writing logs with cloud logging

I'm having problems with using google-cloud/logging. My objective is to write to a log which is created weekly, and I have previously managed to do that. However since yesterday I kept getting this error:
Error: 3 INVALID_ARGUMENT: A monitored resource must be specified for each log entry.
So I updated the google-cloud/logging to the latest version(5.2.2) after reading up about a similar issue of monitored resource not being set automatically. Which did take care of that error, however the logs are not showing up in logs viewer after that change.
My code for the logger utility is as follows
const { Logging } = require('#google-cloud/logging');
exports.LoggingUtil = class LoggingUtil {
constructor(context){
var LogMetadata = {
severity: "INFO",
type: "gce_instance",
labels: {
function_name: process.env.FUNCTION_NAME,
project: process.env.GCLOUD_PROJECT,
region: process.env.FUNCTION_REGION
}
}
this.metadata = {
LogMetadata,
labels: {
execution_id: `${context.eventId}`
}
}
const logging = new Logging();
this.log = logging.log(this.getWeekStamp());
}
getWeekStamp(){
const environmentName = constants.environment.name;
var d = new Date();
var day = d.getDay(),
diff = d.getDate() - day + (day == 0 ? -6:1);
date = new Date(d.setDate(diff)).toLocaleDateString('en-US', { day: '2-digit', month: 'short', year: 'numeric'});
date = date.replace(" ", "-");
return `${date.replace(", ","-")}-week-${environmentName}`;
}
write(text){
var entry = this.log.entry(this.metadata, text);
this.log.write(entry);
}
}
What have I done wrong with this? Any help is appreciated 🙏
I think that your error is related to the way the metadata variable is built, because it creates a malformed object that is not readable by the Logs Viewer.
In your method constructor you are creating a metadata object similar to this:
{ "LogMetadata":{
"severity":"INFO",
"type":"gce_instance",
"labels":{
"function_name":process.env.FUNCTION_NAME,
"project":process.env.GCLOUD_PROJECT,
"region":process.env.FUNCTION_REGION
}
},
"labels":{
"execution_id":`${context.eventId}`
}}
that is not a valid MonitoredResource, you can change your code in order to create a valid MonitoredResource for example
var LogMetadata = {
severity: "INFO",
type: "gce_instance",
labels: {
function_name: process.env.FUNCTION_NAME,
project: process.env.GCLOUD_PROJECT,
region: process.env.FUNCTION_REGION
}
}
this.metadata = LogMetadata
this.metadata.labels["execution_id"] = `${context.eventId}`
Example result object
{"severity":"INFO",
"type":"gce_instance",
"labels":{"function_name":process.env.FUNCTION_NAME,
"project":process.env.GCLOUD_PROJECT,
"region": process.env.FUNCTION_REGION,
"execution_id":`${context.eventId}`}
}
Additionally, you can check this example file as a reference to write logs using nodeJS.

Invoking repo actions from multi-select

I've created an UI action using various guides (include Jeff Potts really great ones) successfully and it function exactly as expected - but I want to add that action to the multi-select tool as well. It has been really difficult finding much documentation.
Some things I've tried:
Tried to find out if there was an applicable actionGroup - which there doesn't seem to be.
Tried adding the multi-select tags to my share-config-custom.xml to define the item - it shows up, but I obviously can't seem to use the action ID to reference that action.
My next step was to try and create a js file with a registerAction function in it, which I am able to do and have it run (I can see the console.log dump) but I don't really have any idea how I would go about invoking my repo action from there).
How can I complete this task?
There is already an existing function for invoking a repository custom action. This function is defined inside the file below.
share-war\components\documentlibrary\actions.js
You can take reference of below code for invoking the repository action.
// Invokes a repository-side custom action against a single node via the
// "api/actionQueue" web script. `record` is the node the action runs on;
// `owner` is the menu element that triggered it (its title doubles as a
// debounce flag, see ACE-2470 below).
onActionSimpleRepoAction: function dlA_onActionSimpleRepoAction(record, owner)
{
//ACE-2470 : Clone: Clicking multiple times the simple Workflow approval menu item gives unexpected results.
// Only proceed if this menu item has not already been deactivated by a prior click.
if (owner.title.indexOf("_deactivated") == -1)
{
// Get action params
var params = this.getAction(record, owner).params,
displayName = record.displayName,
namedParams = ["function", "action", "success", "successMessage", "failure", "failureMessage", "async"],
repoActionParams = {};
// Everything in params that is not one of the framework-reserved names above
// is forwarded to the repository action as a parameter value.
for (var name in params)
{
if (params.hasOwnProperty(name) && !Alfresco.util.arrayContains(namedParams, name))
{
repoActionParams[name] = params[name];
}
}
//Deactivate action (re-enabled in the failure handler below; success refreshes the view)
var ownerTitle = owner.title;
owner.title = owner.title + "_deactivated";
// Optional query string asking the repo to queue the action asynchronously.
var async = params.async ? "async=" + params.async : null;
// Prepare genericAction config
var config =
{
success:
{
// Fire a metadataRefresh so the document list re-renders after the action.
event:
{
name: "metadataRefresh",
obj: record
}
},
failure:
{
message: this.msg(params.failureMessage, displayName),
// Restore the original title so the menu item becomes clickable again.
fn: function showAction()
{
owner.title = ownerTitle;
},
scope: this
},
webscript:
{
method: Alfresco.util.Ajax.POST,
stem: Alfresco.constants.PROXY_URI + "api/",
name: "actionQueue",
queryString: async
},
config:
{
requestContentType: Alfresco.util.Ajax.JSON,
dataObj:
{
actionedUponNode: record.nodeRef,
actionDefinitionName: params.action,
parameterValues: repoActionParams
}
}
};
// Add configured success callbacks and messages if provided
if (YAHOO.lang.isFunction(this[params.success]))
{
config.success.callback =
{
fn: this[params.success],
obj: record,
scope: this
};
}
if (params.successMessage)
{
config.success.message = this.msg(params.successMessage, displayName);
}
// Add configured failure callback and message if provided
if (YAHOO.lang.isFunction(this[params.failure]))
{
config.failure.callback =
{
fn: this[params.failure],
obj: record,
scope: this
};
}
if (params.failureMessage)
{
config.failure.message = this.msg(params.failureMessage, displayName);
}
// Execute the repo action
this.modules.actions.genericAction(config);
}
},

Resources