Access the contents of the row inserted into DynamoDB using the PynamoDB save method (Chalice)

I have the below model for my DynamoDB table using PynamoDB:
from pynamodb.models import Model
from pynamodb.attributes import (
    UnicodeAttribute, UTCDateTimeAttribute, UnicodeSetAttribute, BooleanAttribute
)

class Reminders(Model):
    """Model class for the Reminders table."""
    # Information on global secondary index for the table
    # user_id (hash key) + reminder_id+reminder_title (sort key)
    class Meta:
        table_name = 'Reminders'
        region = 'eu-central-1'
    reminder_id = UnicodeAttribute(hash_key=True)
    user_id = UnicodeAttribute(range_key=True)
    reminder_title = UnicodeAttribute()
    reminder_tags = UnicodeSetAttribute()
    reminder_description = UnicodeAttribute()
    reminder_frequency = UnicodeAttribute(default='Only once')
    reminder_tasks = UnicodeSetAttribute(default=set())
    reminder_expiration_date_time = UTCDateTimeAttribute(null=True)
    reminder_title_reminder_id = UnicodeAttribute()
    next_reminder_date_time = UTCDateTimeAttribute()
    should_expire = BooleanAttribute()
When I want to create a new reminder, I do it through the below code:
class DynamoBackend:
    @staticmethod
    def create_a_new_reminder(new_reminder: NewReminder) -> Dict[str, Any]:
        """Create a new reminder using PynamoDB."""
        new_reminder = models.Reminders(**new_reminder.dict())
        return new_reminder.save()
In this case NewReminder is a pydantic base model, like so:
class NewReminder(pydantic.BaseModel):
    reminder_id: str
    user_id: str
    reminder_title: str
    reminder_description: str
    reminder_tags: Sequence[str]
    reminder_frequency: str
    should_expire: bool
    reminder_expiration_date_time: Optional[datetime.datetime]
    next_reminder_date_time: datetime.datetime
    reminder_title_reminder_id: str
When I call the save method on the model object, I receive the below response:
{
    "ConsumedCapacity": {
        "CapacityUnits": 2.0,
        "TableName": "Reminders"
    }
}
Now, my question: the save method is called directly by a Lambda function, which is in turn called by an API Gateway POST endpoint, so ideally the response should be a 201 Created. Instead of returning the consumed capacity and table name, it would be great if it returned the item inserted into the database. Below is my route code:
def create_a_new_reminder():
    """Creates a new reminder in the database."""
    request_context = app.current_request.context
    request_body = json.loads(app.current_request.raw_body.decode())
    request_body["reminder_frequency"] = data_structures.ReminderFrequency[request_body["reminder_frequency"]]
    reminder_details = data_structures.ReminderDetailsFromRequest.parse_obj(request_body)
    user_details = data_structures.UserDetails(
        user_name=request_context["authorizer"]["claims"]["cognito:username"],
        user_email=request_context["authorizer"]["claims"]["email"]
    )
    reminder_id = str(uuid.uuid1())
    new_reminder = data_structures.NewReminder(
        reminder_id=reminder_id,
        user_id=user_details.user_name,
        reminder_title=reminder_details.reminder_title,
        reminder_description=reminder_details.reminder_description,
        reminder_tags=reminder_details.reminder_tags,
        reminder_frequency=reminder_details.reminder_frequency.value[0],
        should_expire=reminder_details.should_expire,
        reminder_expiration_date_time=reminder_details.reminder_expiration_date_time,
        next_reminder_date_time=reminder_details.next_reminder_date_time,
        reminder_title_reminder_id=f"{reminder_details.reminder_title}-{reminder_id}"
    )
    return DynamoBackend.create_a_new_reminder(new_reminder=new_reminder)
I am very new to REST API creation and best practices, so it would be great if someone could guide me here. Thanks in advance!
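One possible direction (a minimal sketch, not a verified answer): save() only reports write metadata, so the route has to build the 201 response from data it already holds. The snippet below reuses the names from the question (models, NewReminder, DynamoBackend, json) and assumes Chalice's Response class and PynamoDB's attribute_values dict of a saved model.

from chalice import Response  # Chalice's response object for custom status codes

class DynamoBackend:
    @staticmethod
    def create_a_new_reminder(new_reminder: NewReminder) -> Dict[str, Any]:
        """Persist the reminder and return its attributes instead of the write metadata."""
        reminder = models.Reminders(**new_reminder.dict())
        reminder.save()
        return reminder.attribute_values  # assumption: PynamoDB's dict of the saved attributes

def create_a_new_reminder():
    ...  # build new_reminder exactly as in the route above
    created_item = DynamoBackend.create_a_new_reminder(new_reminder=new_reminder)
    return Response(
        body=json.dumps(created_item, default=str),  # default=str covers the datetime fields
        status_code=201,
        headers={"Content-Type": "application/json"},
    )

Alternatively, since the route already has the full new_reminder pydantic object, you could leave the backend untouched and simply return new_reminder.dict(), serialized the same way, alongside the 201.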

Related

Flask - adding datetime calculation into database

I'm trying to perform a small calculation and some logic on datetimes in a Flask application.
1.) The application will calculate the difference between the issue date and the expiry date, called "remaining days". The application will check if the remaining days are less than 365 and trigger a function.
I attempted the first piece of logic to manipulate the data and submit it to the database:
@bp.route('/cerpacs/add', methods=['GET', 'POST'])
@login_required
def add_cerpac():
    """
    Add a Cerpac / expatriate to the database
    """
    check_admin()
    add_cerpac = True
    form = CerpacForm()
    if form.validate_on_submit():
        cerpac = Cerpac(cerpac_serial_no=form.cerpac_serial_no.data,
                        cerpac_issue_date=form.cerpac_issue_date.data,
                        cerpac_exp_date=form.cerpac_exp_date.data,
                        employee=form.employee.data, )
        form.cerpac_issue_date.data = cerpac.cerpac_issue_date
        form.cerpac_exp_date.data = cerpac.cerpac_exp_date
        if request.method == 'POST':
            todays_date = datetime.now()
            t = cerpac.cerpac_issue_date
            t1 = cerpac.cerpac_exp_date
            remaining_days = t1 - t
            print(remaining_days)  # good - prints my result!
            remaining_days = cerpac.remaining_days  # ---- not adding to database
            try:
                # add cerpac to the database
                db.session.add(cerpac)
                db.session.commit()
                flash('You have successfully added a Cerpac.')
my model:
class Cerpac(db.Model):
    __tablename__ = 'cerpacs'
    id = db.Column(db.Integer, primary_key=True)
    cerpac_issue_date = db.Column(db.DateTime)
    cerpac_exp_date = db.Column(db.DateTime)
    remaining_days = db.Column(db.DateTime)
    cerpac_serial_no = db.Column(db.String(60))
    cerpac_upload = db.Column(db.String(20), default='cerpac.jpg')
    renew_status = db.Column(db.Boolean, default=False)
    process_status = db.Column(db.Boolean, default=False)
    renewcerpac_id = db.Column(db.Integer, db.ForeignKey('renewcerpacs.id'))
    employee_id = db.Column(db.Integer, db.ForeignKey('employees.id'))

    def __repr__(self):
        return '<Cerpac {}>'.format(self.name)
I want to add this to the database and eventually write a function like the one below. I also had a mistake in the code, because I got an error that issue_date is not defined. How do I define issue_date as a datetime variable?
def remaining_days(issue_date, expired_date):
    issue_date = datetime(issue_date)
    days_to_go = expired - issue
    if days_to_go == 365:
        renew_status == True
        print("time to renew")
    print("We are Ok")
I would simplify this to something like:
from datetime import datetime

class Cerpac(db.Model):
    ...
    cerpac_exp_date = db.Column(db.DateTime)
    ...

    @property
    def remaining_days(self):
        return (self.cerpac_exp_date - self.cerpac_issue_date).days

    @property
    def days_to_expiry(self):
        return (self.cerpac_exp_date - datetime.now()).days
Then, days_to_expiry and remaining_days become properties that are calculated when you query, and they update automatically when the cards are renewed.
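For illustration only, a minimal usage sketch reusing the model above (the 365-day threshold and the renew_status flag come from the question; the query assumes a row already exists):

cerpac = Cerpac.query.first()          # any existing record
print(cerpac.remaining_days)           # full validity period in days
if cerpac.days_to_expiry < 365:        # the check described in the question
    cerpac.renew_status = True         # time to renew
    db.session.commit()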

How to get/build a JavaRDD[DataSet]?

When I use deeplearning4j and try to train a model in Spark:
public MultiLayerNetwork fit(JavaRDD<DataSet> trainingData)
fit() needs a JavaRDD<DataSet> parameter. I tried to build one like this:
val totalDaset = csv.map(row => {
    val features = Array(
        row.getAs[String](0).toDouble, row.getAs[String](1).toDouble
    )
    val labels = Array(row.getAs[String](21).toDouble)
    val featuresINDA = Nd4j.create(features)
    val labelsINDA = Nd4j.create(labels)
    new DataSet(featuresINDA, labelsINDA)
})
but IDEA shows the hint: No implicit arguments of type: Encoder[DataSet]
It's an error and I don't know how to solve this problem.
I know a Spark RDD can be transformed into a JavaRDD, but I don't know how to build a Spark RDD[DataSet].
DataSet comes from org.nd4j.linalg.dataset.DataSet.
Its constructor is:
public DataSet(INDArray first, INDArray second) {
    this(first, second, (INDArray)null, (INDArray)null);
}
This is my code:
val spark: SparkSession = {
    SparkSession
        .builder()
        .master("local")
        .appName("Spark LSTM Emotion Analysis")
        .getOrCreate()
}
import spark.implicits._
val JavaSC = JavaSparkContext.fromSparkContext(spark.sparkContext)

val csv = spark.read.format("csv")
    .option("header", "true")
    .option("sep", ",")
    .load("/home/hadoop/sparkjobs/LReg/data.csv")

val totalDataset = csv.map(row => {
    val features = Array(
        row.getAs[String](0).toDouble, row.getAs[String](1).toDouble
    )
    val labels = Array(row.getAs[String](21).toDouble)
    val featuresINDA = Nd4j.create(features)
    val labelsINDA = Nd4j.create(labels)
    new DataSet(featuresINDA, labelsINDA)
})
val data = totalDataset.toJavaRDD
Creating a JavaRDD<DataSet> in Java, from the deeplearning4j official guide:
String filePath = "hdfs:///your/path/some_csv_file.csv";
JavaSparkContext sc = new JavaSparkContext();
JavaRDD<String> rddString = sc.textFile(filePath);
RecordReader recordReader = new CSVRecordReader(',');
JavaRDD<List<Writable>> rddWritables = rddString.map(new StringToWritablesFunction(recordReader));

int labelIndex = 5;        // Labels: a single integer representing the class index in column number 5
int numLabelClasses = 10;  // 10 classes for the label
JavaRDD<DataSet> rddDataSetClassification = rddWritables.map(new DataVecDataSetFunction(labelIndex, numLabelClasses, false));
I tried to create it in Scala:
val JavaSC: JavaSparkContext = new JavaSparkContext()
val rddString: JavaRDD[String] = JavaSC.textFile("/home/hadoop/sparkjobs/LReg/hf-data.csv")
val recordReader: CSVRecordReader = new CSVRecordReader(',')
val rddWritables: JavaRDD[List[Writable]] = rddString.map(new StringToWritablesFunction(recordReader))
val featureColnum = 3
val labelColnum = 1
val d = new DataVecDataSetFunction(featureColnum, labelColnum, true, null, null)
// val rddDataSet: JavaRDD[DataSet] = rddWritables.map(new DataVecDataSetFunction(featureColnum, labelColnum, true, null, null))
// cannot resolve overloaded method 'map'
Debug error information:
A DataSet is just a pair of INDArrays (inputs and labels).
Our docs cover this in depth:
https://deeplearning4j.konduit.ai/distributed-deep-learning/data-howto
For Stack Overflow's sake, I'll summarize what's there, since there's no "one" way to create a data pipeline. It's relative to your problem. It's very similar to how you would create a dataset locally; generally you want to take whatever you do locally and put that into Spark in a function.
CSVs and images, for example, are going to be very different. But generally you use the DataVec library to do that. The docs summarize the approach for each kind.

write an empty dict field using mongoengine

I have observed mongo documents for which document[field] returns {}, but I can't seem to create such a document. Example:
import mongoengine
mongoengine.connect('FOO', host='localhost', port=27017)

class Foo(mongoengine.Document):
    some_dict = mongoengine.DictField()
    message = mongoengine.StringField()

ID = '59b97ec7c5d65e0c4740b886'
foo = Foo()
foo.some_dict = {}
foo.id = ID
foo.save()
But when I query the record, some_dict is not among the fields, so the last line throws an error:
import pymongo
CON = pymongo.MongoClient('localhost', 27017)
x = CON.FOO.foo.find_one()
assert str(x['_id']) == ID
assert 'some_dict' in x.keys()
One obvious workaround is to update the document after it's created, but it feels like too much of a hack. I would prefer if mongoengine made it easy to do directly:
import mongoengine
import pymongo

CON = pymongo.MongoClient('localhost', 27017)
CON.Foo.foo.remove()
mongoengine.connect('FOO', host='localhost', port=27017)

class Foo(mongoengine.Document):
    some_dict = mongoengine.DictField()
    some_list = mongoengine.ListField()

ID = '59b97ec7c5d65e0c4740b886'
foo = Foo()
foo.id = ID
foo.save()

from bson.objectid import ObjectId
CON.FOO.foo.update_one({'_id': ObjectId(ID)},
                       {'$set': {'some_dict': {}}}, upsert=False)

x = CON.FOO.foo.find_one()
assert str(x['_id']) == ID
assert 'some_dict' in x.keys()

How to get the class reference from KParameter in kotlin?

The code below is about reflection.
It tries to do two things:
case1() creates an instance from the SimpleStudent class; it works.
case2() creates an instance from the Student class; it does not work.
The reason case2() does not work, and the question itself, is that inside generateValue():
I don't know how to check whether a parameter is a Kotlin type or my own type (I have a dirty way of checking that param.type.toString() does not contain "kotlin", but I wonder if there is a better solution).
I don't know how to get its class reference when it's a custom class. The problem is that even though param.type.toString() == "Lesson", when I try to get param.type::class, it is class kotlin.reflect.jvm.internal.KTypeImpl.
So, how do I solve it? Thanks.
==============
import kotlin.reflect.KParameter
import kotlin.reflect.full.primaryConstructor
import kotlin.test.assertEquals

data class Lesson(val title: String, val length: Int)
data class Student(val name: String, val major: Lesson)
data class SimpleStudent(val name: String, val age: Int)

fun generateValue(param: KParameter, originalValue: Map<*, *>): Any? {
    var value = originalValue[param.name]
    // if (param.type is not Kotlin type){
    //     // Get its ::class so that we could create the instance of it, here, I mean Lesson class?
    // }
    return value
}

fun case1() {
    val classDesc = SimpleStudent::class
    val constructor = classDesc.primaryConstructor!!
    val value = mapOf<Any, Any>(
        "name" to "Tom",
        "age" to 16
    )
    val params = constructor.parameters.associateBy(
        { it },
        { generateValue(it, value) }
    )
    val result: SimpleStudent = constructor.callBy(params)
    assertEquals("Tom", result.name)
    assertEquals(16, result.age)
}

fun case2() {
    val classDesc = Student::class
    val constructor = classDesc.primaryConstructor!!
    val value = mapOf<Any, Any>(
        "name" to "Tom",
        "major" to mapOf<Any, Any>(
            "title" to "CS",
            "length" to 16
        )
    )
    val params = constructor.parameters.associateBy(
        { it },
        { generateValue(it, value) }
    )
    val result: Student = constructor.callBy(params)
    assertEquals("Tom", result.name)
    assertEquals(Lesson::class, result.major::class)
    assertEquals("CS", result.major.title)
}

fun main(args: Array<String>) {
    case1()
    case2()
}
Problem solved:
You can get that ::class by using param.type.classifier as KClass<T>, where param is a KParameter.

web2py SQLFORM.grid url

When I try to put form = SQLFORM.grid(db.mytable) in my controller, the request changes to my/web/site/view?_signature=520af19b1095db04dda2f1b6cbea3a03c3551e13, which causes the if statement in my controller to fail. Can somebody please explain why this happens?
If I put user_signature=False, then on view load the grid is shown (though it looks awful, and I still need to find out how to change the view of my table), but on clicking search, edit, etc., the same thing happens again: the URL is changed and I get an error.
Any suggestions?
thank you
EDIT
This is my edit function:
@auth.requires_login()
def edit():
    # Load workers
    workers = db(db.worker.w_organisation == 10).select(db.worker.w_id_w, db.worker.w_organisation, db.worker.w_first_name, db.worker.w_last_name, db.worker.w_nick_name, db.worker.w_email, db.worker.w_status, db.worker.w_note).as_list()
    # Define the query object. Here we are pulling all contacts having date of birth less than 18 Nov 1990
    query = ((db.worker.w_organisation == 10) & (db.worker.w_status == db.status.s_id_s))
    # Define the fields to show on the grid. Note: you need to specify the id field in the fields section in 1.99.2
    fields = (db.worker.w_first_name, db.worker.w_last_name, db.worker.w_nick_name, db.worker.w_email, db.status.s_code, db.worker.w_note)
    # Define headers as tuples/dictionaries
    headers = {'worker.w_first_name': 'Ime',
               'worker.w_last_name': 'Priimek',
               'worker.w_nick_name': 'Vzdevek',
               'worker.w_email': 'E-posta',
               'status.s_code': 'Status',
               'worker.w_note': 'Komentar'}
    # Let's specify a default sort order on the date_of_birth column in the grid
    default_sort_order = [db.worker.w_last_name]
    # Creating the grid object
    form = SQLFORM.grid(query=query, fields=fields, headers=headers, searchable=True, orderby=default_sort_order, create=True, \
                        deletable=True, editable=True, maxtextlength=64, paginate=25, user_signature=False
                        )
    form = SQLFORM.grid(db.worker, user_signature=False)
    workersDb = db((db.worker.w_organisation == 10) & (db.worker.w_status == db.status.s_id_s)).select(db.worker.w_id_w, \
                  db.worker.w_organisation, db.worker.w_first_name, \
                  db.worker.w_last_name, db.worker.w_nick_name, db.worker.w_email, \
                  db.status.s_code, db.worker.w_note).as_list()
    workersList = []
    for rec in workersDb:
        status = rec['status']['s_code']
        workers = rec['worker']
        if not rec["worker"]["w_first_name"]:
            polno_ime = rec["worker"]["w_last_name"]
        elif not rec["worker"]["w_last_name"]:
            polno_ime = rec["worker"]["w_first_name"]
        else:
            polno_ime = rec["worker"]["w_first_name"] + " " + rec["worker"]["w_last_name"]
        rec["worker"]['w_full_name'] = polno_ime
        rec["worker"]["w_status"] = status
        data = rec["worker"]
        # print rec
        # print data
        workersList.append(rec["worker"])
    # If the type of the arg is int, we know that the user wants to edit a script with an id of the argument
    if (request.args[0].isdigit()):
        script = db(getDbScript(request.args[0])).select(db.script.sc_lls, db.script.sc_name, db.script.id, db.script.sc_menu_data).first()
        formData = str(script["sc_menu_data"])
        # form = SQLFORM.grid(db.auth_user)
        # print formData
        # If we don't get any results, that means the user is not giving a proper request and we show him an error
        # print script
        # Parsing script to be inserted into view
        if not script:
            return error(0)
        return dict(newScript=False, script=script, formData=formData, workers=workersList, form=form)
    # If the argument is 'new' we prepare the page for a new script
    elif request.args[0] == 'new':
        scripts = db((auth.user.organization == db.script.sc_organization)).select(db.script.sc_name, db.script.id, workers=workersList, form=form)
        return dict(newScript=True, scripts=scripts, workers=workersList, form=form)
    # Else error
    else:
        return error(0)
Also, not to mention the SQLFORM.grid looks awful; here is a link to a picture: https://plus.google.com/103827646559093653557/posts/Bci4PCG4BQQ
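A hedged sketch of one commonly suggested adjustment (not a verified fix for this exact app): SQLFORM.grid generates its own signed URLs and consumes request.args, so mixing it into an action that also interprets request.args[0] ('new', a numeric id, ...) tends to break both. Keeping the grid in a dedicated action lets its signatures resolve; the function name workers_grid below is made up for illustration, and the query is copied from the question.

@auth.requires_login()
def workers_grid():
    # Dedicated action: the grid's search/edit/delete links all point back here,
    # so its _signature args never collide with the edit() action's own args.
    query = ((db.worker.w_organisation == 10) & (db.worker.w_status == db.status.s_id_s))
    grid = SQLFORM.grid(query, searchable=True, create=True, editable=True, deletable=True,
                        maxtextlength=64, paginate=25)
    return dict(form=grid)

The view for that action can then just render {{=form}}, while the existing edit() action keeps its own argument handling.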
