Slick & SQLite: Could not instantiate class tests.SqliteSpec: org.sqlite.JDBC

I'm trying to start an app with Scala, Play, Slick, specs2 & SQLite. Here is a trait for SQLite integration:
package models {

  import scala.slick.driver.SQLiteDriver.simple._
  import scala.slick.lifted.ProvenShape
  import java.sql.Date

  import metier.Objets._

  trait sqlite {

    val db = Database.forURL("jdbc:sqlite:rdvs.txt", driver = "org.sqlite.JDBC")
    //val db = Database.forDataSource(DB.getDataSource())

    class Personnes(tag: Tag) extends Table[Rdv](tag, "RDV") {
      def id = column[Int]("ID", O.PrimaryKey, O.AutoInc)
      def nom = column[String]("NOM", O.NotNull)
      def prénom = column[String]("PRENOM")
      def sexe = column[Int]("SEXE")
      def télPortable = column[String]("TELPOR")
      def télBureau = column[String]("TELBUR")
      def télPrivé = column[String]("TELPRI")
      def siteRDV = column[String]("SITE")
      def typeRDV = column[String]("TYPE")
      def libelléRDV = column[String]("LIBELLE")
      def numRDV = column[String]("NUMRDV")
      def étape = column[String]("ETAPE")
      def dateRDV = column[Date]("DATE")
      def heureRDVString = column[String]("HEURE")
      def statut = column[String]("STATUT")
      def orderId = column[String]("ORDERID")

      def * = (id.?, nom, prénom, sexe, télPortable, télBureau, télPrivé,
        siteRDV, typeRDV, libelléRDV, numRDV, étape, dateRDV, heureRDVString,
        statut, orderId) <> (Rdv.tupled, Rdv.unapply _)
    }
  }
}
The test is this:
package tests {

  // imports reconstructed for completeness; the question only shows the Slick one
  import org.junit.runner.RunWith
  import org.specs2.mutable.Specification
  import org.specs2.runner.JUnitRunner
  import org.specs2.specification.Scope

  import scala.slick.driver.SQLiteDriver.simple._
  import models.sqlite

  @RunWith(classOf[JUnitRunner])
  class SqliteSpec extends Specification with sqlite {

    sequential

    "la base sqlite" should {
      "create a new database file" in new Sqlite_avant_chaque_test {
        todo
      }
    }

    class Sqlite_avant_chaque_test extends Scope {
      println("avant test")
      //db.withDynSession {
      db.withSession { implicit session: Session =>
        val personnes = TableQuery[Personnes]
        personnes.ddl.create
        println("avant tests, après création base")
      }
    }
  }
}
When I launch the test in Eclipse I get this error:

java.lang.Exception: Could not instantiate class tests.SqliteSpec: org.sqlite.JDBC

In my test class, however, I have this import:

import scala.slick.driver.SQLiteDriver.simple._

which should load Slick's SQLite driver.
Can you help me?

Related

Access the contents of the row inserted into Dynamodb using Pynamodb save method

I have the below model for my DynamoDB table, using pynamodb:
from pynamodb.models import Model
from pynamodb.attributes import (
    UnicodeAttribute, UTCDateTimeAttribute, UnicodeSetAttribute, BooleanAttribute
)

class Reminders(Model):
    """Model class for the Reminders table."""

    # Information on global secondary index for the table
    # user_id (hash key) + reminder_id+reminder_title(sort key)
    class Meta:
        table_name = 'Reminders'
        region = 'eu-central-1'

    reminder_id = UnicodeAttribute(hash_key=True)
    user_id = UnicodeAttribute(range_key=True)
    reminder_title = UnicodeAttribute()
    reminder_tags = UnicodeSetAttribute()
    reminder_description = UnicodeAttribute()
    reminder_frequency = UnicodeAttribute(default='Only once')
    reminder_tasks = UnicodeSetAttribute(default=set())
    reminder_expiration_date_time = UTCDateTimeAttribute(null=True)
    reminder_title_reminder_id = UnicodeAttribute()
    next_reminder_date_time = UTCDateTimeAttribute()
    should_expire = BooleanAttribute()
When I want to create a new reminder I do it through the below code:
class DynamoBackend:
    @staticmethod
    def create_a_new_reminder(new_reminder: NewReminder) -> Dict[str, Any]:
        """Create a new reminder using pynamodb."""
        new_reminder = models.Reminders(**new_reminder.dict())
        return new_reminder.save()
In this case NewReminder is an instance of a pydantic base model, like so:
import datetime
from typing import Optional, Sequence

import pydantic

class NewReminder(pydantic.BaseModel):
    reminder_id: str
    user_id: str
    reminder_title: str
    reminder_description: str
    reminder_tags: Sequence[str]
    reminder_frequency: str
    should_expire: bool
    reminder_expiration_date_time: Optional[datetime.datetime]
    next_reminder_date_time: datetime.datetime
    reminder_title_reminder_id: str
When I call the save method on the model object I receive the below response:
{
    "ConsumedCapacity": {
        "CapacityUnits": 2.0,
        "TableName": "Reminders"
    }
}
Now, my question: the save method is called directly by a Lambda function, which is in turn invoked by an API Gateway POST endpoint. Ideally the response should be a 201 Created, and instead of returning the consumed capacity and table name it would be great if it returned the item inserted into the database. Below is my route code:
def create_a_new_reminder():
    """Creates a new reminder in the database."""
    request_context = app.current_request.context
    request_body = json.loads(app.current_request.raw_body.decode())
    request_body["reminder_frequency"] = data_structures.ReminderFrequency[request_body["reminder_frequency"]]
    reminder_details = data_structures.ReminderDetailsFromRequest.parse_obj(request_body)
    user_details = data_structures.UserDetails(
        user_name=request_context["authorizer"]["claims"]["cognito:username"],
        user_email=request_context["authorizer"]["claims"]["email"]
    )
    reminder_id = str(uuid.uuid1())
    new_reminder = data_structures.NewReminder(
        reminder_id=reminder_id,
        user_id=user_details.user_name,
        reminder_title=reminder_details.reminder_title,
        reminder_description=reminder_details.reminder_description,
        reminder_tags=reminder_details.reminder_tags,
        reminder_frequency=reminder_details.reminder_frequency.value[0],
        should_expire=reminder_details.should_expire,
        reminder_expiration_date_time=reminder_details.reminder_expiration_date_time,
        next_reminder_date_time=reminder_details.next_reminder_date_time,
        reminder_title_reminder_id=f"{reminder_details.reminder_title}-{reminder_id}"
    )
    return DynamoBackend.create_a_new_reminder(new_reminder=new_reminder)
I am very new to REST API creation and best practices, so it would be great if someone could guide me here. Thanks in advance!
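For what it's worth, one common pattern is to treat save() as a write-only call and build the response yourself. Below is a minimal sketch assuming the route runs on AWS Chalice (implied by app.current_request): the backend returns the model's attribute_values mapping (the item as pynamodb sees it after the write), and the route wraps it in an explicit 201. The default=str fallback is just one quick way to make the datetime and set attributes JSON-safe; the request-parsing part is elided because it is exactly the question's code.

import json
from typing import Any, Dict

from chalice import Response  # assumes the routes run on Chalice

class DynamoBackend:
    @staticmethod
    def create_a_new_reminder(new_reminder: NewReminder) -> Dict[str, Any]:
        """Create a new reminder and return the stored item, not the write metadata."""
        # NewReminder and models.Reminders are as defined in the question.
        reminder = models.Reminders(**new_reminder.dict())
        reminder.save()
        # attribute_values maps attribute names to their deserialized values.
        return reminder.attribute_values

def create_a_new_reminder():
    """Creates a new reminder and responds 201 with the item that was written."""
    # ... request parsing and NewReminder construction exactly as in the question ...
    item = DynamoBackend.create_a_new_reminder(new_reminder=new_reminder)
    return Response(
        body=json.dumps(item, default=str),  # datetimes/sets need a JSON fallback
        status_code=201,
        headers={"Content-Type": "application/json"},
    )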

gRPC: from Node.js, how to send an array of floats using protobuf's repeated bytes to Python

I would like to send a list of float lists from Node.js and receive it in Python using protobuf's repeated bytes type.
I tried this configuration, and what I get on the Python side is not really what I expect:
tensors=[b'-TWW', b'-TWW', b'-TWW', b'-TWW']
Here is my test in Node.
Client:
const PROTO_PATH = __dirname + '/route_guide.proto';

const async = require('async');
const grpc = require('@grpc/grpc-js');
const protoLoader = require('@grpc/proto-loader');

const packageDefinition = protoLoader.loadSync(
  PROTO_PATH,
  {
    keepCase: true,
    longs: String,
    enums: String,
    defaults: true,
    oneofs: true
  });
const routeguide = grpc.loadPackageDefinition(packageDefinition).routeguide;

const client = new routeguide.RouteGuide('localhost:50051',
  grpc.credentials.createInsecure());

function runJoin(callback) {
  const call = client.join();
  call.on('data', function(receivedMessage) {
    console.log('Got message "' + JSON.stringify(receivedMessage));
  });
  call.on('end', callback);

  const messageToSend = {
    msg: 'parameters_res',
    parameters_res: {
      parameters: {
        tensors: [
          Buffer.from(new Float64Array([45.1]).buffer),
          Buffer.from(new Float64Array([45.1, 84.5, 87.9, 87.1]).buffer),
          Buffer.from(new Float64Array([45.1, 84.5, 87.9, 87.1]).buffer),
          Buffer.from(new Float64Array([45.1, 84.5, 87.9, 87.1]).buffer)
        ],
        tensor_type: 'numpy.ndarray'
      }
    }
  };

  console.log(messageToSend);
  console.log(messageToSend.parameters_res.parameters.tensors);
  call.write(messageToSend);
  call.end();
}

function main() {
  async.series([
    runJoin
  ]);
}

if (require.main === module) {
  main();
}

exports.runJoin = runJoin;
route_guide.proto:
syntax = "proto3";

option java_multiple_files = true;
option java_package = "io.grpc.examples.routeguide";
option java_outer_classname = "RouteGuideProto";
option objc_class_prefix = "RTG";

package routeguide;

service RouteGuide {
  rpc Join(stream ClientMessage) returns (stream ClientMessage) {}
}

message RouteNote {
  repeated bytes model = 1;
}

message ClientMessage {
  message Disconnect { Reason reason = 1; }
  message ParametersRes { Parameters parameters = 1; }
  oneof msg {
    Disconnect disconnect = 1;
    ParametersRes parameters_res = 2;
  }
}

message Parameters {
  repeated bytes tensors = 1;
  string tensor_type = 2;
}

enum Reason {
  UNKNOWN = 0;
  RECONNECT = 1;
  POWER_DISCONNECTED = 2;
  WIFI_UNAVAILABLE = 3;
  ACK = 4;
}
Server:
const PROTO_PATH = __dirname + '/route_guide.proto';

const grpc = require('@grpc/grpc-js');
const protoLoader = require('@grpc/proto-loader');

const packageDefinition = protoLoader.loadSync(
  PROTO_PATH,
  {
    keepCase: true,
    longs: String,
    enums: String,
    defaults: true,
    oneofs: true
  });
const routeguide = grpc.loadPackageDefinition(packageDefinition).routeguide;

function join(call) {
  call.on('data', function(receivedMessage) {
    console.log("SERVER RECEIVE:");
    console.log(receivedMessage);
    console.log(receivedMessage.parameters_res.parameters.tensors);
    for (const element of receivedMessage.parameters_res.parameters.tensors) {
      console.log(element);
    }
    call.write(receivedMessage);
  });
  call.on('end', function() {
    call.end();
  });
}

function getServer() {
  var server = new grpc.Server();
  server.addService(routeguide.RouteGuide.service, {
    join: join
  });
  return server;
}

if (require.main === module) {
  var routeServer = getServer();
  routeServer.bindAsync('0.0.0.0:50051', grpc.ServerCredentials.createInsecure(), () => {
    routeServer.start();
  });
}

exports.getServer = getServer;
MyStartegy.py:
from logging import WARNING
from typing import Callable, Dict, List, Optional, Tuple, cast

import numpy as np

import flwr as fl
from flwr.common import (
    EvaluateIns,
    EvaluateRes,
    FitIns,
    FitRes,
    Parameters,
    Scalar,
    Weights,
)
from flwr.common.logger import log
from flwr.server.client_manager import ClientManager
from flwr.server.client_proxy import ClientProxy
from flwr.server.strategy.aggregate import aggregate, weighted_loss_avg
from flwr.server.strategy import Strategy
from tensorflow import Tensor

DEPRECATION_WARNING = """
DEPRECATION WARNING: deprecated `eval_fn` return format
loss, accuracy
move to
loss, {"accuracy": accuracy}
instead. Note that compatibility with the deprecated return format will be
removed in a future release.
"""

DEPRECATION_WARNING_INITIAL_PARAMETERS = """
DEPRECATION WARNING: deprecated initial parameter type
flwr.common.Weights (i.e., List[np.ndarray])
will be removed in a future update, move to
flwr.common.Parameters
instead. Use
parameters = flwr.common.weights_to_parameters(weights)
to easily transform `Weights` to `Parameters`.
"""

class MyStrategy(Strategy):
    """Configurable FedAvg strategy implementation."""

    # pylint: disable=too-many-arguments,too-many-instance-attributes
    def __init__(
        self,
        fraction_fit: float = 0.1,
        fraction_eval: float = 0.1,
        min_fit_clients: int = 2,
        min_eval_clients: int = 2,
        min_available_clients: int = 2,
        eval_fn: Optional[
            Callable[[Weights], Optional[Tuple[float, Dict[str, Scalar]]]]
        ] = None,
        on_fit_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None,
        on_evaluate_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None,
        accept_failures: bool = True,
        initial_parameters: Optional[Parameters] = None,
    ) -> None:
        """Federated Averaging strategy.

        Implementation based on https://arxiv.org/abs/1602.05629

        Args:
            fraction_fit (float, optional): Fraction of clients used during
                training. Defaults to 0.1.
            fraction_eval (float, optional): Fraction of clients used during
                validation. Defaults to 0.1.
            min_fit_clients (int, optional): Minimum number of clients used
                during training. Defaults to 2.
            min_eval_clients (int, optional): Minimum number of clients used
                during validation. Defaults to 2.
            min_available_clients (int, optional): Minimum number of total
                clients in the system. Defaults to 2.
            eval_fn (Callable[[Weights], Optional[Tuple[float, float]]], optional):
                Function used for validation. Defaults to None.
            on_fit_config_fn (Callable[[int], Dict[str, Scalar]], optional):
                Function used to configure training. Defaults to None.
            on_evaluate_config_fn (Callable[[int], Dict[str, Scalar]], optional):
                Function used to configure validation. Defaults to None.
            accept_failures (bool, optional): Whether or not accept rounds
                containing failures. Defaults to True.
            initial_parameters (Parameters, optional): Initial global model parameters.
        """
        super().__init__()
        self.min_fit_clients = min_fit_clients
        self.min_eval_clients = min_eval_clients
        self.fraction_fit = fraction_fit
        self.fraction_eval = fraction_eval
        self.min_available_clients = min_available_clients
        self.eval_fn = eval_fn
        self.on_fit_config_fn = on_fit_config_fn
        self.on_evaluate_config_fn = on_evaluate_config_fn
        self.accept_failures = accept_failures
        self.initial_parameters = initial_parameters

    def __repr__(self) -> str:
        rep = f"FedAvg(accept_failures={self.accept_failures})"
        return rep

    def num_fit_clients(self, num_available_clients: int) -> Tuple[int, int]:
        """Return the sample size and the required number of available
        clients."""
        num_clients = int(num_available_clients * self.fraction_fit)
        return max(num_clients, self.min_fit_clients), self.min_available_clients

    def num_evaluation_clients(self, num_available_clients: int) -> Tuple[int, int]:
        """Use a fraction of available clients for evaluation."""
        num_clients = int(num_available_clients * self.fraction_eval)
        return max(num_clients, self.min_eval_clients), self.min_available_clients

    def initialize_parameters(
        self, client_manager: ClientManager
    ) -> Optional[Parameters]:
        """Initialize global model parameters."""
        initial_parameters = self.initial_parameters
        self.initial_parameters = None  # Don't keep initial parameters in memory
        if isinstance(initial_parameters, list):
            log(WARNING, DEPRECATION_WARNING_INITIAL_PARAMETERS)
            initial_parameters = self.weights_to_parameters(weights=initial_parameters)
        return initial_parameters

    def evaluate(
        self, parameters: Parameters
    ) -> Optional[Tuple[float, Dict[str, Scalar]]]:
        """Evaluate model parameters using an evaluation function."""
        if self.eval_fn is None:
            # No evaluation function provided
            return None
        weights = self.parameters_to_weights(parameters)
        eval_res = self.eval_fn(weights)
        if eval_res is None:
            return None
        loss, other = eval_res
        if isinstance(other, float):
            print(DEPRECATION_WARNING)
            metrics = {"accuracy": other}
        else:
            metrics = other
        return loss, metrics

    def configure_fit(
        self, rnd: int, parameters: Parameters, client_manager: ClientManager
    ) -> List[Tuple[ClientProxy, FitIns]]:
        """Configure the next round of training."""
        config = {}
        if self.on_fit_config_fn is not None:
            # Custom fit config function provided
            config = self.on_fit_config_fn(rnd)
        fit_ins = FitIns(parameters, config)

        # Sample clients
        sample_size, min_num_clients = self.num_fit_clients(
            client_manager.num_available()
        )
        clients = client_manager.sample(
            num_clients=sample_size, min_num_clients=min_num_clients
        )

        # Return client/config pairs
        return [(client, fit_ins) for client in clients]

    def configure_evaluate(
        self, rnd: int, parameters: Parameters, client_manager: ClientManager
    ) -> List[Tuple[ClientProxy, EvaluateIns]]:
        """Configure the next round of evaluation."""
        # Do not configure federated evaluation if fraction_eval is 0
        if self.fraction_eval == 0.0:
            return []

        # Parameters and config
        config = {}
        if self.on_evaluate_config_fn is not None:
            # Custom evaluation config function provided
            config = self.on_evaluate_config_fn(rnd)
        evaluate_ins = EvaluateIns(parameters, config)

        # Sample clients
        if rnd >= 0:
            sample_size, min_num_clients = self.num_evaluation_clients(
                client_manager.num_available()
            )
            clients = client_manager.sample(
                num_clients=sample_size, min_num_clients=min_num_clients
            )
        else:
            clients = list(client_manager.all().values())

        # Return client/config pairs
        return [(client, evaluate_ins) for client in clients]

    def aggregate_fit(
        self,
        rnd: int,
        results: List[Tuple[ClientProxy, FitRes]],
        failures: List[BaseException],
    ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]:
        """Aggregate fit results using weighted average."""
        if not results:
            return None, {}
        # Do not aggregate if there are failures and failures are not accepted
        if not self.accept_failures and failures:
            return None, {}
        # Convert results
        print("\n\n aggregate_fit")
        print(results)
        weights_results = [
            (self.parameters_to_weights(fit_res.parameters), fit_res.num_examples)
            for client, fit_res in results
        ]
        print("weights_results")
        print(weights_results)
        return self.weights_to_parameters(aggregate(weights_results)), {}

    def aggregate_evaluate(
        self,
        rnd: int,
        results: List[Tuple[ClientProxy, EvaluateRes]],
        failures: List[BaseException],
    ) -> Tuple[Optional[float], Dict[str, Scalar]]:
        """Aggregate evaluation losses using weighted average."""
        if not results:
            return None, {}
        # Do not aggregate if there are failures and failures are not accepted
        if not self.accept_failures and failures:
            return None, {}
        loss_aggregated = weighted_loss_avg(
            [
                (evaluate_res.num_examples, evaluate_res.loss)
                for _, evaluate_res in results
            ]
        )
        return loss_aggregated, {}

    def weights_to_parameters(self, weights: Weights) -> Parameters:
        """Convert NumPy weights to parameters object."""
        print('weights_to_parameters')
        print(weights)
        tensors = [self.ndarray_to_bytes(ndarray) for ndarray in weights]
        return Parameters(tensors=tensors, tensor_type="numpy.nda")

    def parameters_to_weights(self, parameters: Parameters) -> Weights:
        """Convert parameters object to NumPy weights."""
        print('parameters_to_weights')
        print(parameters)
        return [self.bytes_to_ndarray(tensor) for tensor in parameters.tensors]

    # pylint: disable=R0201
    def ndarray_to_bytes(self, ndarray: np.ndarray) -> bytes:
        """Serialize NumPy array to bytes."""
        print('ndarray_to_bytes')
        print(ndarray)
        return None

    # pylint: disable=R0201
    def bytes_to_ndarray(self, tensor: bytes) -> np.ndarray:
        """Deserialize NumPy array from bytes."""
        print('bytes_to_ndarray')
        print(tensor)
        return None

# Start Flower server for three rounds of federated learning
fl.server.start_server(
    server_address='localhost:5006',
    config={"num_rounds": 2},
    strategy=MyStrategy()
)
Is Float64Array the right type?
What should I use on the python side to deserialize the data?
Note that I cannot modify the proto.
Thank you in advance for your explanations.
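For what it's worth, here is a minimal round-trip sketch for the Python side, under two assumptions: each entry of tensors arrives as the raw bytes of the client's Float64Array (8-byte little-endian doubles), and flwr hands those bytes through unmodified. np.frombuffer with an explicit '<f8' dtype then reverses the Node encoding; the function names mirror the stubs in MyStartegy.py.

import numpy as np

def ndarray_to_bytes(ndarray: np.ndarray) -> bytes:
    """Serialize a NumPy array to raw little-endian float64 bytes."""
    return ndarray.astype('<f8').tobytes()

def bytes_to_ndarray(tensor: bytes) -> np.ndarray:
    """Deserialize bytes produced by a JS Float64Array back into a NumPy array."""
    return np.frombuffer(tensor, dtype='<f8')

# Round trip with the values the Node client sends:
sent = np.array([45.1, 84.5, 87.9, 87.1])
assert np.array_equal(bytes_to_ndarray(ndarray_to_bytes(sent)), sent)

On this reading, Float64Array is a workable choice as long as both sides agree on the 8-byte element width.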

Overriding methods from a derived class

I have the following code:
Program 1
class Person :
    '''A base to define Person properties.'''
    def __inti__( self , name ):
        self.name = name
    def speak( self , msg = '(Calling The Base Class)' ):
        print( self.name , msg )
Program 2
from Person import *

'''A derived class to define Man properties.'''
class Man( Person ):
    def speak( self , msg ):
        print( self.name , ':\n\tHello!' , msg )
Program 3
from Person import *

'''A derived class to define Hombre properties.'''
class Hombre( Person ):
    def speak( self , msg ):
        print( self.name , ':\n\tHola!' , msg )
Program 4
from Man import *
from Hombre import *
guy__1 = Man('Richard')
guy__2 = Hombre('Ricardo')
guy__1.speak( 'It\'s a beautiful evening.\n' )
guy__2.speak( 'Es una tarde hermosa.\n' )
Person.speak( guy__1 )
Person.speak( guy__2 )
I get the following error:
guy__1 = Man('Richard')
TypeError: object() takes no parameters
Change
def __inti__( self , name ):
to
def __init__( self , name ):
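The reason for the error: because __inti__ is misspelled, Python never finds an __init__ on Person, so Man('Richard') falls through to object.__init__, which takes no extra arguments; hence the TypeError. With the fix applied, Program 1 becomes:

class Person :
    '''A base to define Person properties.'''
    def __init__( self , name ):
        self.name = name
    def speak( self , msg = '(Calling The Base Class)' ):
        print( self.name , msg )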

How to create a custom transformer without any input column?

We have a requirement where we want to generate scores for our model using some random values between 0 and 1.
To do that, we want a custom transformer that generates random numbers without any input fields.
So, can we create a transformer without input fields in MLeap?
We usually create one as below:
import ml.combust.mleap.core.Model
import ml.combust.mleap.core.types._

case class RandomNumberModel() extends Model {
  private val rnd = scala.util.Random

  def apply(): Double = rnd.nextFloat

  override def inputSchema: StructType = StructType("input" -> ScalarType.String).get
  override def outputSchema: StructType = StructType("output" -> ScalarType.Double).get
}
How can I make it so that no input schema is necessary?

I have never tried that, but here is how I achieved a custom transformer with multiple input fields...
package org.apache.spark.ml.feature.mleap

import ml.combust.mleap.core.Model
import ml.combust.mleap.core.types._
import org.apache.spark.ml.linalg._

case class PropertyGroupAggregatorBaseModel(props: Array[String],
                                            aggFunc: String) extends Model {
  val outputSize = props.size

  //having multiple inputs, you will have apply with a parameter Seq[Any]
  def apply(features: Seq[Any]): Vector = {
    val properties = features(0).asInstanceOf[Seq[String]]
    val values = features(1).asInstanceOf[Seq[Double]]
    val mapping = properties.zip(values)
    val histogram = props.foldLeft(Array.empty[Double]) {
      (acc, property) =>
        val newValues = mapping.filter(x => x._1 == property).map(x => x._2)
        val newAggregate = aggFunc match {
          case "sum" => newValues.sum.toDouble
          case "count" => newValues.size.toDouble
          case "avg" => (newValues.sum / Math.max(newValues.size, 1)).toDouble
        }
        acc :+ newAggregate
    }
    Vectors.dense(histogram)
  }

  override def inputSchema: StructType = {
    //here you define the input
    val inputFields = Seq(
      StructField("input1" -> ListType(BasicType.String)),
      StructField("input2" -> ListType(BasicType.Double))
    )
    StructType(inputFields).get
  }

  override def outputSchema: StructType = StructType(StructField("output" -> TensorType.Double(outputSize))).get
}
My suggestion would be that the apply might already work for you. I guess if you define inputSchema as follows, it might work:
override def inputSchema: StructType = {
  //here you define the input
  val inputFields = Seq.empty[StructField]
  StructType(inputFields).get
}

write an empty dict field using mongoengine

I have observed mongo documents for which document[field] returns {}, but I can't seem to create such a document. Example:
import mongoengine

mongoengine.connect('FOO', host='localhost', port=27017)

class Foo(mongoengine.Document):
    some_dict = mongoengine.DictField()
    message = mongoengine.StringField()

ID = '59b97ec7c5d65e0c4740b886'
foo = Foo()
foo.some_dict = {}
foo.id = ID
foo.save()
But when I query the record, some_dict is not among the fields, so the last line throws an error:
import pymongo
CON = pymongo.MongoClient('localhost',27017)
x = CON.FOO.foo.find_one()
assert str(x['_id']) == ID
assert 'some_dict' in x.keys()
One obvious workaround is to update the document after it's created, but it feels like too much of a hack. I would prefer if mongoengine made it easy to do directly:
import mongoengine
import pymongo

CON = pymongo.MongoClient('localhost', 27017)
CON.Foo.foo.remove()
mongoengine.connect('FOO', host='localhost', port=27017)

class Foo(mongoengine.Document):
    some_dict = mongoengine.DictField()
    some_list = mongoengine.ListField()

ID = '59b97ec7c5d65e0c4740b886'
foo = Foo()
foo.id = ID
foo.save()

from bson.objectid import ObjectId
CON.FOO.foo.update_one({'_id': ObjectId(ID)},
                       {'$set': {'some_dict': {}}}, upsert=False)

x = CON.FOO.foo.find_one()
assert str(x['_id']) == ID
assert 'some_dict' in x.keys()
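If dropping to pymongo is acceptable anyway, a slightly shorter variant of the same workaround is to skip mongoengine's save() for this document and insert it directly, so the empty dict is written exactly as given. A minimal sketch, reusing the connection and ID from above:

import pymongo
from bson.objectid import ObjectId

CON = pymongo.MongoClient('localhost', 27017)
ID = '59b97ec7c5d65e0c4740b886'

# Insert the document with the empty dict in one step instead of save() + update.
CON.FOO.foo.insert_one({'_id': ObjectId(ID), 'some_dict': {}})

x = CON.FOO.foo.find_one({'_id': ObjectId(ID)})
assert x['some_dict'] == {}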
