How to create a custom transformer without any input column? - mleap

We have a requirement where we want to generate scores from our model using random values between 0 and 1.
To do that, we want a custom transformer that generates random numbers without any input fields.
So, can we create a transformer without input fields in MLeap?
Usually we create one as below:
import ml.combust.mleap.core.Model
import ml.combust.mleap.core.types._

case class RandomNumberModel() extends Model {
  private val rnd = scala.util.Random

  def apply(): Double = rnd.nextFloat

  override def inputSchema: StructType = StructType("input" -> ScalarType.String).get
  override def outputSchema: StructType = StructType("output" -> ScalarType.Double).get
}
How can we define the model so that no input schema is required?

I have never tried that, but here is how I managed to build a custom transformer with multiple input fields ...
package org.apache.spark.ml.feature.mleap

import ml.combust.mleap.core.Model
import ml.combust.mleap.core.types._
import org.apache.spark.ml.linalg._

case class PropertyGroupAggregatorBaseModel(props: Array[String],
                                            aggFunc: String) extends Model {
  val outputSize = props.size

  // Having multiple inputs, apply takes a single Seq[Any] parameter.
  def apply(features: Seq[Any]): Vector = {
    val properties = features(0).asInstanceOf[Seq[String]]
    val values = features(1).asInstanceOf[Seq[Double]]
    val mapping = properties.zip(values)
    val histogram = props.foldLeft(Array.empty[Double]) { (acc, property) =>
      val newValues = mapping.filter(x => x._1 == property).map(x => x._2)
      val newAggregate = aggFunc match {
        case "sum" => newValues.sum.toDouble
        case "count" => newValues.size.toDouble
        case "avg" => (newValues.sum / Math.max(newValues.size, 1)).toDouble
      }
      acc :+ newAggregate
    }
    Vectors.dense(histogram)
  }

  override def inputSchema: StructType = {
    // here you define the input
    val inputFields = Seq(
      StructField("input1" -> ListType(BasicType.String)),
      StructField("input2" -> ListType(BasicType.Double))
    )
    StructType(inputFields).get
  }

  override def outputSchema: StructType =
    StructType(StructField("output" -> TensorType.Double(outputSize))).get
}
My suggestion would be that the apply might already work for you. I guess if you define inputSchema as follows, it might work:
override def inputSchema: StructType = {
  // here you define the input
  val inputFields = Seq.empty[StructField]
  StructType(inputFields).get
}

Related

gRPC: from Node.js, how to send an array of floats to Python using protobuf's repeated bytes

I would like to send a list of float lists from Node.js and receive it in Python using protobuf's repeated bytes type.
(A diagram illustrating the setup was included in the original post.)
I tried with this configuration, and what I get on the Python side is not really what I expect:
tensors=[b'-TWW', b'-TWW', b'-TWW', b'-TWW']
Here is my test in Node.
Client:
const PROTO_PATH = __dirname + '/route_guide.proto';
const async = require('async');
const grpc = require('@grpc/grpc-js');
const protoLoader = require('@grpc/proto-loader');
const packageDefinition = protoLoader.loadSync(
  PROTO_PATH,
  {
    keepCase: true,
    longs: String,
    enums: String,
    defaults: true,
    oneofs: true
  });
const routeguide = grpc.loadPackageDefinition(packageDefinition).routeguide;
const client = new routeguide.RouteGuide('localhost:50051',
  grpc.credentials.createInsecure());

function runJoin(callback) {
  const call = client.join();
  call.on('data', function(receivedMessage) {
    console.log('Got message "' + JSON.stringify(receivedMessage));
  });
  call.on('end', callback);
  messageToSend = {
    msg: 'parameters_res',
    parameters_res: {
      parameters: {
        tensors: [
          new Buffer.from(new Float64Array([45.1]).buffer),
          new Buffer.from(new Float64Array([45.1, 84.5, 87.9, 87.1]).buffer),
          new Buffer.from(new Float64Array([45.1, 84.5, 87.9, 87.1]).buffer),
          new Buffer.from(new Float64Array([45.1, 84.5, 87.9, 87.1]).buffer)
        ],
        tensor_type: 'numpy.ndarray'
      }
    }
  };
  console.log(messageToSend);
  console.log(messageToSend.parameters_res.parameters.tensors);
  call.write(messageToSend);
  call.end();
}

function main() {
  async.series([
    runJoin
  ]);
}

if (require.main === module) {
  main();
}

exports.runJoin = runJoin;
route_guide.proto:
syntax = "proto3";
option java_multiple_files = true;
option java_package = "io.grpc.examples.routeguide";
option java_outer_classname = "RouteGuideProto";
option objc_class_prefix = "RTG";
package routeguide;
service RouteGuide {
  rpc Join(stream ClientMessage) returns (stream ClientMessage) {}
}

message RouteNote {
  repeated bytes model = 1;
}

message ClientMessage {
  message Disconnect { Reason reason = 1; }
  message ParametersRes { Parameters parameters = 1; }
  oneof msg {
    Disconnect disconnect = 1;
    ParametersRes parameters_res = 2;
  }
}

message Parameters {
  repeated bytes tensors = 1;
  string tensor_type = 2;
}

enum Reason {
  UNKNOWN = 0;
  RECONNECT = 1;
  POWER_DISCONNECTED = 2;
  WIFI_UNAVAILABLE = 3;
  ACK = 4;
}
Server:
const PROTO_PATH = __dirname + '/route_guide.proto';
const grpc = require('@grpc/grpc-js');
const protoLoader = require('@grpc/proto-loader');
const packageDefinition = protoLoader.loadSync(
  PROTO_PATH,
  {
    keepCase: true,
    longs: String,
    enums: String,
    defaults: true,
    oneofs: true
  });
const routeguide = grpc.loadPackageDefinition(packageDefinition).routeguide;

function join(call) {
  call.on('data', function(receivedMessage) {
    console.log("SERVER RECEIVE:");
    console.log(receivedMessage);
    console.log(receivedMessage.parameters_res.parameters.tensors);
    for (const element of receivedMessage.parameters_res.parameters.tensors) {
      console.log(element);
    }
    call.write(receivedMessage);
  });
  call.on('end', function() {
    call.end();
  });
}

function getServer() {
  var server = new grpc.Server();
  server.addService(routeguide.RouteGuide.service, {
    join: join
  });
  return server;
}

if (require.main === module) {
  var routeServer = getServer();
  routeServer.bindAsync('0.0.0.0:50051', grpc.ServerCredentials.createInsecure(), () => {
    routeServer.start();
  });
}

exports.getServer = getServer;
exports.getServer = getServer;
MyStrategy.py:
from logging import WARNING
from typing import Callable, Dict, List, Optional, Tuple, cast
import numpy as np
import flwr as fl
from flwr.common import (
EvaluateIns,
EvaluateRes,
FitIns,
FitRes,
Parameters,
Scalar,
Weights,
)
from flwr.common.logger import log
from flwr.server.client_manager import ClientManager
from flwr.server.client_proxy import ClientProxy
from flwr.server.strategy.aggregate import aggregate, weighted_loss_avg
from flwr.server.strategy import Strategy
from tensorflow import Tensor
DEPRECATION_WARNING = """
DEPRECATION WARNING: deprecated `eval_fn` return format
loss, accuracy
move to
loss, {"accuracy": accuracy}
instead. Note that compatibility with the deprecated return format will be
removed in a future release.
"""
DEPRECATION_WARNING_INITIAL_PARAMETERS = """
DEPRECATION WARNING: deprecated initial parameter type
flwr.common.Weights (i.e., List[np.ndarray])
will be removed in a future update, move to
flwr.common.Parameters
instead. Use
parameters = flwr.common.weights_to_parameters(weights)
to easily transform `Weights` to `Parameters`.
"""
class MyStrategy(Strategy):
    """Configurable FedAvg strategy implementation."""

    # pylint: disable=too-many-arguments,too-many-instance-attributes
    def __init__(
        self,
        fraction_fit: float = 0.1,
        fraction_eval: float = 0.1,
        min_fit_clients: int = 2,
        min_eval_clients: int = 2,
        min_available_clients: int = 2,
        eval_fn: Optional[
            Callable[[Weights], Optional[Tuple[float, Dict[str, Scalar]]]]
        ] = None,
        on_fit_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None,
        on_evaluate_config_fn: Optional[Callable[[int], Dict[str, Scalar]]] = None,
        accept_failures: bool = True,
        initial_parameters: Optional[Parameters] = None,
    ) -> None:
        """Federated Averaging strategy.
        Implementation based on https://arxiv.org/abs/1602.05629
        Args:
            fraction_fit (float, optional): Fraction of clients used during
                training. Defaults to 0.1.
            fraction_eval (float, optional): Fraction of clients used during
                validation. Defaults to 0.1.
            min_fit_clients (int, optional): Minimum number of clients used
                during training. Defaults to 2.
            min_eval_clients (int, optional): Minimum number of clients used
                during validation. Defaults to 2.
            min_available_clients (int, optional): Minimum number of total
                clients in the system. Defaults to 2.
            eval_fn (Callable[[Weights], Optional[Tuple[float, float]]], optional):
                Function used for validation. Defaults to None.
            on_fit_config_fn (Callable[[int], Dict[str, Scalar]], optional):
                Function used to configure training. Defaults to None.
            on_evaluate_config_fn (Callable[[int], Dict[str, Scalar]], optional):
                Function used to configure validation. Defaults to None.
            accept_failures (bool, optional): Whether or not accept rounds
                containing failures. Defaults to True.
            initial_parameters (Parameters, optional): Initial global model parameters.
        """
        super().__init__()
        self.min_fit_clients = min_fit_clients
        self.min_eval_clients = min_eval_clients
        self.fraction_fit = fraction_fit
        self.fraction_eval = fraction_eval
        self.min_available_clients = min_available_clients
        self.eval_fn = eval_fn
        self.on_fit_config_fn = on_fit_config_fn
        self.on_evaluate_config_fn = on_evaluate_config_fn
        self.accept_failures = accept_failures
        self.initial_parameters = initial_parameters

    def __repr__(self) -> str:
        rep = f"FedAvg(accept_failures={self.accept_failures})"
        return rep

    def num_fit_clients(self, num_available_clients: int) -> Tuple[int, int]:
        """Return the sample size and the required number of available
        clients."""
        num_clients = int(num_available_clients * self.fraction_fit)
        return max(num_clients, self.min_fit_clients), self.min_available_clients

    def num_evaluation_clients(self, num_available_clients: int) -> Tuple[int, int]:
        """Use a fraction of available clients for evaluation."""
        num_clients = int(num_available_clients * self.fraction_eval)
        return max(num_clients, self.min_eval_clients), self.min_available_clients

    def initialize_parameters(
        self, client_manager: ClientManager
    ) -> Optional[Parameters]:
        """Initialize global model parameters."""
        initial_parameters = self.initial_parameters
        self.initial_parameters = None  # Don't keep initial parameters in memory
        if isinstance(initial_parameters, list):
            log(WARNING, DEPRECATION_WARNING_INITIAL_PARAMETERS)
            initial_parameters = self.weights_to_parameters(weights=initial_parameters)
        return initial_parameters

    def evaluate(
        self, parameters: Parameters
    ) -> Optional[Tuple[float, Dict[str, Scalar]]]:
        """Evaluate model parameters using an evaluation function."""
        if self.eval_fn is None:
            # No evaluation function provided
            return None
        weights = self.parameters_to_weights(parameters)
        eval_res = self.eval_fn(weights)
        if eval_res is None:
            return None
        loss, other = eval_res
        if isinstance(other, float):
            print(DEPRECATION_WARNING)
            metrics = {"accuracy": other}
        else:
            metrics = other
        return loss, metrics

    def configure_fit(
        self, rnd: int, parameters: Parameters, client_manager: ClientManager
    ) -> List[Tuple[ClientProxy, FitIns]]:
        """Configure the next round of training."""
        config = {}
        if self.on_fit_config_fn is not None:
            # Custom fit config function provided
            config = self.on_fit_config_fn(rnd)
        fit_ins = FitIns(parameters, config)
        # Sample clients
        sample_size, min_num_clients = self.num_fit_clients(
            client_manager.num_available()
        )
        clients = client_manager.sample(
            num_clients=sample_size, min_num_clients=min_num_clients
        )
        # Return client/config pairs
        return [(client, fit_ins) for client in clients]

    def configure_evaluate(
        self, rnd: int, parameters: Parameters, client_manager: ClientManager
    ) -> List[Tuple[ClientProxy, EvaluateIns]]:
        """Configure the next round of evaluation."""
        # Do not configure federated evaluation if fraction_eval is 0
        if self.fraction_eval == 0.0:
            return []
        # Parameters and config
        config = {}
        if self.on_evaluate_config_fn is not None:
            # Custom evaluation config function provided
            config = self.on_evaluate_config_fn(rnd)
        evaluate_ins = EvaluateIns(parameters, config)
        # Sample clients
        if rnd >= 0:
            sample_size, min_num_clients = self.num_evaluation_clients(
                client_manager.num_available()
            )
            clients = client_manager.sample(
                num_clients=sample_size, min_num_clients=min_num_clients
            )
        else:
            clients = list(client_manager.all().values())
        # Return client/config pairs
        return [(client, evaluate_ins) for client in clients]

    def aggregate_fit(
        self,
        rnd: int,
        results: List[Tuple[ClientProxy, FitRes]],
        failures: List[BaseException],
    ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]:
        """Aggregate fit results using weighted average."""
        if not results:
            return None, {}
        # Do not aggregate if there are failures and failures are not accepted
        if not self.accept_failures and failures:
            return None, {}
        # Convert results
        print("\n\n aggregate_fit")
        print(results)
        weights_results = [
            (self.parameters_to_weights(fit_res.parameters), fit_res.num_examples)
            for client, fit_res in results
        ]
        print("weights_results")
        print(weights_results)
        return self.weights_to_parameters(aggregate(weights_results)), {}

    def aggregate_evaluate(
        self,
        rnd: int,
        results: List[Tuple[ClientProxy, EvaluateRes]],
        failures: List[BaseException],
    ) -> Tuple[Optional[float], Dict[str, Scalar]]:
        """Aggregate evaluation losses using weighted average."""
        if not results:
            return None, {}
        # Do not aggregate if there are failures and failures are not accepted
        if not self.accept_failures and failures:
            return None, {}
        loss_aggregated = weighted_loss_avg(
            [
                (evaluate_res.num_examples, evaluate_res.loss)
                for _, evaluate_res in results
            ]
        )
        return loss_aggregated, {}

    def weights_to_parameters(self, weights: Weights) -> Parameters:
        """Convert NumPy weights to parameters object."""
        print('weights_to_parameters')
        print(weights)
        tensors = [self.ndarray_to_bytes(ndarray) for ndarray in weights]
        return Parameters(tensors=tensors, tensor_type="numpy.nda")

    def parameters_to_weights(self, parameters: Parameters) -> Weights:
        """Convert parameters object to NumPy weights."""
        print('parameters_to_weights')
        print(parameters)
        return [self.bytes_to_ndarray(tensor) for tensor in parameters.tensors]

    # pylint: disable=R0201
    def ndarray_to_bytes(self, ndarray: np.ndarray) -> bytes:
        """Serialize NumPy array to bytes."""
        print('ndarray_to_bytes')
        print(ndarray)
        return None

    # pylint: disable=R0201
    def bytes_to_ndarray(self, tensor: bytes) -> np.ndarray:
        """Deserialize NumPy array from bytes."""
        print('bytes_to_ndarray')
        print(tensor)
        return None


# Start Flower server for three rounds of federated learning
fl.server.start_server(
    server_address='localhost:5006',
    config={"num_rounds": 2},
    strategy=MyStrategy()
)
Is Float64Array the right type?
What should I use on the Python side to deserialize the data?
Note that I cannot modify the proto.
Thank you in advance for your explanations.

How to get/build a JavaRDD[DataSet]?

When I use deeplearning4j and try to train a model in Spark
public MultiLayerNetwork fit(JavaRDD<DataSet> trainingData)
fit() needs a JavaRDD<DataSet> parameter, which I try to build like this:
val totalDataset = csv.map(row => {
  val features = Array(
    row.getAs[String](0).toDouble, row.getAs[String](1).toDouble
  )
  val labels = Array(row.getAs[String](21).toDouble)
  val featuresINDA = Nd4j.create(features)
  val labelsINDA = Nd4j.create(labels)
  new DataSet(featuresINDA, labelsINDA)
})
but the hint from IDEA is "No implicit arguments of type: Encoder[DataSet]".
It is an error and I don't know how to solve it.
I know a Spark RDD can be transformed to a JavaRDD, but I don't know how to build a Spark RDD[DataSet].
DataSet comes from org.nd4j.linalg.dataset.DataSet.
Its constructor is:
public DataSet(INDArray first, INDArray second) {
    this(first, second, (INDArray)null, (INDArray)null);
}
This is my code:
val spark: SparkSession = {
  SparkSession
    .builder()
    .master("local")
    .appName("Spark LSTM Emotion Analysis")
    .getOrCreate()
}
import spark.implicits._
val JavaSC = JavaSparkContext.fromSparkContext(spark.sparkContext)
val csv = spark.read.format("csv")
  .option("header", "true")
  .option("sep", ",")
  .load("/home/hadoop/sparkjobs/LReg/data.csv")
val totalDataset = csv.map(row => {
  val features = Array(
    row.getAs[String](0).toDouble, row.getAs[String](1).toDouble
  )
  val labels = Array(row.getAs[String](21).toDouble)
  val featuresINDA = Nd4j.create(features)
  val labelsINDA = Nd4j.create(labels)
  new DataSet(featuresINDA, labelsINDA)
})
val data = totalDataset.toJavaRDD
Creating a JavaRDD<DataSet> in Java, from the deeplearning4j official guide:
String filePath = "hdfs:///your/path/some_csv_file.csv";
JavaSparkContext sc = new JavaSparkContext();
JavaRDD<String> rddString = sc.textFile(filePath);
RecordReader recordReader = new CSVRecordReader(',');
JavaRDD<List<Writable>> rddWritables = rddString.map(new StringToWritablesFunction(recordReader));
int labelIndex = 5; //Labels: a single integer representing the class index in column number 5
int numLabelClasses = 10; //10 classes for the label
JavaRDD<DataSet> rddDataSetClassification = rddWritables.map(new DataVecDataSetFunction(labelIndex, numLabelClasses, false));
I tried to create it in Scala:
val JavaSC: JavaSparkContext = new JavaSparkContext()
val rddString: JavaRDD[String] = JavaSC.textFile("/home/hadoop/sparkjobs/LReg/hf-data.csv")
val recordReader: CSVRecordReader = new CSVRecordReader(',')
val rddWritables: JavaRDD[List[Writable]] = rddString.map(new StringToWritablesFunction(recordReader))
val featureColnum = 3
val labelColnum = 1
val d = new DataVecDataSetFunction(featureColnum,labelColnum,true,null,null)
// val rddDataSet: JavaRDD[DataSet] = rddWritables.map(new DataVecDataSetFunction(featureColnum, labelColnum, true, null, null))
// cannot resolve overloaded method 'map'
Debug error information:
A DataSet is just a pair of INDArrays (inputs and labels).
Our docs cover this in depth:
https://deeplearning4j.konduit.ai/distributed-deep-learning/data-howto
For Stack Overflow's sake, I'll summarize what's there, since there is no single way to create a data pipeline; it depends on your problem. It is very similar to how you would create a dataset locally: generally, you take whatever you do locally and put it into Spark inside a function.
CSVs and images, for example, are going to be very different, but generally you use the DataVec library to do that. The docs summarize the approach for each kind; a rough Scala sketch of the CSV case is shown below.
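For reference, here is an untested Scala sketch of the CSV route, mirroring the Java guide. The file path, the label column index, the regression flag, and the reuse of the question's `spark` session are only assumptions taken from the snippets above. The key point is to type the intermediate RDD as java.util.List[Writable] (not scala.List), so that JavaRDD.map can resolve the DataVecDataSetFunction overload:

import java.util.{List => JList}
import org.apache.spark.api.java.{JavaRDD, JavaSparkContext}
import org.datavec.api.records.reader.impl.csv.CSVRecordReader
import org.datavec.api.writable.Writable
import org.datavec.spark.transform.misc.StringToWritablesFunction
import org.deeplearning4j.spark.datavec.DataVecDataSetFunction
import org.nd4j.linalg.dataset.DataSet

val javaSC = JavaSparkContext.fromSparkContext(spark.sparkContext)
val rddString: JavaRDD[String] = javaSC.textFile("/home/hadoop/sparkjobs/LReg/hf-data.csv")

// CSVRecordReader() defaults to ',' as the delimiter.
val recordReader = new CSVRecordReader()

// Typing the RDD as java.util.List[Writable] is what lets the
// DataVecDataSetFunction overload of map resolve below.
val rddWritables: JavaRDD[JList[Writable]] =
  rddString.map(new StringToWritablesFunction(recordReader))

// Assumed layout from the question: label in column 1, regression task
// (numPossibleLabels is ignored for regression, hence -1).
val labelIndex = 1
val rddDataSet: JavaRDD[DataSet] =
  rddWritables.map(new DataVecDataSetFunction(labelIndex, -1, true))

The resulting rddDataSet can then be passed to a Spark training wrapper such as SparkDl4jMultiLayer's fit.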

How to get the class reference from KParameter in kotlin?

The code below is about reflection.
It tries to do two things:
case1() creates an instance of the SimpleStudent class; it works.
case2() creates an instance of the Student class; it does not work.
The reason case2() does not work, and my question, is that inside generateValue():
I don't know how to check whether the parameter is a Kotlin type or one of my own types (I have a dirty way of checking that param.type.toString() does not contain "kotlin", but I wonder if there is a better solution).
I don't know how to get the class reference when it is a custom class. The problem is that even though param.type.toString() == "Lesson", when I try param.type::class I get class kotlin.reflect.jvm.internal.KTypeImpl.
So, how can I solve this? Thanks.
==============
import kotlin.reflect.KParameter
import kotlin.reflect.full.primaryConstructor
import kotlin.test.assertEquals

data class Lesson(val title: String, val length: Int)
data class Student(val name: String, val major: Lesson)
data class SimpleStudent(val name: String, val age: Int)

fun generateValue(param: KParameter, originalValue: Map<*, *>): Any? {
    var value = originalValue[param.name]
    // if (param.type is not Kotlin type){
    //     // Get its ::class so that we could create the instance of it, here, I mean Lesson class?
    // }
    return value
}

fun case1() {
    val classDesc = SimpleStudent::class
    val constructor = classDesc.primaryConstructor!!
    val value = mapOf<Any, Any>(
        "name" to "Tom",
        "age" to 16
    )
    val params = constructor.parameters.associateBy(
        { it },
        { generateValue(it, value) }
    )
    val result: SimpleStudent = constructor.callBy(params)
    assertEquals("Tom", result.name)
    assertEquals(16, result.age)
}

fun case2() {
    val classDesc = Student::class
    val constructor = classDesc.primaryConstructor!!
    val value = mapOf<Any, Any>(
        "name" to "Tom",
        "major" to mapOf<Any, Any>(
            "title" to "CS",
            "length" to 16
        )
    )
    val params = constructor.parameters.associateBy(
        { it },
        { generateValue(it, value) }
    )
    val result: Student = constructor.callBy(params)
    assertEquals("Tom", result.name)
    assertEquals(Lesson::class, result.major::class)
    assertEquals("CS", result.major.title)
}

fun main(args: Array<String>) {
    case1()
    case2()
}
Problem solved:
You can get that ::class by using param.type.classifier as KClass<T>, where param is a KParameter.

How to convert List to Map in Kotlin?

For example I have a list of strings like:
val list = listOf("a", "b", "c", "d")
and I want to convert it to a map, where the strings are the keys.
I know I should use the .toMap() function, but I don't know how, and I haven't seen any examples of it.
You have two choices:
The first, and most performant, is to use the associateBy function, which takes two lambdas for generating the key and value, and inlines the creation of the map:
val map = friends.associateBy({it.facebookId}, {it.points})
The second, less performant, is to use the standard map function to create a list of Pairs, which toMap can then use to generate the final map:
val map = friends.map { it.facebookId to it.points }.toMap()
From List to Map with associate function
With Kotlin 1.3, List has a function called associate. associate has the following declaration:
fun <T, K, V> Iterable<T>.associate(transform: (T) -> Pair<K, V>): Map<K, V>
Returns a Map containing key-value pairs provided by transform function applied to elements of the given collection.
Usage:
class Person(val name: String, val id: Int)
fun main() {
val friends = listOf(Person("Sue Helen", 1), Person("JR", 2), Person("Pamela", 3))
val map = friends.associate({ Pair(it.id, it.name) })
//val map = friends.associate({ it.id to it.name }) // also works
println(map) // prints: {1=Sue Helen, 2=JR, 3=Pamela}
}
From List to Map with associateBy function
With Kotlin, List has a function called associateBy. associateBy has the following declaration:
fun <T, K, V> Iterable<T>.associateBy(keySelector: (T) -> K, valueTransform: (T) -> V): Map<K, V>
Returns a Map containing the values provided by valueTransform and indexed by keySelector functions applied to elements of the given collection.
Usage:
class Person(val name: String, val id: Int)
fun main() {
val friends = listOf(Person("Sue Helen", 1), Person("JR", 2), Person("Pamela", 3))
val map = friends.associateBy(keySelector = { person -> person.id }, valueTransform = { person -> person.name })
//val map = friends.associateBy({ it.id }, { it.name }) // also works
println(map) // prints: {1=Sue Helen, 2=JR, 3=Pamela}
}
If you have duplicates in your list that you don't want to lose, you can do this using groupBy.
Otherwise, like everyone else said, use associate/By/With (which in the case of duplicates, I believe, will only return the last value with that key).
An example grouping a list of people by age:
class Person(val name: String, val age: Int)
fun main() {
val people = listOf(Person("Sue Helen", 31), Person("JR", 25), Person("Pamela", 31))
val duplicatesKept = people.groupBy { it.age }
val duplicatesLost = people.associateBy({ it.age }, { it })
println(duplicatesKept)
println(duplicatesLost)
}
Results:
{31=[Person#41629346, Person#4eec7777], 25=[Person#3b07d329]}
{31=Person#4eec7777, 25=Person#3b07d329}
Converting iterable or sequence elements to a Map in Kotlin:
associate vs associateBy vs associateWith (reference: Kotlin documentation)
1- associate (set both keys and values): builds a map in which you set both the key and the value elements:
IterableSequenceElements.associate { newKey to newValue } // Output => Map {newKey : newValue, ...}
If any two pairs would have the same key, the last one gets added to the map.
The returned map preserves the entry iteration order of the original collection.
2- associateBy (set only keys by calculation): builds a map in which you set new keys; the corresponding elements will be used as values:
IterableSequenceElements.associateBy { newKey } // Result => Map {newKey : 'values taken from the corresponding IterableSequenceElements', ...}
3- associateWith (set only values by calculation): builds a map in which you set new values; the corresponding elements will be used as keys:
IterableSequenceElements.associateWith { newValue } // Result => Map {'keys taken from the corresponding IterableSequenceElements' : newValue, ...}
Example from Kotlin tips :
You can use associate for this task:
val list = listOf("a", "b", "c", "d")
val m: Map<String, Int> = list.associate { it to it.length }
In this example, the strings from list become the keys and their corresponding lengths (as an example) become the values inside the map.
That has changed in the RC version.
I am using val map = list.groupByTo(destinationMap, {it.facebookId}, { it -> it.point })

How can I create new map with new values but same keys from an existing map?

I have an existing map in Groovy.
I want to create a new map that has the same keys but different values in it.
Eg.:
def scores = ["vanilla":10, "chocolate":9, "papaya": 0]
//transformed into
def preference = ["vanilla":"love", "chocolate":"love", "papaya": "hate"]
Any way of doing it through some sort of closure like:
def preference = scores.collect {//something}
You can use collectEntries
scores.collectEntries { k, v ->
  [ k, 'new value' ]
}
An alternative to using a map for the ranges would be to use a switch:
def grade = { score ->
  switch( score ) {
    case 10..9: return 'love'
    case 8..6: return 'like'
    case 5..2: return 'meh'
    case 1..0: return 'hate'
    default : return 'ERR'
  }
}
scores.collectEntries { k, v -> [ k, grade( v ) ] }
Nice functional-style solution (including your ranges, and easy to modify):
def scores = [vanilla:10, chocolate:9, papaya: 0]
// Store somewhere
def map = [(10..9):"love", (8..6):"like", (5..2):"meh", (1..0):"hate"]
def preference = scores.collectEntries { key, score -> [key, map.find { score in it.key }.value] }
// Output: [vanilla:love, chocolate:love, papaya:hate]
def scores = ["vanilla":10, "chocolate":9, "papaya": 0]
def preference = scores.collectEntries {key, value -> ["$key":(value > 5 ? "like" : "hate")]}
Then the result would be
[vanilla:like, chocolate:like, papaya:hate]
EDIT: If you want a map, then you should use collectEntries like tim_yates said.
