Performance and memory problems in a Rust async program

I wanted to benchmark requests from Rust to a particular service using an async client, so I created an async benchmarker for it.
This function should run a specified number of concurrent workers (actually, parallel chains of futures) for a specified duration and report the number of iterations achieved.
use futures::future;
use futures::prelude::*;
use std::error::Error;
use std::time::{Duration, Instant};
use std::{cell, io, rc};
use tokio::runtime::current_thread::Runtime;
use tokio::timer;

struct Config {
    workers: u32,
    duration: Duration,
}

/// Build infinitely repeating future
fn cycle<'a, F: Fn() -> P + 'a, P: Future + 'a>(
    f: F,
) -> Box<dyn Future<Item = (), Error = P::Error> + 'a> {
    // Run f once, then recurse; each call allocates a fresh Box.
    Box::new(f().and_then(move |_| cycle(f)))
}

fn benchmark<'a, F: Fn() -> P + 'a, P: Future<Error = io::Error> + 'a>(
    config: Config,
    f: F,
) -> impl Future<Item = u32, Error = io::Error> + 'a {
    let counter = rc::Rc::new(cell::Cell::new(0u32));
    let f = rc::Rc::new(f);
    // Race all worker loops against each other...
    future::select_all((0..config.workers).map({
        let counter = rc::Rc::clone(&counter);
        move |_| {
            let counter = rc::Rc::clone(&counter);
            let f = rc::Rc::clone(&f);
            cycle(move || {
                let counter = rc::Rc::clone(&counter);
                f().map(move |_| {
                    counter.set(counter.get() + 1);
                })
            })
        }
    }))
    .map(|((), _, _)| ())
    .map_err(|(err, _, _)| err)
    // ...and race the winner against the timeout.
    .select(
        timer::Delay::new(Instant::now() + config.duration)
            .map_err(|err| io::Error::new(io::ErrorKind::Other, err.description())),
    )
    .map(move |((), _)| counter.get())
    .map_err(|(err, _)| err)
}

fn main() {
    let duration = std::env::args()
        .skip(1)
        .next()
        .expect("Please provide duration in seconds")
        .parse()
        .expect("Duration must be integer number");
    let ms = Duration::from_millis(1);
    let mut rt = Runtime::new().expect("Could not create runtime");
    loop {
        let iters = rt
            .block_on(
                benchmark(
                    Config {
                        workers: 65536,
                        duration: Duration::from_secs(duration),
                    },
                    || {
                        // Substitute actual benchmarked call
                        timer::Delay::new(Instant::now() + ms)
                            .map_err(|err| panic!("Failed to set delay: {:?}", err))
                    },
                )
                .map_err(|err| panic!("Benchmarking error: {:?}", err)),
            )
            .expect("Runtime error");
        println!("{} iters/sec", iters as u64 / duration);
    }
}
However, the throughput this benchmark reports degrades, and memory consumption grows, as the benchmark duration increases; e.g. on my PC:
cargo run --release 1 ~ 900k iterations/sec
cargo run --release 2 ~ 700k iterations/sec
cargo run --release 10 ~ 330k iterations/sec
Also, memory usage rapidly grows while the benchmark function runs. I tried using Valgrind to find the memory leak, but it only reports that all allocated memory is still reachable.
How can I fix the issue?

It looks like the Boxes returned by cycle are not freed until the end of benchmark: each iteration allocates a fresh Box, and each finished and_then keeps the next boxed future inside it, so the chain deepens by one allocation per iteration and allocation plus pointer-chasing take more and more time.
I have rewritten your program with async/await, without the Box, and the results are now consistent:
#![feature(async_await)]
use futures::{compat::Future01CompatExt, future, prelude::*, select};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::time::{Duration, Instant};
use tokio::timer;

struct Config {
    workers: u32,
    duration: Duration,
}

// Build infinitely repeating future
async fn cycle<'a, F: Fn() -> P + 'a, P: Future<Output = ()> + 'a>(f: F) {
    loop {
        f().await;
    }
}

async fn benchmark<'a, F: Fn() -> P + 'a, P: Future<Output = ()> + 'a>(
    config: Config,
    f: F,
) -> usize {
    let counter = AtomicUsize::new(0);
    let infinite_counter = future::select_all((0..config.workers).map(|_| {
        cycle(|| {
            f().map(|_| {
                counter.fetch_add(1, Ordering::SeqCst);
            })
        })
        .boxed_local()
    }));
    let timer = timer::Delay::new(Instant::now() + config.duration)
        .compat()
        .unwrap_or_else(|_| panic!("Boom !"));
    select! {
        a = infinite_counter.fuse() => (),
        b = timer.fuse() => (),
    };
    counter.load(Ordering::SeqCst)
}

fn main() {
    let duration = std::env::args()
        .skip(1)
        .next()
        .expect("Please provide duration in seconds")
        .parse()
        .expect("Duration must be integer number");
    let ms = Duration::from_millis(1);
    // Use actix_rt runtime instead of vanilla tokio because I want
    // to restrict to one OS thread and avoid needing async primitives
    let mut rt = actix_rt::Runtime::new().expect("Could not create runtime");
    loop {
        let iters = rt
            .block_on(
                benchmark(
                    Config {
                        workers: 65536,
                        duration: Duration::from_secs(duration),
                    },
                    || {
                        // Substitute actual benchmarked call
                        timer::Delay::new(Instant::now() + ms)
                            .compat()
                            .unwrap_or_else(|_| panic!("Boom !"))
                    },
                )
                .boxed_local()
                .unit_error()
                .compat(),
            )
            .expect("Runtime error");
        println!("{} iters/sec", iters as u64 / duration);
    }
}
It's my first time with futures 0.3, so I don't really get some parts, like the select! syntax or boxed_local, but it works!
EDIT: Here is the dependencies block from Cargo.toml
[dependencies]
futures-preview = { version = "0.3.0-alpha", features = ["nightly", "compat", "async-await"] }
tokio = "0.1.22"
actix-rt = "0.2.3"

So it turns out cycle really was the culprit, as Gregory suspected. I found a useful function in the futures crate, loop_fn, and rewrote cycle using it:
/// Build infinitely repeating future
fn cycle<'a, F: Fn() -> P + 'a, P: Future + 'a>(
    f: F,
) -> impl Future<Item = (), Error = P::Error> + 'a {
    future::loop_fn((), move |_| f().map(|_| future::Loop::Continue(())))
}
The rest of the code remains the same. Now this compiles with stable Rust and even reports almost twice as many iterations per second as the proposed nightly futures solution (for what it's worth with this synthetic test).
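As an aside for readers new to loop_fn: the closure can also terminate the loop by returning future::Loop::Break. A tiny standalone illustration (futures 0.1; the names here are hypothetical, not part of the benchmark):
use futures::future::{self, Loop};

// Counts up from 0; once the state reaches 3, breaks out with the final value.
let counted = future::loop_fn(0u32, |n| {
    future::ok::<_, ()>(if n < 3 {
        Loop::Continue(n + 1)
    } else {
        Loop::Break(n)
    })
});
// Running `counted` (e.g. via `counted.wait()`) yields Ok(3).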

Related

How to find the number of active tokio tasks?

I would like to get the count of actively running tokio tasks. In Python, I can use len(asyncio.all_tasks()), which returns the unfinished tasks for the currently running loop. I would like to know of any equivalent in tokio.
Here is a sample code:
use std::time::Duration;
use tokio; // 1.24.1
use tokio::time::sleep;

fn active_tasks() -> usize {
    todo!("get active task somehow")
}

#[tokio::main]
async fn main() {
    tokio::spawn(async { sleep(Duration::from_secs(5)).await });
    tokio::spawn(async { sleep(Duration::from_secs(1)).await });
    tokio::spawn(async { sleep(Duration::from_secs(3)).await });
    println!("t = 0, running = {}", active_tasks());
    sleep(Duration::from_secs(2)).await;
    println!("t = 2, running = {}", active_tasks());
    sleep(Duration::from_secs(4)).await;
    println!("t = 6, running = {}", active_tasks());
}
I expect the output of the above program to print the number of active tasks; since main itself is a tokio task, I would not be surprised to find the following output:
t = 0, running = 4
t = 2, running = 3
t = 6, running = 1
active_tasks() can be an async function if required.
I was hoping that the unstable RuntimeMetrics would be able to solve this for you, but it seems designed for a different purpose. I don't believe Tokio will be able to handle this for you.
With that said, here's a potential solution to achieve a similar result:
use std::{
    future::Future,
    sync::{Arc, Mutex},
    time::Duration,
};
use tokio::time::sleep;

struct ThreadManager {
    thread_count: Arc<Mutex<usize>>,
}

impl ThreadManager {
    #[must_use]
    fn new() -> Self {
        Self {
            thread_count: Arc::new(Mutex::new(0)),
        }
    }

    fn spawn<T>(&self, future: T)
    where
        T: Future + Send + 'static,
        T::Output: Send + 'static,
    {
        // Increment the internal count just before the task starts.
        let count = Arc::clone(&self.thread_count);
        *count.lock().unwrap() += 1;
        tokio::spawn(async move {
            let result = future.await;
            // Once we've executed the future, decrement the count again.
            *count.lock().unwrap() -= 1;
            result
        });
    }

    fn thread_count(&self) -> usize {
        // Get a copy of the current thread count.
        *self.thread_count.lock().unwrap()
    }
}

#[tokio::main]
async fn main() {
    let manager = ThreadManager::new();
    manager.spawn(async { sleep(Duration::from_secs(5)).await });
    manager.spawn(async { sleep(Duration::from_secs(1)).await });
    manager.spawn(async { sleep(Duration::from_secs(3)).await });
    println!("t = 0, running = {}", manager.thread_count());
    sleep(Duration::from_secs(2)).await;
    println!("t = 2, running = {}", manager.thread_count());
    sleep(Duration::from_secs(4)).await;
    println!("t = 6, running = {}", manager.thread_count());
}
And the result is:
t = 0, running = 3
t = 2, running = 2
t = 6, running = 0
This will do approximately what you're describing. To get a little closer to what you're looking for, you can combine the manager with lazy_static and wrap it in a function called spawn or something. You can also start the counter at 1 to account for the main thread.
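For illustration, here is a minimal sketch of that global-manager idea, assuming the lazy_static crate and the ThreadManager type above; the MANAGER, spawn, and active_tasks names are hypothetical:
use lazy_static::lazy_static;

lazy_static! {
    // Hypothetical process-wide manager shared by all call sites.
    static ref MANAGER: ThreadManager = ThreadManager::new();
}

// Drop-in replacement for tokio::spawn that keeps the count up to date.
fn spawn<T>(future: T)
where
    T: std::future::Future + Send + 'static,
    T::Output: Send + 'static,
{
    MANAGER.spawn(future);
}

fn active_tasks() -> usize {
    // The +1 accounts for the main task, as suggested above.
    MANAGER.thread_count() + 1
}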

Async move closure vs. fold

Async closures are still unstable in Rust, as pointed out in the related question What is the difference between |_| async move {} and async move |_| {}, the answer to which I do not really understand.
As far as I understand, the following is not an async closure:
let mut sum: i32 = 0;
stream::iter(1..25)
    .map(compute)
    .buffered(12)
    .for_each(|result| async move { sum += result; })
    .await;
println!("->{}", sum);
I was baffled by this initially: sum is used in the for_each, but it is not moved; otherwise the println! would produce a compiler error. The compiler does give a warning, though, that the "value assigned to sum is never read". In fact, since i32 is Copy, each async move block captures its own copy of sum.
Here is the complete example code
use futures::{stream, StreamExt};
use rand::{thread_rng, Rng};
use std::time::Duration;

async fn compute(i: i32) -> i32 {
    let mut rng = thread_rng();
    let sleep_ms: u64 = rng.gen_range(0..1000);
    tokio::time::sleep(Duration::from_millis(sleep_ms)).await;
    println!("#{} done", i);
    i * i
}

async fn sum_with_fold() {
    let sum = stream::iter(1..25)
        .map(compute)
        .buffered(12)
        .fold(0, |sum, x| async move { sum + x })
        .await;
    println!("->{}", sum);
}

async fn sum_with_closure() {
    let mut sum: i32 = 0;
    stream::iter(1..25)
        .map(compute)
        .buffered(12)
        .for_each(|result| async move { sum += result; })
        .await;
    println!("->{}", sum);
}

#[tokio::main]
async fn main() {
    sum_with_fold().await;
    sum_with_closure().await;
}

// Cargo.toml:
// [dependencies]
// futures = "0.3"
// rand = "0.8"
// tokio = { version = "1", features = ["full"] }
The fold works correctly, whereas sum_with_closure works on a copied sum and this sum cannot be retrieved.
Am I getting this right, and can it be fixed? That is, is there a way to do the accumulation with a closure like this, or am I indeed running into the unstable async closure feature?
This can be done with for_each, but current Rust can't check at compile-time that the lifetimes are correct so you need to use a RefCell to enable run-time checking (with a small performance cost):
async fn sum_with_closure() {
    use std::cell::RefCell;

    let sum = RefCell::new(0);
    let sumref = &sum;
    stream::iter(1..25)
        .map(compute)
        .buffered(12)
        .for_each(|result| async move { *sumref.borrow_mut() += result; })
        .await;
    println!("->{}", sum.into_inner());
}
Playground

How can I mutate the HTML inside a hyper::Response? [duplicate]

I want to write a server using the current master branch of Hyper that saves a message that is delivered by a POST request and sends this message to every incoming GET request.
I have this, mostly copied from the Hyper examples directory:
extern crate futures;
extern crate hyper;
extern crate pretty_env_logger;

use futures::future::FutureResult;
use futures::Stream;
use hyper::header::ContentLength;
use hyper::server::{Http, Request, Response, Service};
use hyper::{Get, Post, StatusCode};

struct Echo {
    data: Vec<u8>,
}

impl Echo {
    fn new() -> Self {
        Echo {
            data: "text".into(),
        }
    }
}

impl Service for Echo {
    type Request = Request;
    type Response = Response;
    type Error = hyper::Error;
    type Future = FutureResult<Response, hyper::Error>;

    fn call(&self, req: Self::Request) -> Self::Future {
        let resp = match (req.method(), req.path()) {
            (&Get, "/") | (&Get, "/echo") => {
                Response::new()
                    .with_header(ContentLength(self.data.len() as u64))
                    .with_body(self.data.clone())
            }
            (&Post, "/") => {
                //self.data.clear(); // argh. &self is not mutable :(
                // even if it was mutable... how to put the entire body into it?
                //req.body().fold(...) ?
                let mut res = Response::new();
                if let Some(len) = req.headers().get::<ContentLength>() {
                    res.headers_mut().set(ContentLength(0));
                }
                res.with_body(req.body())
            }
            _ => {
                Response::new()
                    .with_status(StatusCode::NotFound)
            }
        };
        futures::future::ok(resp)
    }
}

fn main() {
    pretty_env_logger::init().unwrap();
    let addr = "127.0.0.1:12346".parse().unwrap();
    let server = Http::new().bind(&addr, || Ok(Echo::new())).unwrap();
    println!("Listening on http://{} with 1 thread.", server.local_addr().unwrap());
    server.run().unwrap();
}
How do I turn the req.body() (which seems to be a Stream of Chunks) into a Vec<u8>? I assume I must somehow return a Future that consumes the Stream and turns it into a single Vec<u8>, maybe with fold(). But I have no clue how to do that.
Hyper 0.13 provides a body::to_bytes function for this purpose.
use hyper::body;
use hyper::{Body, Response};

pub async fn read_response_body(res: Response<Body>) -> Result<String, hyper::Error> {
    let bytes = body::to_bytes(res.into_body()).await?;
    Ok(String::from_utf8(bytes.to_vec()).expect("response was not valid utf-8"))
}
I'm going to simplify the problem to just return the total number of bytes, instead of echoing the entire stream.
Futures 0.3
Hyper 0.13 + TryStreamExt::try_fold
See euclio's answer about hyper::body::to_bytes if you just want all the data as one giant blob.
Accessing the stream allows for more fine-grained control:
use futures::TryStreamExt; // 0.3.7
use hyper::{server::Server, service, Body, Method, Request, Response}; // 0.13.9
use std::convert::Infallible;
use tokio; // 0.2.22

#[tokio::main]
async fn main() {
    let addr = "127.0.0.1:12346".parse().expect("Unable to parse address");
    let server = Server::bind(&addr).serve(service::make_service_fn(|_conn| async {
        Ok::<_, Infallible>(service::service_fn(echo))
    }));
    println!("Listening on http://{}.", server.local_addr());
    if let Err(e) = server.await {
        eprintln!("Error: {}", e);
    }
}

async fn echo(req: Request<Body>) -> Result<Response<Body>, hyper::Error> {
    let (parts, body) = req.into_parts();
    match (parts.method, parts.uri.path()) {
        (Method::POST, "/") => {
            let entire_body = body
                .try_fold(Vec::new(), |mut data, chunk| async move {
                    data.extend_from_slice(&chunk);
                    Ok(data)
                })
                .await;
            entire_body.map(|body| {
                let body = Body::from(format!("Read {} bytes", body.len()));
                Response::new(body)
            })
        }
        _ => {
            let body = Body::from("Can only POST to /");
            Ok(Response::new(body))
        }
    }
}
Unfortunately, the current implementation of Bytes is no longer compatible with TryStreamExt::try_concat, so we have to switch back to a fold.
Futures 0.1
hyper 0.12 + Stream::concat2
Since futures 0.1.14, you can use Stream::concat2 to stick together all the data into one:
fn concat2(self) -> Concat2<Self>
where
    Self: Sized,
    Self::Item: Extend<<Self::Item as IntoIterator>::Item> + IntoIterator + Default,
use futures::{
    future::{self, Either},
    Future, Stream,
}; // 0.1.25
use hyper::{server::Server, service, Body, Method, Request, Response}; // 0.12.20
use tokio; // 0.1.14

fn main() {
    let addr = "127.0.0.1:12346".parse().expect("Unable to parse address");
    let server = Server::bind(&addr).serve(|| service::service_fn(echo));
    println!("Listening on http://{}.", server.local_addr());
    let server = server.map_err(|e| eprintln!("Error: {}", e));
    tokio::run(server);
}

fn echo(req: Request<Body>) -> impl Future<Item = Response<Body>, Error = hyper::Error> {
    let (parts, body) = req.into_parts();
    match (parts.method, parts.uri.path()) {
        (Method::POST, "/") => {
            let entire_body = body.concat2();
            let resp = entire_body.map(|body| {
                let body = Body::from(format!("Read {} bytes", body.len()));
                Response::new(body)
            });
            Either::A(resp)
        }
        _ => {
            let body = Body::from("Can only POST to /");
            let resp = future::ok(Response::new(body));
            Either::B(resp)
        }
    }
}
You could also convert the Bytes into a Vec<u8> via entire_body.to_vec() and then convert that to a String (see the sketch after the link below).
See also:
How do I convert a Vector of bytes (u8) to a string
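For completeness, a minimal sketch of that conversion inside the map closure above, where body is the concatenated Chunk (assuming the payload is valid UTF-8):
let text = String::from_utf8(body.to_vec()).expect("body was not valid UTF-8");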
hyper 0.11 + Stream::fold
Similar to Iterator::fold, Stream::fold takes an accumulator (called init) and a function that operates on the accumulator and an item from the stream. The result of the function must be another future with the same error type as the original. The total result is itself a future.
fn fold<F, T, Fut>(self, init: T, f: F) -> Fold<Self, F, Fut, T>
where
    F: FnMut(T, Self::Item) -> Fut,
    Fut: IntoFuture<Item = T>,
    Self::Error: From<Fut::Error>,
    Self: Sized,
We can use a Vec as the accumulator. Body's Stream implementation returns a Chunk. This implements Deref<Target = [u8]>, so we can use that to append each chunk's data to the Vec.
extern crate futures; // 0.1.23
extern crate hyper; // 0.11.27

use futures::{Future, Stream};
use hyper::{
    server::{Http, Request, Response, Service},
    Post,
};

fn main() {
    let addr = "127.0.0.1:12346".parse().unwrap();
    let server = Http::new().bind(&addr, || Ok(Echo)).unwrap();
    println!(
        "Listening on http://{} with 1 thread.",
        server.local_addr().unwrap()
    );
    server.run().unwrap();
}

struct Echo;

impl Service for Echo {
    type Request = Request;
    type Response = Response;
    type Error = hyper::Error;
    type Future = Box<futures::Future<Item = Response, Error = Self::Error>>;

    fn call(&self, req: Self::Request) -> Self::Future {
        match (req.method(), req.path()) {
            (&Post, "/") => {
                let f = req.body()
                    .fold(Vec::new(), |mut acc, chunk| {
                        acc.extend_from_slice(&*chunk);
                        futures::future::ok::<_, Self::Error>(acc)
                    })
                    .map(|body| Response::new().with_body(format!("Read {} bytes", body.len())));
                Box::new(f)
            }
            _ => panic!("Nope"),
        }
    }
}
You could also convert the Vec<u8> body to a String.
See also:
How do I convert a Vector of bytes (u8) to a string
Output
When called from the command line, we can see the result:
$ curl -X POST --data hello http://127.0.0.1:12346/
Read 5 bytes
Warning
All of these solutions allow a malicious end user to POST an infinitely sized file, which would cause the machine to run out of memory. Depending on the intended use, you may wish to establish some kind of cap on the number of bytes read, potentially writing to the filesystem at some breakpoint (a capped-read sketch follows the link below).
See also:
How do I apply a limit to the number of bytes read by futures::Stream::concat2?
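For example, here is a minimal sketch of a capped read in the futures 0.3 / hyper 0.13 style used above; the MAX_BODY limit and function name are hypothetical choices:
use futures::TryStreamExt; // 0.3
use hyper::Body; // 0.13

const MAX_BODY: usize = 64 * 1024; // hypothetical cap

// Returns Ok(None) when the client sends more than MAX_BODY bytes.
async fn read_body_capped(mut body: Body) -> Result<Option<Vec<u8>>, hyper::Error> {
    let mut data = Vec::new();
    while let Some(chunk) = body.try_next().await? {
        if data.len() + chunk.len() > MAX_BODY {
            return Ok(None);
        }
        data.extend_from_slice(&chunk);
    }
    Ok(Some(data))
}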
Most of the answers on this topic are outdated or overly complicated. The solution is pretty simple:
/*
    WARNING for beginners!!! This use statement
    is important so we can later use .data() method!!!
*/
use hyper::body::HttpBody;

let my_vector: Vec<u8> = request.into_body().data().await.unwrap().unwrap().to_vec();
let my_string = String::from_utf8(my_vector).unwrap();
You can also use body::to_bytes as euclio answered. Both approaches are straightforward! Note that data() resolves to the body's next chunk, so for a multi-chunk body this only reads the first one. Don't forget to handle the unwraps properly.

How can I stop running synchronous code when the future wrapping it is dropped?

I have asynchronous code which calls synchronous code that takes a while to run, so I followed the suggestions outlined in What is the best approach to encapsulate blocking I/O in future-rs?. However, my asynchronous code has a timeout, after which I am no longer interested in the result of the synchronous calculation:
use std::{thread, time::Duration};
use tokio::{task, time}; // 0.2.10

// This takes 1 second
fn long_running_complicated_calculation() -> i32 {
    let mut sum = 0;
    for i in 0..10 {
        thread::sleep(Duration::from_millis(100));
        eprintln!("{}", i);
        sum += i;
        // Interruption point
    }
    sum
}

#[tokio::main]
async fn main() {
    let handle = task::spawn_blocking(long_running_complicated_calculation);
    let guarded = time::timeout(Duration::from_millis(250), handle);
    match guarded.await {
        Ok(s) => panic!("Sum was calculated: {:?}", s),
        Err(_) => eprintln!("Sum timed out (expected)"),
    }
}
Running this code shows that the timeout fires, but the synchronous code also continues to run:
0
1
Sum timed out (expected)
2
3
4
5
6
7
8
9
How can I stop running the synchronous code when the future wrapping it is dropped?
I don't expect that the compiler will magically be able to stop my synchronous code. I've annotated a line with "interruption point" where I'd be required to manually put some kind of check to exit early from my function, but I don't know how to easily get a notification that the result of spawn_blocking (or ThreadPool::spawn_with_handle, for pure futures-based code) has been dropped.
You can pass an atomic boolean which you then use to flag the task as needing cancellation. (I'm not sure I'm using an appropriate Ordering for the load/store calls; that probably needs some more consideration.)
Here's a modified version of your code that outputs
0
1
Sum timed out (expected)
2
Interrupted...
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::{thread, time::Duration};
use tokio::{task, time}; // 0.2.10

// This takes 1 second
fn long_running_complicated_calculation(flag: &AtomicBool) -> i32 {
    let mut sum = 0;
    for i in 0..10 {
        thread::sleep(Duration::from_millis(100));
        eprintln!("{}", i);
        sum += i;
        // Interruption point
        if !flag.load(Ordering::Relaxed) {
            eprintln!("Interrupted...");
            break;
        }
    }
    sum
}

#[tokio::main]
async fn main() {
    let some_bool = Arc::new(AtomicBool::new(true));
    let some_bool_clone = some_bool.clone();
    let handle =
        task::spawn_blocking(move || long_running_complicated_calculation(&some_bool_clone));
    let guarded = time::timeout(Duration::from_millis(250), handle);
    match guarded.await {
        Ok(s) => panic!("Sum was calculated: {:?}", s),
        Err(_) => {
            eprintln!("Sum timed out (expected)");
            some_bool.store(false, Ordering::Relaxed);
        }
    }
}
playground
It's not really possible to get this to happen automatically on the dropping of the futures / handles with current Tokio. Some work towards this is being done in http://github.com/tokio-rs/tokio/issues/1830 and http://github.com/tokio-rs/tokio/issues/1879.
However, you can get something similar by wrapping the futures in a custom type.
Here's an example which looks almost the same as the original code, but with the addition of a simple wrapper type in a module. It would be even more ergonomic if I implemented Future on the wrapper type, just forwarding to the wrapped handle, but that was proving tiresome (a sketch of that impl follows the playground link below).
mod blocking_cancelable_task {
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::Arc;
    use tokio::task;

    pub struct BlockingCancelableTask<T> {
        pub h: Option<tokio::task::JoinHandle<T>>,
        flag: Arc<AtomicBool>,
    }

    impl<T> Drop for BlockingCancelableTask<T> {
        fn drop(&mut self) {
            eprintln!("Dropping...");
            self.flag.store(false, Ordering::Relaxed);
        }
    }

    impl<T> BlockingCancelableTask<T>
    where
        T: Send + 'static,
    {
        pub fn new<F>(f: F) -> BlockingCancelableTask<T>
        where
            F: FnOnce(&AtomicBool) -> T + Send + 'static,
        {
            let flag = Arc::new(AtomicBool::new(true));
            let flag_clone = flag.clone();
            let h = task::spawn_blocking(move || f(&flag_clone));
            BlockingCancelableTask { h: Some(h), flag }
        }
    }

    pub fn spawn<F, T>(f: F) -> BlockingCancelableTask<T>
    where
        T: Send + 'static,
        F: FnOnce(&AtomicBool) -> T + Send + 'static,
    {
        BlockingCancelableTask::new(f)
    }
}

use std::sync::atomic::{AtomicBool, Ordering};
use std::{thread, time::Duration};
use tokio::time; // 0.2.10

// This takes 1 second
fn long_running_complicated_calculation(flag: &AtomicBool) -> i32 {
    let mut sum = 0;
    for i in 0..10 {
        thread::sleep(Duration::from_millis(100));
        eprintln!("{}", i);
        sum += i;
        // Interruption point
        if !flag.load(Ordering::Relaxed) {
            eprintln!("Interrupted...");
            break;
        }
    }
    sum
}

#[tokio::main]
async fn main() {
    let mut h = blocking_cancelable_task::spawn(long_running_complicated_calculation);
    let guarded = time::timeout(Duration::from_millis(250), h.h.take().unwrap());
    match guarded.await {
        Ok(s) => panic!("Sum was calculated: {:?}", s),
        Err(_) => {
            eprintln!("Sum timed out (expected)");
        }
    }
}
playground
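Following up on the "more ergonomic" remark above, here is an untested sketch of that forwarding Future implementation, assuming tokio 0.2, where JoinHandle<T> is itself a Future:
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use tokio::task::JoinError;

impl<T> Future for blocking_cancelable_task::BlockingCancelableTask<T> {
    type Output = Result<T, JoinError>;

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        // Project the pin onto the inner JoinHandle and forward the poll.
        // Safety: h is never moved out of self while self is pinned.
        let h = unsafe {
            self.map_unchecked_mut(|s| s.h.as_mut().expect("polled after handle was taken"))
        };
        h.poll(cx)
    }
}
With this in place, main could pass the wrapper itself to time::timeout instead of taking the inner handle out; dropping the timed-out wrapper then flips the cancellation flag automatically.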

How would I make a TcpClient request per item in a futures Stream?

I have a concept project where the client sends the server a number (PrimeClientRequest), the server computes whether the value is prime, and returns a response (PrimeClientResponse). I want the client to be a simple CLI which prompts the user for a number, sends the request to the server, and displays the response. Ideally I want to do this using TcpClient from Tokio and Streams from Futures-Rs.
I've written a Tokio server using services and I want to reuse the same codec and proto for the client.
Part of the client is a function called read_prompt which returns a Stream. Essentially it is an infinite loop in which each iteration reads some input from stdin.
Here's the relevant code:
main.rs
use futures::{Future, Stream};
use std::env;
use std::net::SocketAddr;
use tokio_core::reactor::Core;
use tokio_prime::protocol::PrimeClientProto;
use tokio_prime::request::PrimeRequest;
use tokio_proto::TcpClient;
use tokio_service::Service;

mod cli;

fn main() {
    let mut core = Core::new().unwrap();
    let handle = core.handle();
    let addr_string = env::args().nth(1).unwrap_or("127.0.0.1:8080".to_string());
    let remote_addr = addr_string.parse::<SocketAddr>().unwrap();
    println!("Connecting on {}", remote_addr);

    let tcp_client = TcpClient::new(PrimeClientProto).connect(&remote_addr, &handle);
    core.run(tcp_client.and_then(|client| {
        client
            .call(PrimeRequest { number: Ok(0) })
            .and_then(|response| {
                println!("RESP = {:?}", response);
                Ok(())
            })
    })).unwrap();
}
cli.rs
use futures::sync::mpsc;
use futures::{Future, Sink, Stream};
use std::io::prelude::*;
use std::io::{Stdin, Stdout};
use std::{io, thread};

pub fn read_prompt() -> impl Stream<Item = u64, Error = ()> {
    let (tx, rx) = mpsc::channel(1);
    thread::spawn(move || loop {
        let thread_tx = tx.clone();
        let input = prompt(io::stdout(), io::stdin()).unwrap();
        let parsed_input = input
            .parse::<u64>()
            .map_err(|_| io::Error::new(io::ErrorKind::Other, "invalid u64"));
        thread_tx.send(parsed_input.unwrap()).wait().unwrap();
    });
    rx
}

fn prompt(stdout: Stdout, stdin: Stdin) -> io::Result<String> {
    let mut stdout_handle = stdout.lock();
    stdout_handle.write(b"> ")?;
    stdout_handle.flush()?;

    let mut buf = String::new();
    let mut stdin_handle = stdin.lock();
    stdin_handle.read_line(&mut buf)?;
    Ok(buf.trim().to_string())
}
With the code above, the client sends a single request to the server before the client terminates. I want to be able to use the stream generated from read_prompt to provide input to the TcpClient and make a request per item in the stream. How would I go about doing this?
The full code can be found at joshleeb/tokio-prime.
The solution I have come up with (so far) is to use LoopFn from the futures-rs crate. It's not ideal, as a new connection still has to be made for every request, but it is at least a step in the right direction (a connection-reusing sketch follows the code below).
main.rs
use futures::{future, Future};
use std::net::SocketAddr;
use std::{env, io};
use tokio_core::reactor::{Core, Handle};
use tokio_prime::protocol::PrimeClientProto;
use tokio_prime::request::PrimeRequest;
use tokio_proto::TcpClient;
use tokio_service::Service;

mod cli;

fn handler<'a>(
    handle: &'a Handle,
    addr: &'a SocketAddr,
) -> impl Future<Item = (), Error = ()> + 'a {
    cli::prompt(io::stdin(), io::stdout())
        .and_then(move |number| {
            TcpClient::new(PrimeClientProto)
                .connect(addr, handle)
                .and_then(move |client| Ok((client, number)))
        })
        .and_then(|(client, number)| {
            client
                .call(PrimeRequest { number: Ok(number) })
                .and_then(|response| {
                    println!("{:?}", response);
                    Ok(())
                })
        })
        .or_else(|err| {
            println!("! {}", err);
            Ok(())
        })
}

fn main() {
    let mut core = Core::new().unwrap();
    let handle = core.handle();
    let addr_string = env::args().nth(1).unwrap_or("127.0.0.1:8080".to_string());
    let remote_addr = addr_string.parse::<SocketAddr>().unwrap();
    println!("Connecting on {}", remote_addr);

    let client = future::loop_fn((), |_| {
        handler(&handle, &remote_addr)
            .map(|_| -> future::Loop<(), ()> { future::Loop::Continue(()) })
    });
    core.run(client).ok();
}
cli.rs
use futures::prelude::*;
use std::io;
use std::io::prelude::*;
use std::io::{Stdin, Stdout};

#[async]
pub fn prompt(stdin: Stdin, stdout: Stdout) -> io::Result<u64> {
    let mut stdout_handle = stdout.lock();
    stdout_handle.write(b"> ")?;
    stdout_handle.flush()?;

    let mut buf = String::new();
    let mut stdin_handle = stdin.lock();
    stdin_handle.read_line(&mut buf)?;
    parse_input(buf.trim().to_string())
}

fn parse_input(s: String) -> io::Result<u64> {
    s.parse::<u64>()
        .map_err(|_| io::Error::new(io::ErrorKind::Other, "invalid u64"))
}
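For what it's worth, a connection-reusing variant should be possible by threading the client through loop_fn's state instead of reconnecting inside handler. An untested sketch against the same crates, replacing the loop in main (the turbofish pins the never-produced Break type to ()):
let client_loop = TcpClient::new(PrimeClientProto)
    .connect(&remote_addr, &handle)
    .and_then(|client| {
        future::loop_fn(client, |client| {
            cli::prompt(io::stdin(), io::stdout()).and_then(move |number| {
                let response = client.call(PrimeRequest { number: Ok(number) });
                response.map(move |resp| {
                    println!("{:?}", resp);
                    // Hand the client back so the next iteration reuses the connection.
                    future::Loop::Continue::<(), _>(client)
                })
            })
        })
    });
core.run(client_loop).ok();
Unlike handler, this version aborts on the first error instead of printing it and continuing.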
