From: Ralf Jung Date: Sat, 18 Jul 2015 14:42:15 +0000 (+0200) Subject: split part 11 into two, and explain interior mutability and Cell and RefCell in the... X-Git-Url: https://git.ralfj.de/rust-101.git/commitdiff_plain/188b1ec1b8528e2326791feccc8077e15bd60182?ds=inline;hp=-c split part 11 into two, and explain interior mutability and Cell and RefCell in the new part 12 --- 188b1ec1b8528e2326791feccc8077e15bd60182 diff --git a/solutions/src/callbacks.rs b/solutions/src/callbacks.rs new file mode 100644 index 0000000..93fcb17 --- /dev/null +++ b/solutions/src/callbacks.rs @@ -0,0 +1,66 @@ +use std::rc::Rc; +use std::cell::RefCell; + +#[derive(Clone)] +pub struct Callbacks { + callbacks: Vec>>, +} + +impl Callbacks { + pub fn new() -> Self { + Callbacks { callbacks: Vec::new() } /*@*/ + } + + pub fn register(&mut self, callback: F) { + let cell = Rc::new(RefCell::new(callback)); + self.callbacks.push(cell); /*@*/ + } + + pub fn call(&self, val: i32) { + for callback in self.callbacks.iter() { + // We have to *explicitly* borrow the contents of a `RefCell`. + //@ At run-time, the cell will keep track of the number of outstanding shared and mutable borrows, + //@ and panic if the rules are violated. Since this function is the only one that borrow the + //@ environments of the closures, and this function requires a *mutable* borrow of `self`, we know this cannot + //@ happen.
+ //@ For this check to be performed, `closure` is a *guard*: Rather than a normal borrow, `borrow_mut` returns + //@ a smart pointer (`RefMut`, in this case) that waits until is goes out of scope, and then + //@ appropriately updates the number of active borrows. + //@ + //@ The function would still typecheck with an immutable borrow of `self` (since we are + //@ relying on the interior mutability of `self`), but then it could happen that a callback + //@ will in turn trigger another round of callbacks, so that `call` would indirectly call itself. + //@ This is called reentrancy. It would imply that we borrow the closure a second time, and + //@ panic at run-time. I hope this also makes it clear that there's absolutely no hope of Rust + //@ performing these checks statically, at compile-time: It would have to detect reentrancy! + let mut closure = callback.borrow_mut(); + // Unfortunately, Rust's auto-dereference of pointers is not clever enough here. We thus have to explicitly + // dereference the smart pointer and obtain a mutable borrow of the target. + (&mut *closure)(val); + } + } +} + +#[cfg(test)] +mod tests { + use std::rc::Rc; + use std::cell::RefCell; + use super::*; + + #[test] + #[should_panic] + fn test_reentrant() { + let c = Rc::new(RefCell::new(Callbacks::new())); + c.borrow_mut().register(|val| println!("Callback called: {}", val) ); + + // If we change the two "borrow" below to "borrow_mut", you can get a panic even with a "call" that requires a + // mutable borrow. However, that panic is then triggered by our own, external `RefCell` (so it's kind of our fault), + // rather than being triggered by the `RefCell` in the `Callbacks`. + { + let c2 = c.clone(); + c.borrow_mut().register(move |val| c2.borrow().call(val+val) ); + } + + c.borrow().call(42); + } +} \ No newline at end of file diff --git a/solutions/src/counter.rs b/solutions/src/counter.rs index 265fb99..afea9d0 100644 --- a/solutions/src/counter.rs +++ b/solutions/src/counter.rs @@ -2,7 +2,7 @@ use std::sync::{Arc, RwLock}; use std::thread; #[derive(Clone)] -struct ConcurrentCounter(Arc>); +pub struct ConcurrentCounter(Arc>); impl ConcurrentCounter { // The constructor should not be surprising. @@ -15,6 +15,13 @@ impl ConcurrentCounter { *counter = *counter + by; } + pub fn compare_and_inc(&self, test: usize, by: usize) { + let mut counter = self.0.write().unwrap(); + if *counter == test { + *counter += by; + } + } + pub fn get(&self) -> usize { let counter = self.0.read().unwrap(); *counter diff --git a/solutions/src/main.rs b/solutions/src/main.rs index be6e3d5..0242f49 100644 --- a/solutions/src/main.rs +++ b/solutions/src/main.rs @@ -9,6 +9,7 @@ pub mod bigint; pub mod vec; pub mod rgrep; pub mod counter; +pub mod callbacks; pub fn main() { rgrep::main(); diff --git a/src/main.rs b/src/main.rs index 4290e1e..1111443 100644 --- a/src/main.rs +++ b/src/main.rs @@ -36,9 +36,9 @@ // --------------- // // You will need to have Rust installed, of course. It is available for download on -// [the Rust website](http://www.rust-lang.org/). You should go for either the "stable" -// or the "beta" channel. More detailed installation instructions are provided in -// [the second chapter of The Book](https://doc.rust-lang.org/stable/book/installing-rust.html). +// [the Rust website](http://www.rust-lang.org/). Make sure you get at least version 1.2 +// (at the time of writing, that's the current beta release). 
More detailed installation +// instructions are provided in [the second chapter of The Book](https://doc.rust-lang.org/stable/book/installing-rust.html). // This will also install `cargo`, the tool responsible for building rust projects (or *crates*). // Next, fetch the Rust-101 source code from the [git repository](http://www.ralfj.de/git/rust-101.git) @@ -77,13 +77,14 @@ // * [Part 08: Associated Types, Modules](part08.html) // * [Part 09: Iterators](part09.html) // * [Part 10: Closures](part10.html) -// * [Part 11: Trait Objects, Box, Rc, Lifetime bounds](part11.html) -// * [Part 12: Concurrency, Arc, Send](part12.html) -// * [Part 13: Slices, Arrays, External Dependencies](part13.html) // // ### Advanced Rust // -// * [Part 14: Mutex, Interior Mutability, Sync](part14.html) +// * [Part 11: Trait Objects, Box, Lifetime bounds](part11.html) +// * [Part 12: Rc, Interior Mutability, Cell, RefCell](part12.html) +// * [Part 13: Concurrency, Arc, Send](part13.html) +// * [Part 14: Slices, Arrays, External Dependencies](part14.html) +// * [Part 15: Mutex, Interior Mutability (cont.), Sync](part15.html) // * (to be continued) // #![allow(dead_code, unused_imports, unused_variables, unused_mut, unreachable_code)] diff --git a/src/part11.rs b/src/part11.rs index cfe6c20..5cc1462 100644 --- a/src/part11.rs +++ b/src/part11.rs @@ -1,156 +1,120 @@ -// Rust-101, Part 11: Trait Objects, Box, Rc, Lifetime bounds -// ========================================================== +// Rust-101, Part 11: Trait Objects, Box, Lifetime bounds +// ====================================================== //@ We will play around with closures a bit more. Let us implement some kind of generic "callback" -//@ mechanism, providing two functions: Registering a new callback, and calling all registered callbacks. There will be two -//@ versions, so to avoid clashes of names, we put them into modules. -mod callbacks { - //@ First of all, we need to find a way to store the callbacks. Clearly, there will be a `Vec` involved, so that we can - //@ always grow the number of registered callbacks. A callback will be a closure, i.e., something implementing - //@ `FnMut(i32)` (we want to call this multiple times, so clearly `FnOnce` would be no good). So our first attempt may be the following. - // For now, we just decide that the callbacks have an argument of type `i32`. - struct CallbacksV1 { - callbacks: Vec, - } - //@ However, this will not work. Remember how the "type" of a closure is specific to the environment of captured variables. Different closures - //@ all implementing `FnMut(i32)` may have different types. However, a `Vec` is a *uniformly typed* vector. - - //@ We will thus need a way to store things of *different* types in the same vector. We know all these types implement `FnMut(i32)`. For this scenario, - //@ Rust provides *trait objects*: The truth is, `FnMut(i32)` is not just a trait. It is also a type, that can be given to anything implementing - //@ this trait. So, we may write the following. - /* struct CallbacksV2 { - callbacks: Vec, - } */ - //@ But, Rust complains about this definition. It says something about "Sized". What's the trouble? See, for many things we want to do, it is crucial that - //@ Rust knows the precise, fixed size of the type - that is, how large this type will be when represented in memory. For example, for a `Vec`, the - //@ elements are stored one right after the other. How should that be possible, without a fixed size? The trouble is, `FnMut(i32)` could be of any size. 
- //@ We don't know how large that "type that implements `FnMut(i32)`" is. Rust calls this an *unsized* type. Whenever we introduce a type variable, Rust - //@ will implicitly add a bound to that variable, demanding that it is sized. That's why we did not have to worry about this so far.
- //@ You can opt-out of this implicit bound by saying `T: ?Sized`. Then `T` may or may not be sized. - - //@ So, what can we do, if we can't store the callbacks in a vector? We can put them in a box. Semantically, `Box` is a lot like `T`: You fully own - //@ the data stored there. On the machine, however, `Box` is a *pointer* to `T`. It is a lot like `std::unique_ptr` in C++. In our current example, - //@ the important bit is that since it's a pointer, `T` can be unsized, but `Box` itself will always be sized. So we can put it in a `Vec`. - pub struct Callbacks { - callbacks: Vec>, - } - - impl Callbacks { - // Now we can provide some functions. The constructor should be straight-forward. - pub fn new() -> Self { - Callbacks { callbacks: Vec::new() } /*@*/ - } - - // Registration simply stores the callback. - pub fn register(&mut self, callback: Box) { - self.callbacks.push(callback); /*@*/ - } - - // And here we call all the stored callbacks. - pub fn call(&mut self, val: i32) { - // Since they are of type `FnMut`, we need to mutably iterate. Notice that boxes dereference implicitly. - for callback in self.callbacks.iter_mut() { - callback(val); /*@*/ - } - } - } - - // Now we are ready for the demo. - pub fn demo(c: &mut Callbacks) { - c.register(Box::new(|val| println!("Callback 1: {}", val))); - c.call(0); - - //@ We can even register callbacks that modify their environment. Rust will again attempt to borrow `count`. However, - //@ that doesn't work out this time: Since we want to put this thing in a `Box`, it could live longer than the function - //@ we are in. Then the borrow of `count` would become invalid. We have to explicitly tell Rust to `move` ownership of the - //@ variable into the closure. Its environment will then contain a `usize` rather than a `&mut uszie`, and have - //@ no effect on this local variable anymore. - let mut count: usize = 0; - c.register(Box::new(move |val| { - count = count+1; - println!("Callback 2, {}. time: {}", count, val); - } )); - c.call(1); c.call(2); - } +//@ mechanism, providing two functions: Registering a new callback, and calling all registered callbacks. + +//@ First of all, we need to find a way to store the callbacks. Clearly, there will be a `Vec` involved, so that we can +//@ always grow the number of registered callbacks. A callback will be a closure, i.e., something implementing +//@ `FnMut(i32)` (we want to call this multiple times, so clearly `FnOnce` would be no good). So our first attempt may be the following. +// For now, we just decide that the callbacks have an argument of type `i32`. +struct CallbacksV1 { + callbacks: Vec, } - -// Remember to edit `main.rs` to run the demo. -pub fn main() { - let mut c = callbacks::Callbacks::new(); - callbacks::demo(&mut c); +//@ However, this will not work. Remember how the "type" of a closure is specific to the environment of captured variables. Different closures +//@ all implementing `FnMut(i32)` may have different types. However, a `Vec` is a *uniformly typed* vector. + +//@ We will thus need a way to store things of *different* types in the same vector. We know all these types implement `FnMut(i32)`. For this scenario, +//@ Rust provides *trait objects*: The truth is, `FnMut(i32)` is not just a trait. It is also a type, that can be given to anything implementing +//@ this trait. So, we may write the following. +/* struct CallbacksV2 { + callbacks: Vec, +} */ +//@ But, Rust complains about this definition. It says something about "Sized". What's the trouble? 
See, for many things we want to do, it is crucial that +//@ Rust knows the precise, fixed size of the type - that is, how large this type will be when represented in memory. For example, for a `Vec`, the +//@ elements are stored one right after the other. How should that be possible, without a fixed size? The point is, `FnMut(i32)` could be of any size. +//@ We don't know how large that "type that implements `FnMut(i32)`" is. Rust calls this an *unsized* type. Whenever we introduce a type variable, Rust +//@ will implicitly add a bound to that variable, demanding that it is sized. That's why we did not have to worry about this so far.
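To make this concrete, here is a minimal standalone sketch (not part of this patch; the closure names are made up) showing that two closures which both implement `FnMut(i32)` can have completely different sizes, depending on what they capture; that is exactly why they cannot be stored directly in one uniformly typed `Vec`:

```rust
use std::mem;

fn main() {
    let x = 0i32;
    let big = [0.0f64; 8];
    // Both closures implement `FnMut(i32)`, but each one has its own anonymous type,
    // and the size of that type is determined by the captured environment.
    let small_closure = move |val: i32| println!("{} {}", val, x);
    let large_closure = move |val: i32| println!("{} {}", val, big[0]);
    small_closure(1);
    large_closure(2);
    println!("{} bytes vs. {} bytes",
             mem::size_of_val(&small_closure), mem::size_of_val(&large_closure));
}
```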
+//@ You can opt-out of this implicit bound by saying `T: ?Sized`. Then `T` may or may not be sized. + +//@ So, what can we do, if we can't store the callbacks in a vector? We can put them in a box. Semantically, `Box` is a lot like `T`: You fully own +//@ the data stored there. On the machine, however, `Box` is a *pointer* to a heap-allocated `T`. It is a lot like `std::unique_ptr` in C++. In our current example, +//@ the important bit is that since it's a pointer, `T` can be unsized, but `Box` itself will always be sized. So we can put it in a `Vec`. +pub struct Callbacks { + callbacks: Vec>, } -mod callbacks_clone { - //@ So, this worked great, didn't it! There's one point though that I'd like to emphasize: One cannot `clone` a closure. - //@ Hence it becomes impossible to implement `Clone` for our `Callbacks` type. What could we do about this? - - //@ You already learned about `Box` above. `Box` is an example of a *smart pointer*: It's like a pointer (in the C - //@ sense), but with some additional smarts to it. For `Box`, that's the part about ownership. Once you drop the box, the - //@ content it points to will be deleted.
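For illustration, here is a minimal standalone sketch (assumed code, not taken from this patch) of the boxed-closure pattern described above, written with the pre-`dyn` trait object syntax this tutorial targets (current editions of Rust would spell the element type `Box<dyn FnMut(i32)>`):

```rust
fn main() {
    // `Box<FnMut(i32)>` is a sized type (a pointer), even though `FnMut(i32)` itself is not,
    // so boxes of differently typed closures can all live in the same vector.
    let mut callbacks: Vec<Box<FnMut(i32)>> = Vec::new();
    callbacks.push(Box::new(|val: i32| println!("first: {}", val)));
    let mut count = 0;
    callbacks.push(Box::new(move |val: i32| {
        count += 1;
        println!("second: {} (call #{})", val, count);
    }));
    for callback in callbacks.iter_mut() {
        // The box dereferences implicitly, so the stored closure can be called directly.
        callback(42);
    }
}
```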
- //@ Another example of a smart pointer is `Rc`. This is short for *reference-counter*, so you can already guess how - //@ this pointer is smart: It has a reference count. You can `clone` an `Rc` as often as you want, that doesn't affect the - //@ data it contains at all. It only creates more references to the same data. Once all the references are gone, the data is deleted. - //@ - //@ Wait a moment, you may say here. Multiple references to the same data? That's aliasing! Indeed: - //@ Once data is stored in an `Rc`, it is read-only. By dereferencing the smart `Rc`, you can only get a shared borrow of the data. - use std::rc::Rc; - - //@ Because of this read-only restriction, we cannot use `FnMut` here: We'd be unable to call the function with a mutable borrow - //@ of it's environment! So we have to go with `Fn`. We wrap that in an `Rc`, and then Rust happily derives `Clone` for us. - #[derive(Clone)] - pub struct Callbacks { - callbacks: Vec>, +impl Callbacks { + // Now we can provide some functions. The constructor should be straight-forward. + pub fn new() -> Self { + Callbacks { callbacks: Vec::new() } /*@*/ } - impl Callbacks { - pub fn new() -> Self { - Callbacks { callbacks: Vec::new() } /*@*/ - } + // Registration simply stores the callback. + pub fn register(&mut self, callback: Box) { + self.callbacks.push(callback); /*@*/ + } - // For the `register` function, we don't actually have to use trait objects in the argument. - //@ We can make this function generic, such that it will be instantiated with some concrete closure type `F` - //@ and do the creation of the `Rc` and the conversion to `Fn(i32)` itself. - - //@ For this to work, we need to demand that the type `F` does not contain any short-lived borrows. After all, we will store it - //@ in our list of callbacks indefinitely. If the closure contained a pointer to our caller's stackframe, that pointer - //@ could be invalid by the time the closure is called. We can mitigate this by bounding `F` by a *lifetime*: `T: 'a` says - //@ that all data of type `T` will *outlive* (i.e., will be valid for at least as long as) lifetime `'a`. - //@ Here, we use the special lifetime `'static`, which is the lifetime of the entire program. - //@ The same bound has been implicitly added in the version of `register` above, and in the definition of - //@ `Callbacks`. This is the reason we could not have the borrowed `count` in the closure in `demo` previously. - pub fn register(&mut self, callback: F) { - self.callbacks.push(Rc::new(callback)); /*@*/ - } + // We can also write a generic version of `register`, such that it will be instantiated with some concrete closure type `F` + // and do the creation of the `Box` and the conversion from `F` to `FnMut(i32)` itself. + + //@ For this to work, we need to demand that the type `F` does not contain any short-lived borrows. After all, we will store it + //@ in our list of callbacks indefinitely. If the closure contained a pointer to our caller's stackframe, that pointer + //@ could be invalid by the time the closure is called. We can mitigate this by bounding `F` by a *lifetime*: `F: 'a` says + //@ that all data of type `F` will *outlive* (i.e., will be valid for at least as long as) lifetime `'a`. + //@ Here, we use the special lifetime `'static`, which is the lifetime of the entire program. + //@ The same bound has been implicitly added in the version of `register` above, and in the definition of + //@ `Callbacks`. 
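Written out with explicit type parameters, such a lifetime-bounded registration function has roughly the following shape (a sketch only; the actual code in this patch may differ in details, and the `main` below exists just to keep the sketch self-contained):

```rust
struct Callbacks {
    callbacks: Vec<Box<FnMut(i32)>>,
}

impl Callbacks {
    // `F: FnMut(i32) + 'static` demands that `F` implements the trait *and* outlives
    // `'static`, i.e. that its environment contains no short-lived borrows.
    fn register_generic<F: FnMut(i32) + 'static>(&mut self, callback: F) {
        self.callbacks.push(Box::new(callback));
    }
}

fn main() {
    let mut c = Callbacks { callbacks: Vec::new() };
    c.register_generic(|val| println!("got {}", val));
    for callback in c.callbacks.iter_mut() {
        callback(7);
    }
}
```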
+ pub fn register_generic(&mut self, callback: F) { + self.callbacks.push(Box::new(callback)); /*@*/ + } - pub fn call(&mut self, val: i32) { - // We only need a shared iterator here. `Rc` also implicitly dereferences, so we can simply call the callback. - for callback in self.callbacks.iter() { - callback(val); /*@*/ - } + // And here we call all the stored callbacks. + pub fn call(&mut self, val: i32) { + // Since they are of type `FnMut`, we need to mutably iterate. + for callback in self.callbacks.iter_mut() { + //@ Here, `callback` has type `&mut Box`. We can make use of the fact that `Box` is a *smart pointer*: In + //@ particular, we can use it as if it were a normal pointer, and use `*` to get to its contents. Then we mutably borrow + //@ these contents, because we call a `FnMut`. + (&mut *callback)(val); /*@*/ + //@ Just like it is the case with normal borrows, this typically happens implicitly, so we can also directly call the function. + //@ Try removing the `&mut *`. + //@ + //@ The difference to a normal pointer is that `Box` implies ownership: Once you drop the box (i.e., when the entire `Callbacks` instance is + //@ dropped), the content it points to on the heap will be deleted. } } +} - // The demo works just as above. Our counting callback doesn't work anymore though, because we are using `Fn` now. - fn demo(c: &mut Callbacks) { - c.register(|val| println!("Callback 1: {}", val)); - c.call(0); c.call(1); +// Now we are ready for the demo. Remember to edit `main.rs` to run it. +pub fn main() { + let mut c = Callbacks::new(); + c.register(Box::new(|val| println!("Callback 1: {}", val))); + c.call(0); + + { + //@ We can even register callbacks that modify their environment. Per default, Rust will attempt to borrow `count`. However, + //@ that doesn't work out this time. Remember the `'static` bound above? Borrowing `count` in the environment would + //@ violate that bound, as the borrow is only valid for this block. If the callbacks are triggered later, we'd be in trouble. + //@ We have to explicitly tell Rust to `move` ownership of the variable into the closure. Its environment will then contain a + //@ `usize` rather than a `&mut uszie`, and the closure has no effect on this local variable anymore. + let mut count: usize = 0; + c.register_generic(move |val| { + count = count+1; + println!("Callback 2: {} ({}. time)", val, count); + } ); } + c.call(1); c.call(2); } -// **Exercise 11.1**: We made the arbitrary choice of using `i32` for the arguments. Generalize the data-structures above -// to work with an arbitrary type `T` that's passed to the callbacks. Since you need to call multiple callbacks with the -// same `t: T`, you will either have to restrict `T` to `Copy` types, or pass a borrow. - //@ ## Run-time behavior //@ When you run the program above, how does Rust know what to do with the callbacks? Since an unsized type lacks some information, -//@ a *pointer* to such a type (be it a `Box`, an `Rc` or a borrow) will need to complete this information. We say that pointers to +//@ a *pointer* to such a type (be it a `Box` or a borrow) will need to complete this information. We say that pointers to //@ trait objects are *fat*. They store not only the address of the object, but (in the case of trait objects) also a *vtable*: A //@ table of function pointers, determining the code that's run when a trait method is called. There are some restrictions for traits to be usable //@ as trait objects. 
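One quick way to observe that such pointers are fat is to compare sizes; this is a standalone sketch (not from the patch), and the numbers in the comments assume a typical 64-bit target:

```rust
use std::mem;

fn main() {
    // A reference to a sized type is a single machine word (8 bytes on a 64-bit target)...
    println!("&i32:        {} bytes", mem::size_of::<&i32>());
    // ...while a reference to a trait object also carries the vtable pointer,
    // so it is two words wide (16 bytes on a 64-bit target).
    println!("&FnMut(i32): {} bytes", mem::size_of::<&FnMut(i32)>());
}
```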
This is called *object safety* and described in [the documentation](http://doc.rust-lang.org/stable/book/trait-objects.html) and [the reference](http://doc.rust-lang.org/reference.html#trait-objects). +//@ In case of the `FnMut` trait, there's only a single action to be performed: Calling the closure. You can thus think of a pointer to `FnMut` as +//@ a pointer to the code, and a pointer to the environment. This is how Rust recovers the typical encoding of closures as a special case of a more +//@ general concept. //@ -//@ Whenever you write a generic function, you have a choice: You can make it polymorphic, like our `vec_min`. Or you -//@ can use trait objects, like the first `register` above. The latter will result in only a single compiled version (rather +//@ Whenever you write a generic function, you have a choice: You can make it generic, like `register_generic`. Or you +//@ can use trait objects, like `register`. The latter will result in only a single compiled version (rather //@ than one version per type it is instantiated with). This makes for smaller code, but you pay the overhead of the virtual function calls. -//@ Isn't it beautiful how traits can handle both of these cases (and much more, as we saw, like closures and operator overloading) nicely? +//@ (Of course, in the case of `register` above, there's no function called on the trait object.) +//@ Isn't it beautiful how traits can nicely handle this tradeoff (and much more, as we saw, like closures and operator overloading)? + +// **Exercise 11.1**: We made the arbitrary choice of using `i32` for the arguments. Generalize the data-structures above +// to work with an arbitrary type `T` that's passed to the callbacks. Since you need to call multiple callbacks with the +// same `t: T`, you will either have to restrict `T` to `Copy` types, or pass a borrow. //@ [index](main.html) | [previous](part10.html) | [next](part12.html) diff --git a/src/part12.rs b/src/part12.rs index dc0da61..c749865 100644 --- a/src/part12.rs +++ b/src/part12.rs @@ -1,191 +1,163 @@ -// Rust-101, Part 12: Concurrency, Arc, Send -// ========================================= - -use std::io::prelude::*; -use std::{io, fs, thread}; -use std::sync::mpsc::{sync_channel, SyncSender, Receiver}; -use std::sync::Arc; - -//@ Our next stop are the concurrency features of Rust. We are going to write our own small version of "grep", -//@ called *rgrep*, and it is going to make use of concurrency: One thread reads the input files, one thread does -//@ the actual matching, and one thread writes the output. I already mentioned in the beginning of the course that -//@ Rust's type system (more precisely, the discipline of ownership and borrowing) will help us to avoid a common -//@ pitfall of concurrent programming: data races. - -// Before we come to the actual code, we define a data-structure `Options` to store all the information we need -// to complete the job: Which files to work on, which pattern to look for, and how to output.
-//@ Besides just printing all the matching lines, we will also offer to count them, or alternatively to sort them. -#[derive(Clone,Copy)] -pub enum OutputMode { - Print, - SortAndPrint, - Count, -} -use self::OutputMode::*; +// Rust-101, Part 12: Rc, Interior Mutability, Cell, RefCell +// ========================================================= + +use std::rc::Rc; +use std::cell::{Cell, RefCell}; -pub struct Options { - pub files: Vec, - pub pattern: String, - pub output_mode: OutputMode, +//@ Our generic callback mechanism is already working quite nicely. However, there's one point we may want to fix: +//@ `Callbacks` does not implement `Clone`. The problem is that closures (or rather, their environment) can never be cloned. +//@ (There's not even an automatic derivation happening for the cases where it would be possible.) +//@ This restriction propagates up to `Callbacks` itself. What could we do about this? + +//@ The solution is to find some way of cloning `Callbacks` without cloning the environments. This can be achieved with +//@ `Rc`, a *reference-counted* pointer. This is is another example of a smart pointer. You can `clone` an `Rc` as often +//@ as you want, that doesn't affect the data it contains. It only creates more references to the same data. Once all the +//@ references are gone, the data is deleted. +//@ +//@ Wait a moment, you may say here. Multiple references to the same data? That's aliasing! Indeed: +//@ Once data is stored in an `Rc`, it is read-only and you can only ever get a shared borrow of the data again. + +//@ Because of this read-only restriction, we cannot use `FnMut` here: We'd be unable to call the function with a mutable borrow +//@ of it's environment! So we have to go with `Fn`. We wrap that in an `Rc`, and then Rust happily derives `Clone` for us. +#[derive(Clone)] +struct Callbacks { + callbacks: Vec>, } -//@ Now we can write three functions to do the actual job of reading, matching, and printing, respectively. -//@ To get the data from one thread to the next, we will use *message passing*: We will establish communication -//@ channels between the threads, with one thread *sending* data, and the other one *receiving* it. `SyncSender` -//@ is the type of the sending end of a synchronous channel transmitting data of type `T`. *Synchronous* here -//@ means that the `send` operation could block, waiting for the other side to make progress. We don't want to -//@ end up with the entire file being stored in the buffer of the channels, and the output not being fast enough -//@ to keep up with the speed of input. -//@ -//@ We also need all the threads to have access to the options of the job they are supposed to do. Since it would -//@ be rather unnecessary to actually copy these options around, we will use reference-counting to share them between -//@ all threads. `Arc` is the thread-safe version of `Rc`, using atomic operations to keep the reference count up-to-date. - -// The first function reads the files, and sends every line over the `out_channel`. -fn read_files(options: Arc, out_channel: SyncSender) { - for file in options.files.iter() { - // First, we open the file, ignoring any errors. - let file = fs::File::open(file).unwrap(); - // Then we obtain a `BufReader` for it, which provides the `lines` function. - let file = io::BufReader::new(file); - for line in file.lines() { - let line = line.unwrap(); - // Now we send the line over the channel, ignoring the possibility of `send` failing. 
- out_channel.send(line).unwrap(); - } +impl Callbacks { + pub fn new() -> Self { + Callbacks { callbacks: Vec::new() } /*@*/ } - // When we drop the `out_channel`, it will be closed, which the other end can notice. -} -// The second function filters the lines it receives through `in_channel` with the pattern, and sends -// matches via `out_channel`. -fn filter_lines(options: Arc, - in_channel: Receiver, - out_channel: SyncSender) { - // We can simply iterate over the channel, which will stop when the channel is closed. - for line in in_channel.iter() { - // `contains` works on lots of types of patterns, but in particular, we can use it to test whether - // one string is contained in another. This is another example of Rust using traits as substitute for overloading. - if line.contains(&options.pattern) { - out_channel.send(line).unwrap(); /*@*/ - } + // Registration works just like last time, except that we are creating an `Rc` now. + pub fn register(&mut self, callback: F) { + self.callbacks.push(Rc::new(callback)); /*@*/ } -} -// The third function performs the output operations, receiving the relevant lines on its `in_channel`. -fn output_lines(options: Arc, in_channel: Receiver) { - match options.output_mode { - Print => { - // Here, we just print every line we see. - for line in in_channel.iter() { - println!("{}", line); /*@*/ - } - }, - Count => { - // We are supposed to count the number of matching lines. There's a convenient iterator adapter that - // we can use for this job. - let count = in_channel.iter().count(); /*@*/ - println!("{} hits for {}.", count, options.pattern); /*@*/ - }, - SortAndPrint => { - // We are asked to sort the matching lines before printing. So let's collect them all in a local vector... - let mut data: Vec = in_channel.iter().collect(); - // ...and implement the actual sorting later. - unimplemented!() + pub fn call(&self, val: i32) { + // We only need a shared iterator here. Since `Rc` is a smart pointer, we can directly call the callback. + for callback in self.callbacks.iter() { + callback(val); /*@*/ } } } -// With the operations of the three threads defined, we can now implement a function that performs grepping according -// to some given options. -pub fn run(options: Options) { - // We move the `options` into an `Arc`, as that's what the thread workers expect. - let options = Arc::new(options); - - // This sets up the channels. We use a `sync_channel` with buffer-size of 16 to avoid needlessly filling RAM. - let (line_sender, line_receiver) = sync_channel(16); - let (filtered_sender, filtered_receiver) = sync_channel(16); - - // Spawn the read thread: `thread::spawn` takes a closure that is run in a new thread. - //@ The `move` keyword again tells Rust that we want ownership of captured variables to be moved into the - //@ closure. This means we need to do the `clone` *first*, otherwise we would lose our `options` to the - //@ new thread! - let options1 = options.clone(); - let handle1 = thread::spawn(move || read_files(options1, line_sender)); - - // Same with the filter thread. - let options2 = options.clone(); - let handle2 = thread::spawn(move || { - filter_lines(options2, line_receiver, filtered_sender) - }); - - // And the output thread. - let options3 = options.clone(); - let handle3 = thread::spawn(move || output_lines(options3, filtered_receiver)); - - // Finally, wait until all three threads did their job. - //@ Joining a thread waits for its termination. 
This can fail if that thread panicked: In this case, we could get - //@ access to the data that it provided to `panic!`. Here, we just assert that they did not panic - so we will panic ourselves - //@ if that happened. - handle1.join().unwrap(); - handle2.join().unwrap(); - handle3.join().unwrap(); +// Time for a demo! +fn demo(c: &mut Callbacks) { + c.register(|val| println!("Callback 1: {}", val)); + c.call(0); c.clone().call(1); } -// Now we have all the pieces together for testing our rgrep with some hard-coded options. -//@ We need to call `to_string` on string literals to convert them to a fully-owned `String`. pub fn main() { - let options = Options { - files: vec!["src/part10.rs".to_string(), - "src/part11.rs".to_string(), - "src/part12.rs".to_string()], - pattern: "let".to_string(), - output_mode: Print - }; - run(options); + let mut c = Callbacks::new(); + demo(&mut c); } -// **Exercise 12.1**: Change rgrep such that it prints not only the matching lines, but also the name of the file -// and the number of the line in the file. You will have to change the type of the channels from `String` to something -// that records this extra information. - -//@ ## Ownership, Borrowing, and Concurrency -//@ The little demo above showed that concurrency in Rust has a fairly simple API. Considering Rust has closures, -//@ that should not be entirely surprising. However, as it turns out, Rust goes well beyond this and actually ensures -//@ the absence of data races.
-//@ A data race is typically defined as having two concurrent, unsynchronized -//@ accesses to the same memory location, at least one of which is a write. In other words, a data race is mutation in -//@ the presence of aliasing, which Rust reliably rules out! It turns out that the same mechanism that makes our single-threaded -//@ programs memory safe, and that prevents us from invalidating iterators, also helps secure our multi-threaded code against -//@ data races. For example, notice how `read_files` sends a `String` to `filter_lines`. At run-time, only the pointer to -//@ the character data will actually be moved around (just like when a `String` is passed to a function with full ownership). However, -//@ `read_files` has to *give up* ownership of the string to perform `send`, to it is impossible for an outstanding borrow to -//@ still be around. After it sent the string to the other side, `read_files` has no pointer into the string content -//@ anymore, and hence no way to race on the data with someone else. -//@ -//@ There is a little more to this. Remember the `'static` bound we had to add to `register` in the previous part, to make -//@ sure that the callbacks do not reference any pointers that might become invalid? This is just as crucial for spawning -//@ a thread: In general, that thread could last for much longer than the current stack frame. Thus, it must not use -//@ any pointers to data in that stack frame. This is achieved by requiring the `FnOnce` closure passed to `thread::spawn` -//@ to be valid for lifetime `'static`, as you can see in [its documentation](http://doc.rust-lang.org/stable/std/thread/fn.spawn.html). -//@ This avoids another kind of data race, where the thread's access races with the callee deallocating its stack frame. -//@ It is only thanks to the concept of lifetimes that this can be expressed as part of the type of `spawn`. - -//@ ## Send -//@ However, the story goes even further. I said above that `Arc` is a thread-safe version of `Rc`, which uses atomic operations -//@ to manipulate the reference count. It is thus crucial that we don't use `Rc` across multiple threads, or the reference count may -//@ become invalid. And indeed, if you replace `Arc` by `Rc` (and add the appropriate imports), Rust will tell you that something -//@ is wrong. That's great, of course, but how did it do that? -//@ -//@ The answer is already hinted at in the error: It will say something about `Send`. You may have noticed that the closure in -//@ `thread::spawn` does not just have a `'static` bound, but also has to satisfy `Send`. `Send` is a trait, and just like `Copy`, -//@ it's just a marker - there are no functions provided by `Send`. What the trait says is that types which are `Send`, can be -//@ safely sent to another thread without causing trouble. Of course, all the primitive data-types are `Send`. So is `Arc`, -//@ which is why Rust accepted our code. But `Rc` is not `Send`, and for a good reason! +// ## Interior Mutability +//@ Of course, the counting example from last time does not work anymore: It needs to mutate the environment, which a `Fn` +//@ cannot do. The strict borrowing Rules of Rust are getting into our way. However, when it comes to mutating a mere number +//@ (`usize`), there's not really any chance of problems coming up. Everybody can read and write that variable just as they want. +//@ So it would be rather sad if we were not able to write this program. Lucky enough, Rust's standard library provides a +//@ solution in the form of `Cell`. 
This type represents a memory cell of some type `T`, providing the two basic operations +//@ `get` and `set`. `get` returns a *copy* of the content of the cell, so all this works only if `T` is `Copy`. +//@ `set`, which overwrites the content, only needs a *shared borrow* of the cell. The phenomenon of a type that permits mutation through +//@ shared borrows (i.e., mutation despite the possibility of aliasing) is called *interior mutability*. You can think +//@ of `set` changing only the *contents* of the cell, not its *identity*. In contrast, the kind of mutation we saw so far was +//@ about replacing one piece of data by something else of the same type. This is called *exterior mutability*.
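As a minimal standalone illustration (not part of this patch) of mutation through shared borrows, consider:

```rust
use std::cell::Cell;

fn main() {
    let counter = Cell::new(0);
    // Two shared borrows of the same cell: clearly aliasing, and yet both can mutate it,
    // because `get` copies the value out and `set` copies a new value in.
    let a = &counter;
    let b = &counter;
    a.set(a.get() + 1);
    b.set(b.get() + 10);
    println!("final value: {}", counter.get()); // prints 11
}
```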
+//@ Notice that it is impossible to *borrow* the contents of the cell, and that is actually the key to why this is safe. + +// So, let us put our counter in a `Cell`, and replicate the example from the previous part. +fn demo_cell(c: &mut Callbacks) { + { + let count = Cell::new(0); + // Again, we have to move ownership if the `count` into the environment closure. + c.register(move |val| { + // In here, all we have is a shared borrow of our environment. But that's good enough for the `get` and `set` of the cell! + //@ At run-time, the `Cell` will be almost entirely compiled away, so this becomes pretty much equivalent to the version + //@ we wrote in the previous part. + let new_count = count.get()+1; + count.set(new_count); + println!("Callback 2: {} ({}. time)", val, new_count); + } ); + } + + c.call(2); c.clone().call(3); +} + +//@ It is worth mentioning that `Rc` itself also has to make use of interior mutability: When you `clone` an `Rc`, all it has available +//@ is a shared borrow. However, it has to increment the reference count! Internally, `Rc` uses `Cell` for the count, such that it +//@ can be updated during `clone`. + +// ## `RefCell` +//@ As the next step in the evolution of `Callbacks`, we could try to solve this problem of mutability once and for all, by adding `Cell` +//@ to `Callbacks` such that clients don't have to worry about this. However, that won't end up working: Remember that `Cell` only works +//@ with types that are `Copy`, which the environment of a closure will never be. We need a variant of `Cell` that allows borrowing its +//@ contents, such that we can provide a `FnMut` with its environment. But if `Cell` would allow that, we could write down all those +//@ crashing C++ programs that we wanted to get rid of. //@ -//@ Now, `Send` as a trait is fairly special. It has a so-called *default implementation*. This means that *every type* implements -//@ `Send`, unless it opts out. Opting out is viral: If your type contains a type that opted out, then you don't have `Send`, either. -//@ So if the environment of your closure contains an `Rc`, it won't be `Send`, preventing it from causing trouble. If however every -//@ captured variable *is* `Send`, then so is the entire environment, and you are good. +//@ This is the point where our program got too complex for Rust to guarantee at compile-time that nothing bad will happen. Since we don't +//@ want to give up the safety guarantee, we are going to need some code that actually checks at run-time that the borrowing rules +//@ are not violated. Such a check is provided by `RefCell`: Unlike `Cell`, this lets us borrow the contents, and it works for +//@ non-`Copy` `T`. But, as we will see, it incurs some run-time overhead. + +// Our final version of `Callbacks` puts the closure environment into a `RefCell`. +#[derive(Clone)] +struct CallbacksMut { + callbacks: Vec>>, +} + +impl CallbacksMut { + pub fn new() -> Self { + CallbacksMut { callbacks: Vec::new() } /*@*/ + } + + pub fn register(&mut self, callback: F) { + let cell = Rc::new(RefCell::new(callback)); + self.callbacks.push(cell); /*@*/ + } + + pub fn call(&mut self, val: i32) { + for callback in self.callbacks.iter() { + // We have to *explicitly* borrow the contents of a `RefCell` by calling `borrow` or `borrow_mut`. + //@ At run-time, the cell will keep track of the number of outstanding shared and mutable borrows, + //@ and panic if the rules are violated.
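Here is a small standalone sketch (not from this patch) of that run-time check in action: the mutable borrow is requested while a shared borrow is still alive, so `borrow_mut` panics instead of allowing aliased mutation.

```rust
use std::cell::RefCell;

fn main() {
    let cell = RefCell::new(vec![1, 2, 3]);
    let first = cell.borrow();          // first, shared borrow: fine
    println!("len = {}", first.len());
    // `first` is still in scope, so handing out a mutable borrow now would mean
    // aliasing plus mutation. `RefCell` detects this and panics.
    let mut second = cell.borrow_mut(); // panics: already borrowed
    second.push(4);
}
```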
+ //@ For this check to be performed, `closure` is a *guard*: Rather than a normal borrow, `borrow_mut` returns + //@ a smart pointer (`RefMut`, in this case) that waits until is goes out of scope, and then + //@ appropriately updates the number of active borrows. + //@ + //@ Since `call` is the only place that borrows the environments of the closures, we should expect that + //@ the check will always succeed. However, this function would still typecheck with an immutable borrow of `self` (since we are + //@ relying on the interior mutability of `RefCell`). Under this condition, it could happen that a callback + //@ will in turn trigger another round of callbacks, so that `call` would indirectly call itself. + //@ This is called reentrancy. It would imply that we borrow the closure a second time, and + //@ panic at run-time. I hope this also makes it clear that there's absolutely no hope of Rust + //@ performing these checks statically, at compile-time: It would have to detect reentrancy! + let mut closure = callback.borrow_mut(); + // Unfortunately, Rust's auto-dereference of pointers is not clever enough here. We thus have to explicitly + // dereference the smart pointer and obtain a mutable borrow of the content. + (&mut *closure)(val); + } + } +} + +// Now we can repeat the demo from the previous part - but this time, our `CallbacksMut` type +// can be cloned. +fn demo_mut(c: &mut CallbacksMut) { + c.register(|val| println!("Callback 1: {}", val)); + c.call(0); + + { + let mut count: usize = 0; + c.register(move |val| { + count = count+1; + println!("Callback 2: {} ({}. time)", val, count); + } ); + } + c.call(1); c.clone().call(2); +} + +// **Exercise 12.1**: Change the type of `call` to ask only for a shared borrow. Then write some piece of code using only the available, public +// interface of `CallbacksMut` such that a reentrant call to `call` is happening, and the program aborts because the `RefCell` refuses to hand +// out a second mutable borrow to its content. //@ [index](main.html) | [previous](part11.html) | [next](part13.html) diff --git a/src/part13.rs b/src/part13.rs index 29647ea..76d7154 100644 --- a/src/part13.rs +++ b/src/part13.rs @@ -1,161 +1,191 @@ -// Rust-101, Part 13: Slices, Arrays, External Dependencies -// ======================================================== - -//@ To complete rgrep, there are two pieces we still need to implement: Sorting, and taking the job options -//@ as argument to the program, rather than hard-coding them. Let's start with sorting. - -// ## Slices -//@ Again, we first have to think about the type we want to give to our sorting function. We may be inclined to -//@ pass it a `Vec`. Of course, sorting does not actually consume the argument, so we should make that a `&mut Vec`. -//@ But there's a problem with that: If we want to implement some divide-and-conquer sorting algorithm (say, -//@ Quicksort), then we will have to *split* our argument at some point, and operate recursively on the two parts. -//@ But we can't split a `Vec`! We could now extend the function signature to also take some indices, marking the -//@ part of the vector we are supposed to sort, but that's all rather clumsy. Rust offers a nicer solution. - -//@ `[T]` is the type of an (unsized) *array*, with elements of type `T`. All this means is that there's a contiguous -//@ region of memory, where a bunch of `T` are stored. How many? We can't tell! This is an unsized type. 
Just like for -//@ trait objects, this means we can only operate on pointers to that type, and these pointers will carry the missing -//@ information - namely, the length. Such a pointer is called a *slice*. As we will see, a slice can be split. -//@ Our function can thus take a borrowed slice, and promise to sort all elements in there. -pub fn sort(data: &mut [T]) { - if data.len() < 2 { return; } - - // We decide that the element at 0 is our pivot, and then we move our cursors through the rest of the slice, - // making sure that everything on the left is no larger than the pivot, and everything on the right is no smaller. - let mut lpos = 1; - let mut rpos = data.len(); - /* Invariant: pivot is data[0]; everything with index (0,lpos) is <= pivot; - [rpos,len) is >= pivot; lpos < rpos */ - loop { - // **Exercise 13.1**: Complete this Quicksort loop. You can use `swap` on slices to swap two elements. Write a - // test function for `sort`. - unimplemented!() - } - - // Once our cursors met, we need to put the pivot in the right place. - data.swap(0, lpos-1); - - // Finally, we split our slice to sort the two halves. The nice part about slices is that splitting them is cheap: - //@ They are just a pointer to a start address, and a length. We can thus get two pointers, one at the beginning and - //@ one in the middle, and set the lengths appropriately such that they don't overlap. This is what `split_at_mut` does. - //@ Since the two slices don't overlap, there is no aliasing and we can have them both mutably borrowed. - let (part1, part2) = data.split_at_mut(lpos); - //@ The index operation can not only be used to address certain elements, it can also be used for *slicing*: Giving a range - //@ of indices, and obtaining an appropriate part of the slice we started with. Here, we remove the last element from - //@ `part1`, which is the pivot. This makes sure both recursive calls work on strictly smaller slices. - sort(&mut part1[..lpos-1]); /*@*/ - sort(part2); /*@*/ +// Rust-101, Part 13: Concurrency, Arc, Send +// ========================================= + +use std::io::prelude::*; +use std::{io, fs, thread}; +use std::sync::mpsc::{sync_channel, SyncSender, Receiver}; +use std::sync::Arc; + +//@ Our next stop are the concurrency features of Rust. We are going to write our own small version of "grep", +//@ called *rgrep*, and it is going to make use of concurrency: One thread reads the input files, one thread does +//@ the actual matching, and one thread writes the output. I already mentioned in the beginning of the course that +//@ Rust's type system (more precisely, the discipline of ownership and borrowing) will help us to avoid a common +//@ pitfall of concurrent programming: data races. + +// Before we come to the actual code, we define a data-structure `Options` to store all the information we need +// to complete the job: Which files to work on, which pattern to look for, and how to output.
+//@ Besides just printing all the matching lines, we will also offer to count them, or alternatively to sort them. +#[derive(Clone,Copy)] +pub enum OutputMode { + Print, + SortAndPrint, + Count, } +use self::OutputMode::*; -// **Exercise 13.2**: Since `String` implements `PartialEq`, you can now change the function `output_lines` in the previous part -// to call the sort function above. If you did exercise 12.1, you will have slightly more work. Make sure you sort by the matched line -// only, not by filename or line number! - -// Now, we can sort, e.g., an vector of numbers. -fn sort_nums(data: &mut Vec) { - //@ Vectors support slicing, just like slices do. Here, `..` denotes the full range, which means we want to slice the entire vector. - //@ It is then passed to the `sort` function, which doesn't even know that it is working on data inside a vector. - sort(&mut data[..]); +pub struct Options { + pub files: Vec, + pub pattern: String, + pub output_mode: OutputMode, } -// ## Arrays -//@ An *array* in Rust is given be the type `[T; n]`, where `n` is some *fixed* number. So, `[f64; 10]` is an array of 10 floating-point -//@ numbers, all one right next to the other in memory. Arrays are sized, and hence can be used like any other type. But we can also -//@ borrow them as slices, e.g., to sort them. -fn sort_array() { - let mut array_of_data: [f64; 5] = [1.0, 3.4, 12.7, -9.12, 0.1]; - sort(&mut array_of_data); +//@ Now we can write three functions to do the actual job of reading, matching, and printing, respectively. +//@ To get the data from one thread to the next, we will use *message passing*: We will establish communication +//@ channels between the threads, with one thread *sending* data, and the other one *receiving* it. `SyncSender` +//@ is the type of the sending end of a synchronous channel transmitting data of type `T`. *Synchronous* here +//@ means that the `send` operation could block, waiting for the other side to make progress. We don't want to +//@ end up with the entire file being stored in the buffer of the channels, and the output not being fast enough +//@ to keep up with the speed of input. +//@ +//@ We also need all the threads to have access to the options of the job they are supposed to do. Since it would +//@ be rather unnecessary to actually copy these options around, we will use reference-counting to share them between +//@ all threads. `Arc` is the thread-safe version of `Rc`, using atomic operations to keep the reference count up-to-date. + +// The first function reads the files, and sends every line over the `out_channel`. +fn read_files(options: Arc, out_channel: SyncSender) { + for file in options.files.iter() { + // First, we open the file, ignoring any errors. + let file = fs::File::open(file).unwrap(); + // Then we obtain a `BufReader` for it, which provides the `lines` function. + let file = io::BufReader::new(file); + for line in file.lines() { + let line = line.unwrap(); + // Now we send the line over the channel, ignoring the possibility of `send` failing. + out_channel.send(line).unwrap(); + } + } + // When we drop the `out_channel`, it will be closed, which the other end can notice. } -// ## External Dependencies -//@ This leaves us with just one more piece to complete rgrep: Taking arguments from the command-line. We could now directly work on -//@ [`std::env::args`](http://doc.rust-lang.org/stable/std/env/fn.args.html) to gain access to those arguments, and this would become -//@ a pretty boring lesson in string manipulation. 
Instead, I want to use this opportunity to show how easy it is to benefit from -//@ other people's work in your program. -//@ -//@ For sure, we are not the first to equip a Rust program with support for command-line arguments. Someone must have written a library -//@ for the job, right? Indeed, someone has. Rust has a central repository of published libraries, called [crates.io](https://crates.io/). -//@ It's a bit like [PyPI](https://pypi.python.org/pypi) or the [Ruby Gems](https://rubygems.org/): Everybody can upload their code, -//@ and there's tooling for importing that code into your project. This tooling is provided by `cargo`, the tool we are already using to -//@ build this tutorial. (`cargo` also has support for *publishing* your crate on crates.io, I refer you to [the documentation](http://doc.crates.io/crates-io.html) for more details.) -//@ In this case, we are going to use the [`docopt` crate](https://crates.io/crates/docopt), which creates a parser for command-line -//@ arguments based on the usage string. External dependencies are declared in the `Cargo.toml` file. - -//@ I already prepared that file, but the declaration of the dependency is still commented out. So please open `Cargo.toml` of your workspace -//@ now, and enabled the two commented-out lines. Then do `cargo build`. Cargo will now download the crate from crates.io, compile it, -//@ and link it to your program. In the future, you can do `cargo update` to make it download new versions of crates you depend on. -//@ Note that crates.io is only the default location for dependencies, you can also give it the URL of a git repository or some local -//@ path. All of this is explained in the [Cargo Guide](http://doc.crates.io/guide.html). - -// I disabled the following module (using a rather bad hack), because it only compiles if `docopt` is linked. -// Remove the attribute of the `rgrep` module to enable compilation. -#[cfg(feature = "disabled")] -pub mod rgrep { - // Now that `docopt` is linked, we can first root it in the namespace and then import it with `use`. We also import some other pieces that we will need. - extern crate docopt; - use self::docopt::Docopt; - use part12::{run, Options, OutputMode}; - use std::process; - - // The `USAGE` string documents how the program is to be called. It's written in a format that `docopt` can parse. - static USAGE: &'static str = " -Usage: rgrep [-c] [-s] ... - -Options: - -c, --count Count number of matching lines (rather than printing them). - -s, --sort Sort the lines before printing. -"; - - // This function extracts the rgrep options from the command-line arguments. - fn get_options() -> Options { - // Parse `argv` and exit the program with an error message if it fails. This is taken from the [`docopt` documentation](http://burntsushi.net/rustdoc/docopt/). - //@ The function `and_then` takes a closure from `T` to `Result`, and uses it to transform a `Result` to a - //@ `Result`. This way, we can chain computations that only happen if the previous one succeeded (and the error - //@ type has to stay the same). In case you know about monads, this style of programming will be familiar to you. - //@ There's a similar function for `Option`. `unwrap_or_else` is a bit like `unwrap`, but rather than panicking in - //@ case of an `Err`, it calls the closure. - let args = Docopt::new(USAGE).and_then(|d| d.parse()).unwrap_or_else(|e| e.exit()); - // Now we can get all the values out. 
- let count = args.get_bool("-c"); - let sort = args.get_bool("-s"); - let pattern = args.get_str(""); - let files = args.get_vec(""); - if count && sort { - println!("Setting both '-c' and '-s' at the same time does not make any sense."); - process::exit(1); +// The second function filters the lines it receives through `in_channel` with the pattern, and sends +// matches via `out_channel`. +fn filter_lines(options: Arc, + in_channel: Receiver, + out_channel: SyncSender) { + // We can simply iterate over the channel, which will stop when the channel is closed. + for line in in_channel.iter() { + // `contains` works on lots of types of patterns, but in particular, we can use it to test whether + // one string is contained in another. This is another example of Rust using traits as substitute for overloading. + if line.contains(&options.pattern) { + out_channel.send(line).unwrap(); /*@*/ } + } +} - // We need to make the strings owned to construct the `Options` instance. - //@ If you check all the types carefully, you will notice that `pattern` above is of type `&str`. `str` is the type of a UTF-8 - //@ encoded string, that is, a bunch of bytes in memory (`[u8]`) that are valid according of UTF-8. `str` is unsized. `&str` - //@ stores the address of the character data, and their length. String literals like "this one" are - //@ of type `&'static str`: They point right to the constant section of the binary, so - //@ However, the borrow is valid for as long as the program runs, hence it has lifetime `'static`. Calling - //@ `to_string` will copy the string data into an owned buffer on the heap, and thus convert it to `String`. - let mode = if count { - OutputMode::Count - } else if sort { - OutputMode::SortAndPrint - } else { - OutputMode::Print - }; - Options { - files: files.iter().map(|file| file.to_string()).collect(), - pattern: pattern.to_string(), - output_mode: mode, +// The third function performs the output operations, receiving the relevant lines on its `in_channel`. +fn output_lines(options: Arc, in_channel: Receiver) { + match options.output_mode { + Print => { + // Here, we just print every line we see. + for line in in_channel.iter() { + println!("{}", line); /*@*/ + } + }, + Count => { + // We are supposed to count the number of matching lines. There's a convenient iterator adapter that + // we can use for this job. + let count = in_channel.iter().count(); /*@*/ + println!("{} hits for {}.", count, options.pattern); /*@*/ + }, + SortAndPrint => { + // We are asked to sort the matching lines before printing. So let's collect them all in a local vector... + let mut data: Vec = in_channel.iter().collect(); + // ...and implement the actual sorting later. + unimplemented!() } } +} - // Finally, we can call the `run` function from the previous part on the options extracted using `get_options`. Edit `main.rs` to call this function. - // You can now use `cargo run -- ` to call your program, and see the argument parser and the threads we wrote previously in action! - pub fn main() { - run(get_options()); /*@*/ - } +// With the operations of the three threads defined, we can now implement a function that performs grepping according +// to some given options. +pub fn run(options: Options) { + // We move the `options` into an `Arc`, as that's what the thread workers expect. + let options = Arc::new(options); + + // This sets up the channels. We use a `sync_channel` with buffer-size of 16 to avoid needlessly filling RAM. 
+ let (line_sender, line_receiver) = sync_channel(16); + let (filtered_sender, filtered_receiver) = sync_channel(16); + + // Spawn the read thread: `thread::spawn` takes a closure that is run in a new thread. + //@ The `move` keyword again tells Rust that we want ownership of captured variables to be moved into the + //@ closure. This means we need to do the `clone` *first*, otherwise we would lose our `options` to the + //@ new thread! + let options1 = options.clone(); + let handle1 = thread::spawn(move || read_files(options1, line_sender)); + + // Same with the filter thread. + let options2 = options.clone(); + let handle2 = thread::spawn(move || { + filter_lines(options2, line_receiver, filtered_sender) + }); + + // And the output thread. + let options3 = options.clone(); + let handle3 = thread::spawn(move || output_lines(options3, filtered_receiver)); + + // Finally, wait until all three threads did their job. + //@ Joining a thread waits for its termination. This can fail if that thread panicked: In this case, we could get + //@ access to the data that it provided to `panic!`. Here, we just assert that they did not panic - so we will panic ourselves + //@ if that happened. + handle1.join().unwrap(); + handle2.join().unwrap(); + handle3.join().unwrap(); +} + +// Now we have all the pieces together for testing our rgrep with some hard-coded options. +//@ We need to call `to_string` on string literals to convert them to a fully-owned `String`. +pub fn main() { + let options = Options { + files: vec!["src/part10.rs".to_string(), + "src/part11.rs".to_string(), + "src/part12.rs".to_string()], + pattern: "let".to_string(), + output_mode: Print + }; + run(options); } -// **Exercise 13.3**: Wouldn't it be nice if rgrep supported regular expressions? There's already a crate that does all the parsing and matching on regular -// expression, it's called [regex](https://crates.io/crates/regex). Add this crate to the dependencies of your workspace, add an option ("-r") to switch -// the pattern to regular-expression mode, and change `filter_lines` to honor this option. The documentation of regex is available from its crates.io site. -// (You won't be able to use the `regex!` macro if you are on the stable or beta channel of Rust. But it wouldn't help for our use-case anyway.) +// **Exercise 12.1**: Change rgrep such that it prints not only the matching lines, but also the name of the file +// and the number of the line in the file. You will have to change the type of the channels from `String` to something +// that records this extra information. + +//@ ## Ownership, Borrowing, and Concurrency +//@ The little demo above showed that concurrency in Rust has a fairly simple API. Considering Rust has closures, +//@ that should not be entirely surprising. However, as it turns out, Rust goes well beyond this and actually ensures +//@ the absence of data races.
+//@ A data race is typically defined as having two concurrent, unsynchronized
+//@ accesses to the same memory location, at least one of which is a write. In other words, a data race is mutation in
+//@ the presence of aliasing, which Rust reliably rules out! It turns out that the same mechanism that makes our single-threaded
+//@ programs memory safe, and that prevents us from invalidating iterators, also helps secure our multi-threaded code against
+//@ data races. For example, notice how `read_files` sends a `String` to `filter_lines`. At run-time, only the pointer to
+//@ the character data will actually be moved around (just like when a `String` is passed to a function with full ownership). However,
+//@ `read_files` has to *give up* ownership of the string to perform `send`, so it is impossible for an outstanding borrow to
+//@ still be around. After it has sent the string to the other side, `read_files` has no pointer into the string content
+//@ anymore, and hence no way to race on the data with someone else.
+//@
+//@ There is a little more to this. Remember the `'static` bound we had to add to `register` in the previous part, to make
+//@ sure that the callbacks do not reference any pointers that might become invalid? This is just as crucial for spawning
+//@ a thread: In general, that thread could last for much longer than the current stack frame. Thus, it must not use
+//@ any pointers to data in that stack frame. This is achieved by requiring the `FnOnce` closure passed to `thread::spawn`
+//@ to be valid for lifetime `'static`, as you can see in [its documentation](http://doc.rust-lang.org/stable/std/thread/fn.spawn.html).
+//@ This avoids another kind of data race, where the thread's access races with the spawning function deallocating its stack frame.
+//@ It is only thanks to the concept of lifetimes that this can be expressed as part of the type of `spawn`.
+
+//@ ## Send
+//@ However, the story goes even further. I said above that `Arc` is a thread-safe version of `Rc`, which uses atomic operations
+//@ to manipulate the reference count. It is thus crucial that we don't use `Rc` across multiple threads, or the reference count may
+//@ become invalid. And indeed, if you replace `Arc` by `Rc` (and add the appropriate imports), Rust will tell you that something
+//@ is wrong. That's great, of course, but how did it do that?
+//@
+//@ The answer is already hinted at in the error: It will say something about `Send`. You may have noticed that the closure in
+//@ `thread::spawn` does not just have a `'static` bound, but also has to satisfy `Send`. `Send` is a trait, and just like `Copy`,
+//@ it's just a marker - there are no functions provided by `Send`. What the trait says is that types which are `Send` can be
+//@ safely sent to another thread without causing trouble. Of course, all the primitive data-types are `Send`. So is `Arc`,
+//@ which is why Rust accepted our code. But `Rc` is not `Send`, and for a good reason!
+//@
+//@ Now, `Send` as a trait is fairly special. It has a so-called *default implementation*. This means that *every type* implements
+//@ `Send`, unless it opts out. Opting out is viral: If your type contains a type that opted out, then you don't have `Send`, either.
+//@ So if the environment of your closure contains an `Rc`, it won't be `Send`, preventing it from causing trouble. If, however, every
+//@ captured variable *is* `Send`, then so is the entire environment, and you are good.
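To make the `Send` discussion a bit more concrete, here is a minimal, self-contained sketch (an editorial illustration, not part of the patch): read-only data is shared via an `Arc`, and ownership of each `String` is transferred through a channel.

use std::sync::Arc;
use std::sync::mpsc::sync_channel;
use std::thread;

fn main() {
    // `Arc<String>` is `Send`, so a closure capturing this clone may be moved to another thread.
    let pattern = Arc::new("let".to_string());
    let pattern2 = pattern.clone();

    let (tx, rx) = sync_channel(4);
    let handle = thread::spawn(move || {
        for i in 0..3 {
            // `send` takes ownership of the `String`, so no borrow of it can stay behind in this thread.
            tx.send(format!("{}: match {}", pattern2, i)).unwrap();
        }
    });

    // The iterator stops once the sender inside the thread is dropped.
    for line in rx.iter() {
        println!("{}", line);
    }
    handle.join().unwrap();
}

Swapping the `Arc` for an `Rc` (with `use std::rc::Rc;` and `Rc::new`) should make the `thread::spawn` call fail to compile, because the closure's environment would no longer be `Send`.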
//@ [index](main.html) | [previous](part12.html) | [next](part14.html) diff --git a/src/part14.rs b/src/part14.rs index 596094b..6550fe5 100644 --- a/src/part14.rs +++ b/src/part14.rs @@ -1,142 +1,161 @@ -// Rust-101, Part 14: Mutex, Interior Mutability, Sync -// =================================================== +// Rust-101, Part 14: Slices, Arrays, External Dependencies +// ======================================================== -use std::sync::{Arc, Mutex}; -use std::thread; +//@ To complete rgrep, there are two pieces we still need to implement: Sorting, and taking the job options +//@ as argument to the program, rather than hard-coding them. Let's start with sorting. -//@ We already saw that we can use `Arc` to share memory between threads. However, `Arc` can only provide *read-only* -//@ access to memory: Since there is aliasing, Rust cannot, in general, permit mutation. If however, -//@ some care would be taken at run-time, then mutation would still be all right: We have to ensure that whenever -//@ someone changes the data, nobody else is looking at it. In other words, we need a *critical section* or (as it -//@ is called in Rust) a [`Mutex`](http://doc.rust-lang.org/stable/std/sync/struct.Mutex.html). Some other languages also call this a *lock*. -//@ -//@ As an example, let us write a concurrent counter. As usual in Rust, we first have to think about our data layout. -//@ In case of the mutex, this means we have to declare the type of the data that we want to be protected. In Rust, -//@ a `Mutex` protects data, not code - and it is impossible to access the data in any other way. This is generally considered -//@ good style, but other languages typically lack the ability to actually enforce this. -//@ Of course, we want multiple threads to have access to this `Mutex`, so we wrap it in an `Arc`. -//@ -//@ Rather than giving every field a name, a struct can also be defined by just giving a sequence of types (similar -//@ to how a variant of an `enum` is defined). This is called a *tuple struct*. It is often used when constructing -//@ a *newtype*, as we do here: `ConcurrentCounter` is essentially just a new name for `Arc>`. However, -//@ is is a locally declared types, so we can give it an inherent implementation and implement traits for it. Since the -//@ field is private, nobody outside this module can even know the type we are wrapping. - -// The derived `Clone` implementation will clone the `Arc`, so all clones will actually talk about the same counter. -#[derive(Clone)] -struct ConcurrentCounter(Arc>); - -impl ConcurrentCounter { - // The constructor just wraps the constructors of `Arc` and `Mutex`. - pub fn new(val: usize) -> Self { - ConcurrentCounter(Arc::new(Mutex::new(val))) /*@*/ - } +// ## Slices +//@ Again, we first have to think about the type we want to give to our sorting function. We may be inclined to +//@ pass it a `Vec`. Of course, sorting does not actually consume the argument, so we should make that a `&mut Vec`. +//@ But there's a problem with that: If we want to implement some divide-and-conquer sorting algorithm (say, +//@ Quicksort), then we will have to *split* our argument at some point, and operate recursively on the two parts. +//@ But we can't split a `Vec`! We could now extend the function signature to also take some indices, marking the +//@ part of the vector we are supposed to sort, but that's all rather clumsy. Rust offers a nicer solution. - //@ The core operation is, of course, `increment`. 
The type may be surprising at first: A shared borrow? - //@ How can this be, since `increment` definitely modifies the counter? We already discussed above that `Mutex` is - //@ a way to get around this restriction in Rust. This phenomenon of data that can be mutated through a shared - //@ borrow is called *interior mutability*: We are changing the inner parts of the object, but seen from the outside, - //@ this does not count as "mutation". This stands in contrast to *exterior mutability*, which is the kind of - //@ mutability we saw so far, where one piece of data is replaced by something else of the same type. If you are familiar - //@ with languages like ML, you can compare this to how something of type `ref` permits mutation, even though it is - //@ itself a functional value (more precisely, a location) like all the others. - //@ - //@ Interior mutability breaks the rules of Rust that I outlined earlier: There is aliasing (a shared borrow) and mutation. - //@ The reason that this still works is careful programming of the primitives for interior mutability - in this case, that's - //@ `Mutex`. It has to ensure with dynamic checks, at run-time, that things don't fall apart. In particular, it has to ensure - //@ that the data covered by the mutex can only ever be accessed from inside a critical section. This is where Rust's type - //@ system comes into play: With its discipline of ownership and borrowing, it can enforce such rules. Let's see how this goes. - pub fn increment(&self, by: usize) { - // `lock` on a mutex returns a *guard*, giving access to the data contained in the mutex. - //@ (We will discuss the `unwrap` soon.) `.0` is how we access the first component of a tuple or a struct. - let mut counter = self.0.lock().unwrap(); - //@ The guard is another example of a smart pointer, and it can be used as if it were a pointer to the data protected - //@ by the lock. - *counter = *counter + by; - //@ At the end of the function, `counter` is dropped and the mutex is available again. - //@ This can only happen when full ownership of the guard is given up. In particular, it is impossible for us - //@ to borrow some of its content, release the lock of the mutex, and subsequently access the protected data without holding - //@ the lock. Enforcing the locking discipline is expressible in the Rust type system, so we don't have to worry - //@ about data races *even though* we are mutating shared memory! - //@ - //@ One of the subtle aspects of locking is *poisoning*. If a thread panics while it holds a lock, it could leave the - //@ data-structure in a bad state. The lock is hence considered *poisoned*. Future attempts to `lock` it will fail. - //@ Above, we simply assert via `unwrap` that this will never happen. Alternatively, we could have a look at the poisoned - //@ state and attempt to recover from it. - } +//@ `[T]` is the type of an (unsized) *array*, with elements of type `T`. All this means is that there's a contiguous +//@ region of memory, where a bunch of `T` are stored. How many? We can't tell! This is an unsized type. Just like for +//@ trait objects, this means we can only operate on pointers to that type, and these pointers will carry the missing +//@ information - namely, the length. Such a pointer is called a *slice*. As we will see, a slice can be split. +//@ Our function can thus take a borrowed slice, and promise to sort all elements in there. +pub fn sort(data: &mut [T]) { + if data.len() < 2 { return; } - // The function `get` returns the current value of the counter. 
- pub fn get(&self) -> usize { - let counter = self.0.lock().unwrap(); /*@*/ - *counter /*@*/ + // We decide that the element at 0 is our pivot, and then we move our cursors through the rest of the slice, + // making sure that everything on the left is no larger than the pivot, and everything on the right is no smaller. + let mut lpos = 1; + let mut rpos = data.len(); + /* Invariant: pivot is data[0]; everything with index (0,lpos) is <= pivot; + [rpos,len) is >= pivot; lpos < rpos */ + loop { + // **Exercise 13.1**: Complete this Quicksort loop. You can use `swap` on slices to swap two elements. Write a + // test function for `sort`. + unimplemented!() } + + // Once our cursors met, we need to put the pivot in the right place. + data.swap(0, lpos-1); + + // Finally, we split our slice to sort the two halves. The nice part about slices is that splitting them is cheap: + //@ They are just a pointer to a start address, and a length. We can thus get two pointers, one at the beginning and + //@ one in the middle, and set the lengths appropriately such that they don't overlap. This is what `split_at_mut` does. + //@ Since the two slices don't overlap, there is no aliasing and we can have them both mutably borrowed. + let (part1, part2) = data.split_at_mut(lpos); + //@ The index operation can not only be used to address certain elements, it can also be used for *slicing*: Giving a range + //@ of indices, and obtaining an appropriate part of the slice we started with. Here, we remove the last element from + //@ `part1`, which is the pivot. This makes sure both recursive calls work on strictly smaller slices. + sort(&mut part1[..lpos-1]); /*@*/ + sort(part2); /*@*/ } -// Now our counter is ready for action. -pub fn main() { - let counter = ConcurrentCounter::new(0); +// **Exercise 13.2**: Since `String` implements `PartialEq`, you can now change the function `output_lines` in the previous part +// to call the sort function above. If you did exercise 12.1, you will have slightly more work. Make sure you sort by the matched line +// only, not by filename or line number! - // We clone the counter for the first thread, which increments it by 2 every 15ms. - let counter1 = counter.clone(); - let handle1 = thread::spawn(move || { - for _ in 0..10 { - thread::sleep_ms(15); - counter1.increment(2); - } - }); - - // The second thread increments the counter by 3 every 20ms. - let counter2 = counter.clone(); - let handle2 = thread::spawn(move || { - for _ in 0..10 { - thread::sleep_ms(20); - counter2.increment(3); +// Now, we can sort, e.g., an vector of numbers. +fn sort_nums(data: &mut Vec) { + //@ Vectors support slicing, just like slices do. Here, `..` denotes the full range, which means we want to slice the entire vector. + //@ It is then passed to the `sort` function, which doesn't even know that it is working on data inside a vector. + sort(&mut data[..]); +} + +// ## Arrays +//@ An *array* in Rust is given be the type `[T; n]`, where `n` is some *fixed* number. So, `[f64; 10]` is an array of 10 floating-point +//@ numbers, all one right next to the other in memory. Arrays are sized, and hence can be used like any other type. But we can also +//@ borrow them as slices, e.g., to sort them. +fn sort_array() { + let mut array_of_data: [f64; 5] = [1.0, 3.4, 12.7, -9.12, 0.1]; + sort(&mut array_of_data); +} + +// ## External Dependencies +//@ This leaves us with just one more piece to complete rgrep: Taking arguments from the command-line. 
We could now directly work on +//@ [`std::env::args`](http://doc.rust-lang.org/stable/std/env/fn.args.html) to gain access to those arguments, and this would become +//@ a pretty boring lesson in string manipulation. Instead, I want to use this opportunity to show how easy it is to benefit from +//@ other people's work in your program. +//@ +//@ For sure, we are not the first to equip a Rust program with support for command-line arguments. Someone must have written a library +//@ for the job, right? Indeed, someone has. Rust has a central repository of published libraries, called [crates.io](https://crates.io/). +//@ It's a bit like [PyPI](https://pypi.python.org/pypi) or the [Ruby Gems](https://rubygems.org/): Everybody can upload their code, +//@ and there's tooling for importing that code into your project. This tooling is provided by `cargo`, the tool we are already using to +//@ build this tutorial. (`cargo` also has support for *publishing* your crate on crates.io, I refer you to [the documentation](http://doc.crates.io/crates-io.html) for more details.) +//@ In this case, we are going to use the [`docopt` crate](https://crates.io/crates/docopt), which creates a parser for command-line +//@ arguments based on the usage string. External dependencies are declared in the `Cargo.toml` file. + +//@ I already prepared that file, but the declaration of the dependency is still commented out. So please open `Cargo.toml` of your workspace +//@ now, and enabled the two commented-out lines. Then do `cargo build`. Cargo will now download the crate from crates.io, compile it, +//@ and link it to your program. In the future, you can do `cargo update` to make it download new versions of crates you depend on. +//@ Note that crates.io is only the default location for dependencies, you can also give it the URL of a git repository or some local +//@ path. All of this is explained in the [Cargo Guide](http://doc.crates.io/guide.html). + +// I disabled the following module (using a rather bad hack), because it only compiles if `docopt` is linked. +// Remove the attribute of the `rgrep` module to enable compilation. +#[cfg(feature = "disabled")] +pub mod rgrep { + // Now that `docopt` is linked, we can first add it to the namespace and then import shorter names with `use`. We also import some other pieces that we will need. + extern crate docopt; + use self::docopt::Docopt; + use part12::{run, Options, OutputMode}; + use std::process; + + // The `USAGE` string documents how the program is to be called. It's written in a format that `docopt` can parse. + static USAGE: &'static str = " +Usage: rgrep [-c] [-s] ... + +Options: + -c, --count Count number of matching lines (rather than printing them). + -s, --sort Sort the lines before printing. +"; + + // This function extracts the rgrep options from the command-line arguments. + fn get_options() -> Options { + // Parse `argv` and exit the program with an error message if it fails. This is taken from the [`docopt` documentation](http://burntsushi.net/rustdoc/docopt/). + //@ The function `and_then` takes a closure from `T` to `Result`, and uses it to transform a `Result` to a + //@ `Result`. This way, we can chain computations that only happen if the previous one succeeded (and the error + //@ type has to stay the same). In case you know about monads, this style of programming will be familiar to you. + //@ There's a similar function for `Option`. `unwrap_or_else` is a bit like `unwrap`, but rather than panicking in + //@ case of an `Err`, it calls the closure. 
+ let args = Docopt::new(USAGE).and_then(|d| d.parse()).unwrap_or_else(|e| e.exit()); + // Now we can get all the values out. + let count = args.get_bool("-c"); + let sort = args.get_bool("-s"); + let pattern = args.get_str(""); + let files = args.get_vec(""); + if count && sort { + println!("Setting both '-c' and '-s' at the same time does not make any sense."); + process::exit(1); } - }); - // Now we watch the threads working on the counter. - for _ in 0..50 { - thread::sleep_ms(5); - println!("Current value: {}", counter.get()); + // We need to make the strings owned to construct the `Options` instance. + //@ If you check all the types carefully, you will notice that `pattern` above is of type `&str`. `str` is the type of a UTF-8 + //@ encoded string, that is, a bunch of bytes in memory (`[u8]`) that are valid according of UTF-8. `str` is unsized. `&str` + //@ stores the address of the character data, and their length. String literals like "this one" are + //@ of type `&'static str`: They point right to the constant section of the binary, so + //@ However, the borrow is valid for as long as the program runs, hence it has lifetime `'static`. Calling + //@ `to_string` will copy the string data into an owned buffer on the heap, and thus convert it to `String`. + let mode = if count { + OutputMode::Count + } else if sort { + OutputMode::SortAndPrint + } else { + OutputMode::Print + }; + Options { + files: files.iter().map(|file| file.to_string()).collect(), + pattern: pattern.to_string(), + output_mode: mode, + } } - // Finally, we wait for all the threads to finish to be sure we can catch the counter's final value. - handle1.join().unwrap(); - handle2.join().unwrap(); - println!("Final value: {}", counter.get()); + // Finally, we can call the `run` function from the previous part on the options extracted using `get_options`. Edit `main.rs` to call this function. + // You can now use `cargo run -- ` to call your program, and see the argument parser and the threads we wrote previously in action! + pub fn main() { + run(get_options()); /*@*/ + } } -// **Exercise 14.1**: Besides `Mutex`, there's also [`RwLock`](http://doc.rust-lang.org/stable/std/sync/struct.RwLock.html), which -// provides two ways of locking: One that grants only read-only access, to any number of concurrent readers, and another one -// for exclusive write access. (Notice that this is the same pattern we already saw with shared vs. mutable borrows.) Change -// the code above to use `RwLock`, such that multiple calls to `get` can be executed at the same time. -// -// **Exercise 14.2**: Add an operation `compare_and_inc(&self, test: usize, by: usize)` that increments the counter by -// `by` *only if* the current value is `test`. - -//@ ## Sync -//@ In part 12, we talked about types that are marked `Send` and thus can be moved to another thread. However, we did *not* -//@ talk about the question whether a borrow is `Send`. For `&mut T`, the answer is: It is `Send` whenever `T` is send. -//@ `&mut` allows moving values back and forth, it is even possible to [`swap`](http://doc.rust-lang.org/beta/std/mem/fn.swap.html) -//@ the contents of two mutably borrowed values. So in terms of concurrency, sending a mutable borrow is very much like -//@ sending full ownership, in the sense that it can be used to move the object to another thread. -//@ -//@ But what about `&T`, a shared borrow? Without interior mutability, it would always be all-right to send such values. 
-//@ After all, no mutation can be performed, so there can be as many threads accessing the data as we like. In the -//@ presence of interior mutability though, the story gets more complicated. Rust introduces another marker trait for -//@ this purpose: `Sync`. A type `T` is `Sync` if `&T` is `Send`. Just like `Send`, `Sync` has a default implementation -//@ and is thus automatically implemented for a data-structure *if* all its members implement it. -//@ -//@ Almost all the types we saw so far are `Sync`, with the exception of `Rc`. Remember that a shared borrow is good enough -//@ for cloning, and we don't want other threads to clone our local `Rc`, so it must not be `Sync`. The rule of `Mutex` -//@ is to enforce synchronization, so it should not be entirely surprising that `Mutex` is `Send` *and* `Sync` provided that -//@ `T` is `Send`. -//@ -//@ In the next part, we will learn about a type called `RefCell` that is `Send`, but not `Sync`. -//@ -//@ You may be curious whether there is a type that's `Sync`, but not `Send`. There are indeed rather esoteric examples -//@ of such types, but that's not a topic I want to go into. In case you are curious, there's a -//@ [Rust RFC](https://github.com/rust-lang/rfcs/blob/master/text/0458-send-improvements.md), which contains a type `RcMut` that would be `Sync` and not `Send`. -//@ You may also be interested in [this blog post](https://huonw.github.io/blog/2015/02/some-notes-on-send-and-sync/) on the topic. +// **Exercise 13.3**: Wouldn't it be nice if rgrep supported regular expressions? There's already a crate that does all the parsing and matching on regular +// expression, it's called [regex](https://crates.io/crates/regex). Add this crate to the dependencies of your workspace, add an option ("-r") to switch +// the pattern to regular-expression mode, and change `filter_lines` to honor this option. The documentation of regex is available from its crates.io site. +// (You won't be able to use the `regex!` macro if you are on the stable or beta channel of Rust. But it wouldn't help for our use-case anyway.) -//@ [index](main.html) | [previous](part13.html) | [next](main.html) +//@ [index](main.html) | [previous](part13.html) | [next](part15.html) diff --git a/src/part15.rs b/src/part15.rs index 7365421..a783689 100644 --- a/src/part15.rs +++ b/src/part15.rs @@ -1,5 +1,145 @@ -// Rust-101, Part 15: Interior Mutability (cont.), RefCell, Cell, Drop -// =================================================================== +// Rust-101, Part 15: Mutex, Interior Mutability (cont.), Sync +// =========================================================== + +use std::sync::{Arc, Mutex}; +use std::thread; + +//@ We already saw that we can use `Arc` to share memory between threads. However, `Arc` can only provide *read-only* +//@ access to memory: Since there is aliasing, Rust cannot, in general, permit mutation. If however, +//@ some care would be taken at run-time, then mutation would still be all right: We have to ensure that whenever +//@ someone changes the data, nobody else is looking at it. In other words, we need a *critical section* or (as it +//@ is called in Rust) a [`Mutex`](http://doc.rust-lang.org/stable/std/sync/struct.Mutex.html). Some other languages also call this a *lock*. +//@ +//@ As an example, let us write a concurrent counter. As usual in Rust, we first have to think about our data layout. +//@ In case of the mutex, this means we have to declare the type of the data that we want to be protected. 
In Rust,
+//@ a `Mutex` protects data, not code - and it is impossible to access the data in any other way. This is generally considered
+//@ good style, but other languages typically lack the ability to actually enforce this.
+//@ Of course, we want multiple threads to have access to this `Mutex`, so we wrap it in an `Arc`.
+//@
+//@ Rather than giving every field a name, a struct can also be defined by just giving a sequence of types (similar
+//@ to how a variant of an `enum` is defined). This is called a *tuple struct*. It is often used when constructing
+//@ a *newtype*, as we do here: `ConcurrentCounter` is essentially just a new name for `Arc<Mutex<usize>>`. However,
+//@ it is a locally declared type, so we can give it an inherent implementation and implement traits for it. Since the
+//@ field is private, nobody outside this module can even know the type we are wrapping.
+
+// The derived `Clone` implementation will clone the `Arc`, so all clones will actually talk about the same counter.
+#[derive(Clone)]
+struct ConcurrentCounter(Arc<Mutex<usize>>);
+
+impl ConcurrentCounter {
+    // The constructor just wraps the constructors of `Arc` and `Mutex`.
+    pub fn new(val: usize) -> Self {
+        ConcurrentCounter(Arc::new(Mutex::new(val)))                /*@*/
+    }
+
+    //@ The core operation is, of course, `increment`. The type may be surprising at first: A shared borrow?
+    //@ How can this be, since `increment` definitely modifies the counter? We already discussed above that `Mutex` is
+    //@ a way to get around this restriction in Rust. This phenomenon of data that can be mutated through a shared
+    //@ borrow is called *interior mutability*: We are changing the inner parts of the object, but seen from the outside,
+    //@ this does not count as "mutation". This stands in contrast to *exterior mutability*, which is the kind of
+    //@ mutability we saw so far, where one piece of data is replaced by something else of the same type. If you are familiar
+    //@ with languages like ML, you can compare this to how something of type `ref` permits mutation, even though it is
+    //@ itself a functional value (more precisely, a location) like all the others.
+    //@
+    //@ Interior mutability breaks the rules of Rust that I outlined earlier: There is aliasing (a shared borrow) and mutation.
+    //@ The reason that this still works is careful programming of the primitives for interior mutability - in this case, that's
+    //@ `Mutex`. It has to ensure with dynamic checks, at run-time, that things don't fall apart. In particular, it has to ensure
+    //@ that the data covered by the mutex can only ever be accessed from inside a critical section. This is where Rust's type
+    //@ system comes into play: With its discipline of ownership and borrowing, it can enforce such rules. Let's see how this goes.
+    pub fn increment(&self, by: usize) {
+        // `lock` on a mutex returns a *guard*, giving access to the data contained in the mutex.
+        //@ (We will discuss the `unwrap` soon.) `.0` is how we access the first component of a tuple or a struct.
+        let mut counter = self.0.lock().unwrap();
+        //@ The guard is another example of a smart pointer, and it can be used as if it were a pointer to the data protected
+        //@ by the lock.
+        *counter = *counter + by;
+        //@ At the end of the function, `counter` is dropped and the mutex is available again.
+        //@ This can only happen when full ownership of the guard is given up.
In particular, it is impossible for us + //@ to borrow some of its content, release the lock of the mutex, and subsequently access the protected data without holding + //@ the lock. Enforcing the locking discipline is expressible in the Rust type system, so we don't have to worry + //@ about data races *even though* we are mutating shared memory! + //@ + //@ One of the subtle aspects of locking is *poisoning*. If a thread panics while it holds a lock, it could leave the + //@ data-structure in a bad state. The lock is hence considered *poisoned*. Future attempts to `lock` it will fail. + //@ Above, we simply assert via `unwrap` that this will never happen. Alternatively, we could have a look at the poisoned + //@ state and attempt to recover from it. + } + + // The function `get` returns the current value of the counter. + pub fn get(&self) -> usize { + let counter = self.0.lock().unwrap(); /*@*/ + *counter /*@*/ + } +} + +// Now our counter is ready for action. +pub fn main() { + let counter = ConcurrentCounter::new(0); + + // We clone the counter for the first thread, which increments it by 2 every 15ms. + let counter1 = counter.clone(); + let handle1 = thread::spawn(move || { + for _ in 0..10 { + thread::sleep_ms(15); + counter1.increment(2); + } + }); + + // The second thread increments the counter by 3 every 20ms. + let counter2 = counter.clone(); + let handle2 = thread::spawn(move || { + for _ in 0..10 { + thread::sleep_ms(20); + counter2.increment(3); + } + }); + + // Now we watch the threads working on the counter. + for _ in 0..50 { + thread::sleep_ms(5); + println!("Current value: {}", counter.get()); + } + + // Finally, we wait for all the threads to finish to be sure we can catch the counter's final value. + handle1.join().unwrap(); + handle2.join().unwrap(); + println!("Final value: {}", counter.get()); +} + +// **Exercise 14.1**: Besides `Mutex`, there's also [`RwLock`](http://doc.rust-lang.org/stable/std/sync/struct.RwLock.html), which +// provides two ways of locking: One that grants only read-only access, to any number of concurrent readers, and another one +// for exclusive write access. (Notice that this is the same pattern we already saw with shared vs. mutable borrows.) Change +// the code above to use `RwLock`, such that multiple calls to `get` can be executed at the same time. +// +// **Exercise 14.2**: Add an operation `compare_and_inc(&self, test: usize, by: usize)` that increments the counter by +// `by` *only if* the current value is `test`. + +//@ ## Sync +//@ In part 12, we talked about types that are marked `Send` and thus can be moved to another thread. However, we did *not* +//@ talk about the question whether a borrow is `Send`. For `&mut T`, the answer is: It is `Send` whenever `T` is send. +//@ `&mut` allows moving values back and forth, it is even possible to [`swap`](http://doc.rust-lang.org/beta/std/mem/fn.swap.html) +//@ the contents of two mutably borrowed values. So in terms of concurrency, sending a mutable borrow is very much like +//@ sending full ownership, in the sense that it can be used to move the object to another thread. +//@ +//@ But what about `&T`, a shared borrow? Without interior mutability, it would always be all-right to send such values. +//@ After all, no mutation can be performed, so there can be as many threads accessing the data as we like. In the +//@ presence of interior mutability though, the story gets more complicated. Rust introduces another marker trait for +//@ this purpose: `Sync`. 
A type `T` is `Sync` if `&T` is `Send`. Just like `Send`, `Sync` has a default implementation +//@ and is thus automatically implemented for a data-structure *if* all its members implement it. +//@ +//@ Almost all the types we saw so far are `Sync`, with the exception of `Rc`. Remember that a shared borrow is good enough +//@ for cloning, and we don't want other threads to clone our local `Rc`, so it must not be `Sync`. The rule of `Mutex` +//@ is to enforce synchronization, so it should not be entirely surprising that `Mutex` is `Send` *and* `Sync` provided that +//@ `T` is `Send`. +//@ +//@ In the next part, we will learn about a type called `RefCell` that is `Send`, but not `Sync`. +//@ +//@ You may be curious whether there is a type that's `Sync`, but not `Send`. There are indeed rather esoteric examples +//@ of such types, but that's not a topic I want to go into. In case you are curious, there's a +//@ [Rust RFC](https://github.com/rust-lang/rfcs/blob/master/text/0458-send-improvements.md), which contains a type `RcMut` that would be `Sync` and not `Send`. +//@ You may also be interested in [this blog post](https://huonw.github.io/blog/2015/02/some-notes-on-send-and-sync/) on the topic. + +// FIXME TODO some old outdated explanation FIXME TODO //@ [`RefCell`](http://doc.rust-lang.org/beta/std/cell/struct.RefCell.html) //@ [`is very much like `RwLock`, but it's not thread-safe: "Locking" is done without atomic operations. @@ -22,4 +162,5 @@ //@ the thread-safe `RwLock`. And finally, in case a distinction between readers and writers is not helpful, one can use the //@ more efficient `Mutex`. + //@ [index](main.html) | [previous](part14.html) | [next](main.html) diff --git a/workspace/src/part11.rs b/workspace/src/part11.rs index cc2a252..ac19371 100644 --- a/workspace/src/part11.rs +++ b/workspace/src/part11.rs @@ -1,97 +1,65 @@ -// Rust-101, Part 11: Trait Objects, Box, Rc, Lifetime bounds -// ========================================================== +// Rust-101, Part 11: Trait Objects, Box, Lifetime bounds +// ====================================================== -mod callbacks { - // For now, we just decide that the callbacks have an argument of type `i32`. - struct CallbacksV1 { - callbacks: Vec, - } - - /* struct CallbacksV2 { - callbacks: Vec, - } */ - pub struct Callbacks { - callbacks: Vec>, - } +// For now, we just decide that the callbacks have an argument of type `i32`. +struct CallbacksV1 { + callbacks: Vec, +} - impl Callbacks { - // Now we can provide some functions. The constructor should be straight-forward. - pub fn new() -> Self { - unimplemented!() - } +/* struct CallbacksV2 { + callbacks: Vec, +} */ - // Registration simply stores the callback. - pub fn register(&mut self, callback: Box) { - unimplemented!() - } +pub struct Callbacks { + callbacks: Vec>, +} - // And here we call all the stored callbacks. - pub fn call(&mut self, val: i32) { - // Since they are of type `FnMut`, we need to mutably iterate. Notice that boxes dereference implicitly. - for callback in self.callbacks.iter_mut() { - unimplemented!() - } - } +impl Callbacks { + // Now we can provide some functions. The constructor should be straight-forward. + pub fn new() -> Self { + unimplemented!() } - // Now we are ready for the demo. - pub fn demo(c: &mut Callbacks) { - c.register(Box::new(|val| println!("Callback 1: {}", val))); - c.call(0); - - let mut count: usize = 0; - c.register(Box::new(move |val| { - count = count+1; - println!("Callback 2, {}. 
time: {}", count, val); - } )); - c.call(1); c.call(2); + // Registration simply stores the callback. + pub fn register(&mut self, callback: Box) { + unimplemented!() } -} -// Remember to edit `main.rs` to run the demo. -pub fn main() { - let mut c = callbacks::Callbacks::new(); - callbacks::demo(&mut c); -} - -mod callbacks_clone { - - use std::rc::Rc; - - #[derive(Clone)] - pub struct Callbacks { - callbacks: Vec>, + // We can also write a generic version of `register`, such that it will be instantiated with some concrete closure type `F` + // and do the creation of the `Box` and the conversion from `F` to `FnMut(i32)` itself. + + pub fn register_generic(&mut self, callback: F) { + unimplemented!() } - impl Callbacks { - pub fn new() -> Self { - unimplemented!() - } - - // For the `register` function, we don't actually have to use trait objects in the argument. - - pub fn register(&mut self, callback: F) { + // And here we call all the stored callbacks. + pub fn call(&mut self, val: i32) { + // Since they are of type `FnMut`, we need to mutably iterate. + for callback in self.callbacks.iter_mut() { unimplemented!() } - - pub fn call(&mut self, val: i32) { - // We only need a shared iterator here. `Rc` also implicitly dereferences, so we can simply call the callback. - for callback in self.callbacks.iter() { - unimplemented!() - } - } } +} + +// Now we are ready for the demo. Remember to edit `main.rs` to run it. +pub fn main() { + let mut c = Callbacks::new(); + c.register(Box::new(|val| println!("Callback 1: {}", val))); + c.call(0); - // The demo works just as above. Our counting callback doesn't work anymore though, because we are using `Fn` now. - fn demo(c: &mut Callbacks) { - c.register(|val| println!("Callback 1: {}", val)); - c.call(0); c.call(1); + { + let mut count: usize = 0; + c.register_generic(move |val| { + count = count+1; + println!("Callback 2: {} ({}. time)", val, count); + } ); } + c.call(1); c.call(2); } + // **Exercise 11.1**: We made the arbitrary choice of using `i32` for the arguments. Generalize the data-structures above // to work with an arbitrary type `T` that's passed to the callbacks. Since you need to call multiple callbacks with the // same `t: T`, you will either have to restrict `T` to `Copy` types, or pass a borrow. - diff --git a/workspace/src/part12.rs b/workspace/src/part12.rs index 4996ac1..a351995 100644 --- a/workspace/src/part12.rs +++ b/workspace/src/part12.rs @@ -1,128 +1,110 @@ -// Rust-101, Part 12: Concurrency, Arc, Send -// ========================================= - -use std::io::prelude::*; -use std::{io, fs, thread}; -use std::sync::mpsc::{sync_channel, SyncSender, Receiver}; -use std::sync::Arc; - - -// Before we come to the actual code, we define a data-structure `Options` to store all the information we need -// to complete the job: Which files to work on, which pattern to look for, and how to output.
-#[derive(Clone,Copy)] -pub enum OutputMode { - Print, - SortAndPrint, - Count, -} -use self::OutputMode::*; +// Rust-101, Part 12: Rc, Interior Mutability, Cell, RefCell +// ========================================================= -pub struct Options { - pub files: Vec, - pub pattern: String, - pub output_mode: OutputMode, -} +use std::rc::Rc; +use std::cell::{Cell, RefCell}; -// The first function reads the files, and sends every line over the `out_channel`. -fn read_files(options: Arc, out_channel: SyncSender) { - for file in options.files.iter() { - // First, we open the file, ignoring any errors. - let file = fs::File::open(file).unwrap(); - // Then we obtain a `BufReader` for it, which provides the `lines` function. - let file = io::BufReader::new(file); - for line in file.lines() { - let line = line.unwrap(); - // Now we send the line over the channel, ignoring the possibility of `send` failing. - out_channel.send(line).unwrap(); - } - } - // When we drop the `out_channel`, it will be closed, which the other end can notice. + +#[derive(Clone)] +struct Callbacks { + callbacks: Vec>, } -// The second function filters the lines it receives through `in_channel` with the pattern, and sends -// matches via `out_channel`. -fn filter_lines(options: Arc, - in_channel: Receiver, - out_channel: SyncSender) { - // We can simply iterate over the channel, which will stop when the channel is closed. - for line in in_channel.iter() { - // `contains` works on lots of types of patterns, but in particular, we can use it to test whether - // one string is contained in another. This is another example of Rust using traits as substitute for overloading. - if line.contains(&options.pattern) { - unimplemented!() - } +impl Callbacks { + pub fn new() -> Self { + unimplemented!() } -} -// The third function performs the output operations, receiving the relevant lines on its `in_channel`. -fn output_lines(options: Arc, in_channel: Receiver) { - match options.output_mode { - Print => { - // Here, we just print every line we see. - for line in in_channel.iter() { - unimplemented!() - } - }, - Count => { - // We are supposed to count the number of matching lines. There's a convenient iterator adapter that - // we can use for this job. - unimplemented!() - }, - SortAndPrint => { - // We are asked to sort the matching lines before printing. So let's collect them all in a local vector... - let mut data: Vec = in_channel.iter().collect(); - // ...and implement the actual sorting later. + // Registration works just like last time, except that we are creating an `Rc` now. + pub fn register(&mut self, callback: F) { + unimplemented!() + } + + pub fn call(&self, val: i32) { + // We only need a shared iterator here. Since `Rc` is a smart pointer, we can directly call the callback. + for callback in self.callbacks.iter() { unimplemented!() } } } -// With the operations of the three threads defined, we can now implement a function that performs grepping according -// to some given options. -pub fn run(options: Options) { - // We move the `options` into an `Arc`, as that's what the thread workers expect. - let options = Arc::new(options); - - // This sets up the channels. We use a `sync_channel` with buffer-size of 16 to avoid needlessly filling RAM. - let (line_sender, line_receiver) = sync_channel(16); - let (filtered_sender, filtered_receiver) = sync_channel(16); - - // Spawn the read thread: `thread::spawn` takes a closure that is run in a new thread. 
- let options1 = options.clone(); - let handle1 = thread::spawn(move || read_files(options1, line_sender)); - - // Same with the filter thread. - let options2 = options.clone(); - let handle2 = thread::spawn(move || { - filter_lines(options2, line_receiver, filtered_sender) - }); - - // And the output thread. - let options3 = options.clone(); - let handle3 = thread::spawn(move || output_lines(options3, filtered_receiver)); - - // Finally, wait until all three threads did their job. - handle1.join().unwrap(); - handle2.join().unwrap(); - handle3.join().unwrap(); +// Time for a demo! +fn demo(c: &mut Callbacks) { + c.register(|val| println!("Callback 1: {}", val)); + c.call(0); c.clone().call(1); } -// Now we have all the pieces together for testing our rgrep with some hard-coded options. pub fn main() { - let options = Options { - files: vec!["src/part10.rs".to_string(), - "src/part11.rs".to_string(), - "src/part12.rs".to_string()], - pattern: "let".to_string(), - output_mode: Print - }; - run(options); + let mut c = Callbacks::new(); + demo(&mut c); +} + +// ## Interior Mutability + +// So, let us put our counter in a `Cell`, and replicate the example from the previous part. +fn demo_cell(c: &mut Callbacks) { + { + let count = Cell::new(0); + // Again, we have to move ownership if the `count` into the environment closure. + c.register(move |val| { + // In here, all we have is a shared borrow of our environment. But that's good enough for the `get` and `set` of the cell! + let new_count = count.get()+1; + count.set(new_count); + println!("Callback 2: {} ({}. time)", val, new_count); + } ); + } + + c.call(2); c.clone().call(3); } -// **Exercise 12.1**: Change rgrep such that it prints not only the matching lines, but also the name of the file -// and the number of the line in the file. You will have to change the type of the channels from `String` to something -// that records this extra information. +// ## `RefCell` + +// Our final version of `Callbacks` puts the closure environment into a `RefCell`. +#[derive(Clone)] +struct CallbacksMut { + callbacks: Vec>>, +} + +impl CallbacksMut { + pub fn new() -> Self { + unimplemented!() + } + + pub fn register(&mut self, callback: F) { + let cell = Rc::new(RefCell::new(callback)); + unimplemented!() + } + + pub fn call(&mut self, val: i32) { + for callback in self.callbacks.iter() { + // We have to *explicitly* borrow the contents of a `RefCell` by calling `borrow` or `borrow_mut`. + let mut closure = callback.borrow_mut(); + // Unfortunately, Rust's auto-dereference of pointers is not clever enough here. We thus have to explicitly + // dereference the smart pointer and obtain a mutable borrow of the content. + (&mut *closure)(val); + } + } +} + +// Now we can repeat the demo from the previous part - but this time, our `CallbacksMut` type +// can be cloned. +fn demo_mut(c: &mut CallbacksMut) { + c.register(|val| println!("Callback 1: {}", val)); + c.call(0); + + { + let mut count: usize = 0; + c.register(move |val| { + count = count+1; + println!("Callback 2: {} ({}. time)", val, count); + } ); + } + c.call(1); c.clone().call(2); +} +// **Exercise 12.1**: Change the type of `call` to ask only for a shared borrow. Then write some piece of code using only the available, public +// interface of `CallbacksMut` such that a reentrant call to `call` is happening, and the program aborts because the `RefCell` refuses to hand +// out a second mutable borrow to its content. 
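As a quick, self-contained illustration of the dynamic checks that the exercise above is about (an editorial sketch, not part of the skeleton files): `Cell` only ever copies values in and out, while `RefCell` hands out references and therefore has to count outstanding borrows at run-time. Uncommenting the conflicting `borrow_mut` should make this program panic.

use std::cell::{Cell, RefCell};

fn main() {
    // A `Cell` never hands out references to its content, so `get`/`set` work through a shared borrow.
    let counter = Cell::new(0);
    counter.set(counter.get() + 1);
    println!("counter is now {}", counter.get());

    // A `RefCell` does hand out references, so it tracks borrows dynamically.
    let lines = RefCell::new(vec!["first".to_string()]);
    {
        let shared = lines.borrow();
        // While `shared` is alive, a mutable borrow would violate the rules and panic:
        // let conflict = lines.borrow_mut();
        println!("have {} line(s)", shared.len());
    } // The `Ref` guard is dropped here, releasing the shared borrow.
    lines.borrow_mut().push("second".to_string());
    println!("have {} line(s)", lines.borrow().len());
}

The same mechanism is what makes a reentrant `call` on `CallbacksMut` abort: when `call` is entered a second time, the closure's environment is still mutably borrowed, and the `RefCell` refuses to hand out another mutable borrow.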
diff --git a/workspace/src/part13.rs b/workspace/src/part13.rs index 2be4353..501fb7d 100644 --- a/workspace/src/part13.rs +++ b/workspace/src/part13.rs @@ -1,107 +1,128 @@ -// Rust-101, Part 13: Slices, Arrays, External Dependencies -// ======================================================== - +// Rust-101, Part 13: Concurrency, Arc, Send +// ========================================= + +use std::io::prelude::*; +use std::{io, fs, thread}; +use std::sync::mpsc::{sync_channel, SyncSender, Receiver}; +use std::sync::Arc; + + +// Before we come to the actual code, we define a data-structure `Options` to store all the information we need +// to complete the job: Which files to work on, which pattern to look for, and how to output.
+#[derive(Clone,Copy)] +pub enum OutputMode { + Print, + SortAndPrint, + Count, +} +use self::OutputMode::*; -// ## Slices +pub struct Options { + pub files: Vec, + pub pattern: String, + pub output_mode: OutputMode, +} -pub fn sort(data: &mut [T]) { - if data.len() < 2 { return; } - // We decide that the element at 0 is our pivot, and then we move our cursors through the rest of the slice, - // making sure that everything on the left is no larger than the pivot, and everything on the right is no smaller. - let mut lpos = 1; - let mut rpos = data.len(); - /* Invariant: pivot is data[0]; everything with index (0,lpos) is <= pivot; - [rpos,len) is >= pivot; lpos < rpos */ - loop { - // **Exercise 13.1**: Complete this Quicksort loop. You can use `swap` on slices to swap two elements. Write a - // test function for `sort`. - unimplemented!() +// The first function reads the files, and sends every line over the `out_channel`. +fn read_files(options: Arc, out_channel: SyncSender) { + for file in options.files.iter() { + // First, we open the file, ignoring any errors. + let file = fs::File::open(file).unwrap(); + // Then we obtain a `BufReader` for it, which provides the `lines` function. + let file = io::BufReader::new(file); + for line in file.lines() { + let line = line.unwrap(); + // Now we send the line over the channel, ignoring the possibility of `send` failing. + out_channel.send(line).unwrap(); + } } - - // Once our cursors met, we need to put the pivot in the right place. - data.swap(0, lpos-1); - - // Finally, we split our slice to sort the two halves. The nice part about slices is that splitting them is cheap: - let (part1, part2) = data.split_at_mut(lpos); - unimplemented!() + // When we drop the `out_channel`, it will be closed, which the other end can notice. } -// **Exercise 13.2**: Since `String` implements `PartialEq`, you can now change the function `output_lines` in the previous part -// to call the sort function above. If you did exercise 12.1, you will have slightly more work. Make sure you sort by the matched line -// only, not by filename or line number! +// The second function filters the lines it receives through `in_channel` with the pattern, and sends +// matches via `out_channel`. +fn filter_lines(options: Arc, + in_channel: Receiver, + out_channel: SyncSender) { + // We can simply iterate over the channel, which will stop when the channel is closed. + for line in in_channel.iter() { + // `contains` works on lots of types of patterns, but in particular, we can use it to test whether + // one string is contained in another. This is another example of Rust using traits as substitute for overloading. + if line.contains(&options.pattern) { + unimplemented!() + } + } +} -// Now, we can sort, e.g., an vector of numbers. -fn sort_nums(data: &mut Vec) { - sort(&mut data[..]); +// The third function performs the output operations, receiving the relevant lines on its `in_channel`. +fn output_lines(options: Arc, in_channel: Receiver) { + match options.output_mode { + Print => { + // Here, we just print every line we see. + for line in in_channel.iter() { + unimplemented!() + } + }, + Count => { + // We are supposed to count the number of matching lines. There's a convenient iterator adapter that + // we can use for this job. + unimplemented!() + }, + SortAndPrint => { + // We are asked to sort the matching lines before printing. So let's collect them all in a local vector... + let mut data: Vec = in_channel.iter().collect(); + // ...and implement the actual sorting later. 
+ unimplemented!() + } + } } -// ## Arrays -fn sort_array() { - let mut array_of_data: [f64; 5] = [1.0, 3.4, 12.7, -9.12, 0.1]; - sort(&mut array_of_data); +// With the operations of the three threads defined, we can now implement a function that performs grepping according +// to some given options. +pub fn run(options: Options) { + // We move the `options` into an `Arc`, as that's what the thread workers expect. + let options = Arc::new(options); + + // This sets up the channels. We use a `sync_channel` with buffer-size of 16 to avoid needlessly filling RAM. + let (line_sender, line_receiver) = sync_channel(16); + let (filtered_sender, filtered_receiver) = sync_channel(16); + + // Spawn the read thread: `thread::spawn` takes a closure that is run in a new thread. + let options1 = options.clone(); + let handle1 = thread::spawn(move || read_files(options1, line_sender)); + + // Same with the filter thread. + let options2 = options.clone(); + let handle2 = thread::spawn(move || { + filter_lines(options2, line_receiver, filtered_sender) + }); + + // And the output thread. + let options3 = options.clone(); + let handle3 = thread::spawn(move || output_lines(options3, filtered_receiver)); + + // Finally, wait until all three threads did their job. + handle1.join().unwrap(); + handle2.join().unwrap(); + handle3.join().unwrap(); } -// ## External Dependencies - - -// I disabled the following module (using a rather bad hack), because it only compiles if `docopt` is linked. -// Remove the attribute of the `rgrep` module to enable compilation. -#[cfg(feature = "disabled")] -pub mod rgrep { - // Now that `docopt` is linked, we can first root it in the namespace and then import it with `use`. We also import some other pieces that we will need. - extern crate docopt; - use self::docopt::Docopt; - use part12::{run, Options, OutputMode}; - use std::process; - - // The `USAGE` string documents how the program is to be called. It's written in a format that `docopt` can parse. - static USAGE: &'static str = " -Usage: rgrep [-c] [-s] ... - -Options: - -c, --count Count number of matching lines (rather than printing them). - -s, --sort Sort the lines before printing. -"; - - // This function extracts the rgrep options from the command-line arguments. - fn get_options() -> Options { - // Parse `argv` and exit the program with an error message if it fails. This is taken from the [`docopt` documentation](http://burntsushi.net/rustdoc/docopt/). - let args = Docopt::new(USAGE).and_then(|d| d.parse()).unwrap_or_else(|e| e.exit()); - // Now we can get all the values out. - let count = args.get_bool("-c"); - let sort = args.get_bool("-s"); - let pattern = args.get_str(""); - let files = args.get_vec(""); - if count && sort { - println!("Setting both '-c' and '-s' at the same time does not make any sense."); - process::exit(1); - } +// Now we have all the pieces together for testing our rgrep with some hard-coded options. +pub fn main() { + let options = Options { + files: vec!["src/part10.rs".to_string(), + "src/part11.rs".to_string(), + "src/part12.rs".to_string()], + pattern: "let".to_string(), + output_mode: Print + }; + run(options); +} - // We need to make the strings owned to construct the `Options` instance. 
- let mode = if count { - OutputMode::Count - } else if sort { - OutputMode::SortAndPrint - } else { - OutputMode::Print - }; - Options { - files: files.iter().map(|file| file.to_string()).collect(), - pattern: pattern.to_string(), - output_mode: mode, - } - } +// **Exercise 12.1**: Change rgrep such that it prints not only the matching lines, but also the name of the file +// and the number of the line in the file. You will have to change the type of the channels from `String` to something +// that records this extra information. - // Finally, we can call the `run` function from the previous part on the options extracted using `get_options`. Edit `main.rs` to call this function. - // You can now use `cargo run -- ` to call your program, and see the argument parser and the threads we wrote previously in action! - pub fn main() { - unimplemented!() - } -} -// **Exercise 13.3**: Wouldn't it be nice if rgrep supported regular expressions? There's already a crate that does all the parsing and matching on regular -// expression, it's called [regex](https://crates.io/crates/regex). Add this crate to the dependencies of your workspace, add an option ("-r") to switch -// the pattern to regular-expression mode, and change `filter_lines` to honor this option. The documentation of regex is available from its crates.io site. -// (You won't be able to use the `regex!` macro if you are on the stable or beta channel of Rust. But it wouldn't help for our use-case anyway.) diff --git a/workspace/src/part14.rs b/workspace/src/part14.rs index d939265..6e007aa 100644 --- a/workspace/src/part14.rs +++ b/workspace/src/part14.rs @@ -1,72 +1,107 @@ -// Rust-101, Part 14: Mutex, Interior Mutability, Sync -// =================================================== +// Rust-101, Part 14: Slices, Arrays, External Dependencies +// ======================================================== -use std::sync::{Arc, Mutex}; -use std::thread; +// ## Slices -// The derived `Clone` implementation will clone the `Arc`, so all clones will actually talk about the same counter. -#[derive(Clone)] -struct ConcurrentCounter(Arc>); +pub fn sort(data: &mut [T]) { + if data.len() < 2 { return; } -impl ConcurrentCounter { - // The constructor just wraps the constructors of `Arc` and `Mutex`. - pub fn new(val: usize) -> Self { + // We decide that the element at 0 is our pivot, and then we move our cursors through the rest of the slice, + // making sure that everything on the left is no larger than the pivot, and everything on the right is no smaller. + let mut lpos = 1; + let mut rpos = data.len(); + /* Invariant: pivot is data[0]; everything with index (0,lpos) is <= pivot; + [rpos,len) is >= pivot; lpos < rpos */ + loop { + // **Exercise 13.1**: Complete this Quicksort loop. You can use `swap` on slices to swap two elements. Write a + // test function for `sort`. unimplemented!() } - pub fn increment(&self, by: usize) { - // `lock` on a mutex returns a *guard*, giving access to the data contained in the mutex. - let mut counter = self.0.lock().unwrap(); - *counter = *counter + by; - } + // Once our cursors met, we need to put the pivot in the right place. + data.swap(0, lpos-1); - // The function `get` returns the current value of the counter. - pub fn get(&self) -> usize { - unimplemented!() - } + // Finally, we split our slice to sort the two halves. The nice part about slices is that splitting them is cheap: + let (part1, part2) = data.split_at_mut(lpos); + unimplemented!() } -// Now our counter is ready for action. 
-pub fn main() {
-    let counter = ConcurrentCounter::new(0);
+// **Exercise 13.2**: Since `String` implements `PartialEq`, you can now change the function `output_lines` in the previous part
+// to call the sort function above. If you did exercise 12.1, you will have slightly more work. Make sure you sort by the matched line
+// only, not by filename or line number!
 
-    // We clone the counter for the first thread, which increments it by 2 every 15ms.
-    let counter1 = counter.clone();
-    let handle1 = thread::spawn(move || {
-        for _ in 0..10 {
-            thread::sleep_ms(15);
-            counter1.increment(2);
-        }
-    });
-
-    // The second thread increments the counter by 3 every 20ms.
-    let counter2 = counter.clone();
-    let handle2 = thread::spawn(move || {
-        for _ in 0..10 {
-            thread::sleep_ms(20);
-            counter2.increment(3);
+// Now, we can sort, e.g., a vector of numbers.
+fn sort_nums(data: &mut Vec<i32>) {
+    sort(&mut data[..]);
+}
+
+// ## Arrays
+fn sort_array() {
+    let mut array_of_data: [f64; 5] = [1.0, 3.4, 12.7, -9.12, 0.1];
+    sort(&mut array_of_data);
+}
+
+// ## External Dependencies
+
+
+// I disabled the following module (using a rather bad hack), because it only compiles if `docopt` is linked.
+// Remove the attribute of the `rgrep` module to enable compilation.
+#[cfg(feature = "disabled")]
+pub mod rgrep {
+    // Now that `docopt` is linked, we can first add it to the namespace and then import shorter names with `use`. We also import some other pieces that we will need.
+    extern crate docopt;
+    use self::docopt::Docopt;
+    use part12::{run, Options, OutputMode};
+    use std::process;
+
+    // The `USAGE` string documents how the program is to be called. It's written in a format that `docopt` can parse.
+    static USAGE: &'static str = "
+Usage: rgrep [-c] [-s] <pattern> <file>...
+
+Options:
+    -c, --count  Count number of matching lines (rather than printing them).
+    -s, --sort   Sort the lines before printing.
+";
+
+    // This function extracts the rgrep options from the command-line arguments.
+    fn get_options() -> Options {
+        // Parse `argv` and exit the program with an error message if it fails. This is taken from the [`docopt` documentation](http://burntsushi.net/rustdoc/docopt/).
+        let args = Docopt::new(USAGE).and_then(|d| d.parse()).unwrap_or_else(|e| e.exit());
+        // Now we can get all the values out.
+        let count = args.get_bool("-c");
+        let sort = args.get_bool("-s");
+        let pattern = args.get_str("<pattern>");
+        let files = args.get_vec("<file>");
+        if count && sort {
+            println!("Setting both '-c' and '-s' at the same time does not make any sense.");
+            process::exit(1);
        }
-    });
 
-    // Now we watch the threads working on the counter.
-    for _ in 0..50 {
-        thread::sleep_ms(5);
-        println!("Current value: {}", counter.get());
+        // We need to make the strings owned to construct the `Options` instance.
+        let mode = if count {
+            OutputMode::Count
+        } else if sort {
+            OutputMode::SortAndPrint
+        } else {
+            OutputMode::Print
+        };
+        Options {
+            files: files.iter().map(|file| file.to_string()).collect(),
+            pattern: pattern.to_string(),
+            output_mode: mode,
+        }
     }
 
-    // Finally, we wait for all the threads to finish to be sure we can catch the counter's final value.
-    handle1.join().unwrap();
-    handle2.join().unwrap();
-    println!("Final value: {}", counter.get());
+    // Finally, we can call the `run` function from the previous part on the options extracted using `get_options`. Edit `main.rs` to call this function.
+    // You can now use `cargo run -- <pattern> <file>...` to call your program, and see the argument parser and the threads we wrote previously in action!
+    pub fn main() {
+        unimplemented!()
+    }
 }
 
-// **Exercise 14.1**: Besides `Mutex`, there's also [`RwLock`](http://doc.rust-lang.org/stable/std/sync/struct.RwLock.html), which
-// provides two ways of locking: One that grants only read-only access, to any number of concurrent readers, and another one
-// for exclusive write access. (Notice that this is the same pattern we already saw with shared vs. mutable borrows.) Change
-// the code above to use `RwLock`, such that multiple calls to `get` can be executed at the same time.
-//
-// **Exercise 14.2**: Add an operation `compare_and_inc(&self, test: usize, by: usize)` that increments the counter by
-// `by` *only if* the current value is `test`.
-
+// **Exercise 13.3**: Wouldn't it be nice if rgrep supported regular expressions? There's already a crate that does all the parsing and matching on regular
+// expressions; it's called [regex](https://crates.io/crates/regex). Add this crate to the dependencies of your workspace, add an option ("-r") to switch
+// the pattern to regular-expression mode, and change `filter_lines` to honor this option. The documentation of regex is available from its crates.io site.
+// (You won't be able to use the `regex!` macro if you are on the stable or beta channel of Rust. But it wouldn't help for our use-case anyway.)
diff --git a/workspace/src/part15.rs b/workspace/src/part15.rs
index 4dca116..d006b23 100644
--- a/workspace/src/part15.rs
+++ b/workspace/src/part15.rs
@@ -1,4 +1,76 @@
-// Rust-101, Part 15: Interior Mutability (cont.), RefCell, Cell, Drop
-// ===================================================================
+// Rust-101, Part 15: Mutex, Interior Mutability (cont.), Sync
+// ===========================================================
+
+use std::sync::{Arc, Mutex};
+use std::thread;
+
+
+// The derived `Clone` implementation will clone the `Arc`, so all clones will actually talk about the same counter.
+#[derive(Clone)]
+struct ConcurrentCounter(Arc<Mutex<usize>>);
+
+impl ConcurrentCounter {
+    // The constructor just wraps the constructors of `Arc` and `Mutex`.
+    pub fn new(val: usize) -> Self {
+        unimplemented!()
+    }
+
+    pub fn increment(&self, by: usize) {
+        // `lock` on a mutex returns a *guard*, giving access to the data contained in the mutex.
+        let mut counter = self.0.lock().unwrap();
+        *counter = *counter + by;
+    }
+
+    // The function `get` returns the current value of the counter.
+    pub fn get(&self) -> usize {
+        unimplemented!()
+    }
+}
+
+// Now our counter is ready for action.
+pub fn main() {
+    let counter = ConcurrentCounter::new(0);
+
+    // We clone the counter for the first thread, which increments it by 2 every 15ms.
+    let counter1 = counter.clone();
+    let handle1 = thread::spawn(move || {
+        for _ in 0..10 {
+            thread::sleep_ms(15);
+            counter1.increment(2);
+        }
+    });
+
+    // The second thread increments the counter by 3 every 20ms.
+    let counter2 = counter.clone();
+    let handle2 = thread::spawn(move || {
+        for _ in 0..10 {
+            thread::sleep_ms(20);
+            counter2.increment(3);
+        }
+    });
+
+    // Now we watch the threads working on the counter.
+    for _ in 0..50 {
+        thread::sleep_ms(5);
+        println!("Current value: {}", counter.get());
+    }
+
+    // Finally, we wait for all the threads to finish to be sure we can catch the counter's final value.
+ handle1.join().unwrap(); + handle2.join().unwrap(); + println!("Final value: {}", counter.get()); +} + +// **Exercise 14.1**: Besides `Mutex`, there's also [`RwLock`](http://doc.rust-lang.org/stable/std/sync/struct.RwLock.html), which +// provides two ways of locking: One that grants only read-only access, to any number of concurrent readers, and another one +// for exclusive write access. (Notice that this is the same pattern we already saw with shared vs. mutable borrows.) Change +// the code above to use `RwLock`, such that multiple calls to `get` can be executed at the same time. +// +// **Exercise 14.2**: Add an operation `compare_and_inc(&self, test: usize, by: usize)` that increments the counter by +// `by` *only if* the current value is `test`. + + +// FIXME TODO some old outdated explanation FIXME TODO +
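
For Exercises 14.1 and 14.2 above, a minimal sketch of one possible solution follows. It assumes the standard `std::sync::RwLock` API, whose `read()` and `write()` methods return lock guards (wrapped in a `LockResult`); this is only an illustration of the idea, not the course's official solution.

use std::sync::{Arc, RwLock};

#[derive(Clone)]
pub struct ConcurrentCounter(Arc<RwLock<usize>>);

impl ConcurrentCounter {
    pub fn new(val: usize) -> Self {
        ConcurrentCounter(Arc::new(RwLock::new(val)))
    }

    // Exercise 14.1: a read lock can be held by many readers at once, so
    // multiple calls to `get` can proceed concurrently.
    pub fn get(&self) -> usize {
        let counter = self.0.read().unwrap();
        *counter
    }

    pub fn increment(&self, by: usize) {
        // A write lock grants exclusive access, mirroring a mutable borrow.
        let mut counter = self.0.write().unwrap();
        *counter += by;
    }

    // Exercise 14.2: increment only if the current value equals `test`.
    // Taking the write lock up front keeps the test-and-update atomic.
    pub fn compare_and_inc(&self, test: usize, by: usize) {
        let mut counter = self.0.write().unwrap();
        if *counter == test {
            *counter += by;
        }
    }
}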
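
Similarly, Exercise 12.1 further up in this commit asks the channels to carry the file name and line number along with each matching line. One hedged sketch of such a payload; the struct and field names here are invented for illustration and are not part of the course code:

// Illustrative only: one possible record to send through the channels.
pub struct Match {
    pub file: String,        // which file the line came from
    pub line_number: usize,  // line number within that file
    pub line: String,        // the matching line itself
}

The channel endpoints would then be typed `SyncSender<Match>` and `Receiver<Match>` instead of `SyncSender<String>` and `Receiver<String>`, and `output_lines` would print all three fields.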