I believe we're running into a manifestation of this panic in hyper: hyperium/hyper#2112. See the following Rollbar payload:
{
  "body": {
    "trace": {
      "frames": [
        {
          "lineno": 259,
          "filename": "/root/.cargo/registry/src/github.com-1ecc6299db9ec823/hyper-0.12.36/src/client/conn.rs"
        },
        {
          "method": "actix::init_application::{{closure}}::{{closure}}::h03cddbc1bd61d1cf",
          "filename": ""
        },
        {
          "method": "std::panicking::rust_panic_with_hook::h71e6a073d87de1f5",
          "lineno": 595,
          "filename": "/rustc/2fd73fabe469357a12c2c974c140f67e7cdd76d0/library/std/src/panicking.rs"
        },
        {
          "method": "std::panicking::begin_panic::{{closure}}::h84a4ac6142ac8c51",
          "filename": ""
        },
        {
          "method": "std::sys_common::backtrace::__rust_end_short_backtrace::h8df64b270704a2ee",
          "filename": ""
        },
        {
          "method": "std::panicking::begin_panic::hbb31f5852bbdf237",
          "filename": ""
        },
        {
          "method": "futures::future::chain::Chain<A,B,C>::poll::h3a95f13ed59d540e",
          "filename": ""
        },
        {
          "method": "<futures::future::map_err::MapErr<A,F> as futures::future::Future>::poll::hfa0f91984607d4e8",
          "filename": ""
        },
        {
          "method": "<futures::future::map::Map<A,F> as futures::future::Future>::poll::h4934b71eb53f71a3",
          "filename": ""
        },
        {
          "method": "futures::future::chain::Chain<A,B,C>::poll::h71d6c440b50f4359",
          "filename": ""
        },
        {
          "method": "futures::future::chain::Chain<A,B,C>::poll::hd3fe1a06fbd9db6b",
          "filename": ""
        },
        {
          "method": "<futures::future::poll_fn::PollFn<F> as futures::future::Future>::poll::h84a35bb95baa4eaa",
          "filename": ""
        },
        {
          "method": "<hyper::client::ResponseFuture as futures::future::Future>::poll::h9088ea04a6a9a727",
          "filename": ""
        },
        {
          "method": "<futures::future::map::Map<A,F> as futures::future::Future>::poll::h3e432334c050d77b",
          "filename": ""
        },
        {
          "method": "<futures::future::map_err::MapErr<A,F> as futures::future::Future>::poll::hfac78dc6d9fd7b21",
          "filename": ""
        },
        {
          "method": "futures::task_impl::std::set::h7edfc0098bc68c22",
          "filename": ""
        },
        {
          "method": "std::thread::local::LocalKey<T>::with::he853f51dd671a8e7",
          "filename": ""
        },
        {
          "method": "tokio_current_thread::Entered<P>::block_on::hfce4221cafa84bff",
          "filename": ""
        },
        {
          "method": "tokio_executor::global::with_default::h1b74ce8974ffcfbe",
          "filename": ""
        },
        {
          "method": "tokio_timer::timer::handle::with_default::h5e10e3d7fcaffb21",
          "filename": ""
        },
        {
          "method": "tokio_reactor::with_default::h65ab2eac53ad4746",
          "filename": ""
        },
        {
          "method": "tokio::runtime::current_thread::runtime::Runtime::block_on::hf78c57ace46f88d2",
          "filename": ""
        },
        {
          "method": "std::sys_common::backtrace::__rust_begin_short_backtrace::hf7994801795d86a3",
          "filename": ""
        },
        {
          "method": "core::ops::function::FnOnce::call_once{{vtable.shim}}::h3052f9825eb4eaf4",
          "filename": ""
        },
        {
          "method": "<alloc::boxed::Box<F,A> as core::ops::function::FnOnce<Args>>::call_once::h61144a2be4ee36d8",
          "lineno": 1521,
          "filename": "/rustc/2fd73fabe469357a12c2c974c140f67e7cdd76d0/library/alloc/src/boxed.rs"
        },
        {
          "method": "<alloc::boxed::Box<F,A> as core::ops::function::FnOnce<Args>>::call_once::hcf5d395fdd120c17",
          "lineno": 1521,
          "filename": "/rustc/2fd73fabe469357a12c2c974c140f67e7cdd76d0/library/alloc/src/boxed.rs"
        },
        {
          "method": "std::sys::unix::thread::Thread::new::thread_start::hb5e40d3d934ebb7a",
          "lineno": 71,
          "filename": "/rustc/2fd73fabe469357a12c2c974c140f67e7cdd76d0/library/std/src/sys/unix/thread.rs"
        },
        {
          "method": "start_thread",
          "filename": ""
        },
        {
          "method": "clone",
          "filename": ""
        }
      ],
      "exception": {
        "message": "dispatch dropped without returning error",
        "class": "<panic>",
        "description": "dispatch dropped without returning error"
      }
    }
  },
  "uuid": "f4c92254bf3e416ab6fb8e7db0d9b4ce",
  "language": "rust",
  "level": "error",
  "timestamp": 1630532885,
  "title": "dispatch dropped without returning error",
  "custom": {},
  "environment": "PRODUCTION",
  "framework": "unknown",
  "metadata": {}
}
We have an Actix app (the actix::init_application in the trace) that calls report_panics! at startup, and then, when an error occurs in any of the endpoint handlers, we do:
rollbar.build_report()
.from_message(&format!("{}", error))
.with_level("error")
.with_metadata(metadata_json)
.send();
We aren't using hyper anywhere else, aside from indirectly through rollbar-rs, so from what I can gather from the above trace, the rollbar.build_report()...send(); code panics within hyper with the "dispatch dropped without returning error" message, and that panic is caught in the hook set up by report_panics! and then successfully sent to Rollbar.
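For context, the catching side is just a process-wide panic hook. A rough, illustrative sketch of what report_panics! presumably boils down to (this is not the macro's actual expansion, just the shape of it):

use std::panic;

fn install_panic_hook() {
    // Illustrative only: a hook in this spirit is what forwards panics to Rollbar;
    // the real report_panics! macro in rollbar-rs may differ in detail.
    panic::set_hook(Box::new(|panic_info| {
        let message = panic_info.to_string();
        // ...build a Rollbar report from `message` and send it; that send path is
        // where the hyper client from the trace above gets involved...
        eprintln!("forwarding panic to Rollbar: {}", message);
    }));
}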
In the aforementioned hyper issue, @seanmonstar states (IIUC) that this panic can arise when the tokio executor drops the background tasks spawned by the hyper client before those tasks have managed to determine whether the connection was closed, and that this can happen when the client is used from multiple executors.
I see the following in rollbar-rs (lines 521 to 537 at commit b4ad68a):
let job = self
    .http_client
    .request(request)
    .map(|res| Some(ResponseStatus::from(res.status())))
    .map_err(|error| {
        println!("Error while sending a report to Rollbar.");
        print!("The error returned by Rollbar was: {:?}.\n\n", error);
        None::<ResponseStatus>
    });
thread::spawn(move || {
    current_thread::Runtime::new()
        .unwrap()
        .block_on(job)
        .unwrap()
})
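If the cause is what the hyper issue describes, the risky part would be that each send() drives the shared http_client on a brand-new, throwaway current_thread::Runtime that is dropped as soon as block_on returns. A hedged, self-contained illustration of that pattern (not rollbar-rs code; the client and URL are placeholders):

use tokio::runtime::current_thread;

fn main() {
    // The same long-lived hyper client (with its connection pool) is driven from
    // a fresh, short-lived runtime on every call, mirroring the pattern quoted above.
    let client = hyper::Client::new();

    for _ in 0..2 {
        let response = client.get("http://example.com".parse().unwrap());
        let mut runtime = current_thread::Runtime::new().unwrap();
        // block_on returns once the response future resolves; dropping `runtime`
        // right after also drops any background connection task hyper spawned
        // onto it. If a later request picks up a pooled connection whose task is
        // gone, the dispatch channel is closed and hyper panics with
        // "dispatch dropped without returning error".
        let _ = runtime.block_on(response);
    }
}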
Maybe there's something about how the client is used in conjunction with tokio's current_thread::Runtime that's not safe when reusing the same rollbar client (which in turn means reusing the same hyper client)?
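If that hypothesis is right, one workaround might be to keep the hyper client and the runtime that drives it together on a single dedicated thread, feeding it work over a channel, so the client's background tasks are never orphaned by a short-lived per-report runtime. A rough sketch under that assumption (the names and the plain-HTTP client are illustrative, not rollbar-rs API):

use std::sync::mpsc;
use std::thread;

use hyper::{Body, Client, Request};
use tokio::runtime::current_thread;

fn spawn_report_worker() -> mpsc::Sender<Request<Body>> {
    let (sender, receiver) = mpsc::channel::<Request<Body>>();

    thread::spawn(move || {
        // One thread owns both the client and the runtime, so every request is
        // polled on the same executor that the client's background connection
        // tasks are spawned onto. In practice this would be the TLS-enabled
        // client rollbar-rs already builds; a plain client keeps the sketch small.
        let client = Client::new();
        let mut runtime = current_thread::Runtime::new().expect("runtime setup failed");

        // The loop ends once every Sender handle has been dropped.
        for request in receiver {
            match runtime.block_on(client.request(request)) {
                Ok(response) => println!("Rollbar responded with {}.", response.status()),
                Err(error) => println!("Error while sending a report to Rollbar: {:?}", error),
            }
        }
    });

    sender
}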