Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix clippy::cast_possible_wrap #1476

Open
wants to merge 1 commit into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .bazelrc
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@ build --aspects=@rules_rust//rust:defs.bzl%rustfmt_aspect
build --aspects=@rules_rust//rust:defs.bzl%rust_clippy_aspect

# TODO(aaronmondal): Extend these flags until we can run with clippy::pedantic.
build --@rules_rust//:clippy_flags=-Dwarnings,-Dclippy::uninlined_format_args,-Dclippy::manual_string_new,-Dclippy::manual_let_else,-Dclippy::single_match_else,-Dclippy::redundant_closure_for_method_calls,-Dclippy::semicolon_if_nothing_returned,-Dclippy::unreadable_literal,-Dclippy::range_plus_one,-Dclippy::inconsistent_struct_constructor,-Dclippy::match_wildcard_for_single_variants,-Dclippy::implicit_clone,-Dclippy::needless_pass_by_value,-Dclippy::explicit_deref_methods,-Dclippy::trivially_copy_pass_by_ref,-Dclippy::unnecessary_wraps,-Dclippy::cast_lossless,-Dclippy::map_unwrap_or,-Dclippy::ref_as_ptr,-Dclippy::inline_always,-Dclippy::redundant_else,-Dclippy::return_self_not_must_use,-Dclippy::match_same_arms,-Dclippy::explicit_iter_loop,-Dclippy::items_after_statements,-Dclippy::explicit_into_iter_loop,-Dclippy::stable_sort_primitive,-Dclippy::ptr_as_ptr,-Dclippy::needless_raw_string_hashes
build --@rules_rust//:clippy_flags=-Dwarnings,-Dclippy::uninlined_format_args,-Dclippy::manual_string_new,-Dclippy::manual_let_else,-Dclippy::single_match_else,-Dclippy::redundant_closure_for_method_calls,-Dclippy::semicolon_if_nothing_returned,-Dclippy::unreadable_literal,-Dclippy::range_plus_one,-Dclippy::inconsistent_struct_constructor,-Dclippy::match_wildcard_for_single_variants,-Dclippy::implicit_clone,-Dclippy::needless_pass_by_value,-Dclippy::explicit_deref_methods,-Dclippy::trivially_copy_pass_by_ref,-Dclippy::unnecessary_wraps,-Dclippy::cast_lossless,-Dclippy::map_unwrap_or,-Dclippy::ref_as_ptr,-Dclippy::inline_always,-Dclippy::redundant_else,-Dclippy::return_self_not_must_use,-Dclippy::match_same_arms,-Dclippy::explicit_iter_loop,-Dclippy::items_after_statements,-Dclippy::explicit_into_iter_loop,-Dclippy::stable_sort_primitive,-Dclippy::ptr_as_ptr,-Dclippy::needless_raw_string_hashes,-Dclippy::cast_possible_wrap
build --@rules_rust//:clippy.toml=//:clippy.toml

test --@rules_rust//:rustfmt.toml=//:.rustfmt.toml
Expand Down
9 changes: 6 additions & 3 deletions nativelink-service/src/bytestream_server.rs
Original file line number Diff line number Diff line change
Expand Up @@ -508,7 +508,7 @@ impl ByteStreamServer {
active_stream_guard.graceful_finish();

Ok(Response::new(WriteResponse {
committed_size: expected_size as i64,
committed_size: expected_size.try_into().unwrap_or(i64::MAX),
}))
}

Expand Down Expand Up @@ -547,7 +547,10 @@ impl ByteStreamServer {
let active_uploads = self.active_uploads.lock();
if let Some((received_bytes, _maybe_idle_stream)) = active_uploads.get(uuid.as_ref()) {
return Ok(Response::new(QueryWriteStatusResponse {
committed_size: received_bytes.load(Ordering::Acquire) as i64,
committed_size: received_bytes
.load(Ordering::Acquire)
.try_into()
.unwrap_or(i64::MAX),
// If we are in the active_uploads map, but the value is None,
// it means the stream is not complete.
complete: false,
Expand All @@ -567,7 +570,7 @@ impl ByteStreamServer {
}));
};
Ok(Response::new(QueryWriteStatusResponse {
committed_size: item_size as i64,
committed_size: item_size.try_into().unwrap_or(i64::MAX),
complete: true,
}))
}
Expand Down
2 changes: 1 addition & 1 deletion nativelink-service/src/cas_server.rs
Original file line number Diff line number Diff line change
Expand Up @@ -277,7 +277,7 @@ impl CasServer {
}
if page_token_matched {
directories.push(directory);
if directories.len() as i32 == page_size {
if directories.len().try_into().unwrap_or(i32::MAX) == page_size {
break;
}
}
Expand Down
4 changes: 3 additions & 1 deletion nativelink-service/tests/ac_server_test.rs
Original file line number Diff line number Diff line change
Expand Up @@ -203,7 +203,9 @@ async fn one_item_update_test() -> Result<(), Box<dyn std::error::Error>> {
..Default::default()
};

let size_bytes = get_encoded_proto_size(&action_result)? as i64;
let size_bytes = get_encoded_proto_size(&action_result)?
.try_into()
.unwrap_or(i64::MAX);

let raw_response = update_action_result(
&ac_server,
Expand Down
2 changes: 1 addition & 1 deletion nativelink-service/tests/bep_server_test.rs
Original file line number Diff line number Diff line change
Expand Up @@ -298,7 +298,7 @@ async fn publish_build_tool_event_stream_test() -> Result<(), Box<dyn std::error
// Send off the requests and validate the responses.
for (sequence_number, request) in requests.iter().enumerate().map(|(i, req)| {
// Sequence numbers are 1-indexed, while `.enumerate()` indexes from 0.
(i as i64 + 1, req)
(i.try_into().unwrap_or(i64::MAX).saturating_add(1), req)
}) {
let encoded_request = encode_stream_proto(request)?;
request_tx.send(Frame::data(encoded_request)).await?;
Expand Down
28 changes: 14 additions & 14 deletions nativelink-service/tests/bytestream_server_test.rs
Original file line number Diff line number Diff line change
Expand Up @@ -224,13 +224,13 @@ pub async fn chunked_stream_receives_all_data() -> Result<(), Box<dyn std::error
.await?;

// Write an empty set of data (clients are allowed to do this).
write_request.write_offset = BYTE_SPLIT_OFFSET as i64;
write_request.write_offset = BYTE_SPLIT_OFFSET.try_into().unwrap_or(i64::MAX);
write_request.data = vec![].into();
tx.send(Frame::data(encode_stream_proto(&write_request)?))
.await?;

// Write final bit of data.
write_request.write_offset = BYTE_SPLIT_OFFSET as i64;
write_request.write_offset = BYTE_SPLIT_OFFSET.try_into().unwrap_or(i64::MAX);
write_request.data = raw_data[BYTE_SPLIT_OFFSET..].into();
write_request.finish_write = true;
tx.send(Frame::data(encode_stream_proto(&write_request)?))
Expand Down Expand Up @@ -317,7 +317,7 @@ pub async fn resume_write_success() -> Result<(), Box<dyn std::error::Error>> {
make_stream_and_writer_spawn(bs_server, Some(CompressionEncoding::Gzip));
{
// Write the remainder of our data.
write_request.write_offset = BYTE_SPLIT_OFFSET as i64;
write_request.write_offset = BYTE_SPLIT_OFFSET.try_into().unwrap_or(i64::MAX);
write_request.finish_write = true;
write_request.data = WRITE_DATA[BYTE_SPLIT_OFFSET..].into();
tx.send(Frame::data(encode_stream_proto(&write_request)?))
Expand Down Expand Up @@ -398,7 +398,7 @@ pub async fn restart_write_success() -> Result<(), Box<dyn std::error::Error>> {
}
{
// Write the remainder of our data.
write_request.write_offset = BYTE_SPLIT_OFFSET as i64;
write_request.write_offset = BYTE_SPLIT_OFFSET.try_into().unwrap_or(i64::MAX);
write_request.finish_write = true;
write_request.data = WRITE_DATA[BYTE_SPLIT_OFFSET..].into();
tx.send(Frame::data(encode_stream_proto(&write_request)?))
Expand Down Expand Up @@ -477,7 +477,7 @@ pub async fn restart_mid_stream_write_success() -> Result<(), Box<dyn std::error
}
{
// Write the remainder of our data.
write_request.write_offset = BYTE_SPLIT_OFFSET as i64;
write_request.write_offset = BYTE_SPLIT_OFFSET.try_into().unwrap_or(i64::MAX);
write_request.finish_write = true;
write_request.data = WRITE_DATA[BYTE_SPLIT_OFFSET..].into();
tx.send(Frame::data(encode_stream_proto(&write_request)?))
Expand Down Expand Up @@ -541,7 +541,7 @@ pub async fn ensure_write_is_not_done_until_write_request_is_set(
}
{
// Write our EOF.
write_request.write_offset = WRITE_DATA.len() as i64;
write_request.write_offset = WRITE_DATA.len().try_into().unwrap_or(i64::MAX);
write_request.finish_write = true;
write_request.data.clear();
tx.send(Frame::data(encode_stream_proto(&write_request)?))
Expand All @@ -562,7 +562,7 @@ pub async fn ensure_write_is_not_done_until_write_request_is_set(
.err_tip(|| "bs_server.write returned an error")?
.into_inner(),
WriteResponse {
committed_size: WRITE_DATA.len() as i64
committed_size: WRITE_DATA.len().try_into().unwrap_or(i64::MAX)
},
"Expected Responses to match"
);
Expand Down Expand Up @@ -611,7 +611,7 @@ pub async fn out_of_order_data_fails() -> Result<(), Box<dyn std::error::Error>>
}
{
// Write data it already has.
write_request.write_offset = (BYTE_SPLIT_OFFSET - 1) as i64;
write_request.write_offset = (BYTE_SPLIT_OFFSET - 1).try_into().unwrap_or(i64::MAX);
write_request.data = WRITE_DATA[(BYTE_SPLIT_OFFSET - 1)..].into();
tx.send(Frame::data(encode_stream_proto(&write_request)?))
.await?;
Expand All @@ -622,7 +622,7 @@ pub async fn out_of_order_data_fails() -> Result<(), Box<dyn std::error::Error>>
);
{
// Make sure stream was closed.
write_request.write_offset = (BYTE_SPLIT_OFFSET - 1) as i64;
write_request.write_offset = (BYTE_SPLIT_OFFSET - 1).try_into().unwrap_or(i64::MAX);
write_request.data = WRITE_DATA[(BYTE_SPLIT_OFFSET - 1)..].into();
assert!(
tx.send(Frame::data(encode_stream_proto(&write_request)?))
Expand Down Expand Up @@ -745,7 +745,7 @@ pub async fn chunked_stream_reads_small_set_of_data() -> Result<(), Box<dyn std:
let read_request = ReadRequest {
resource_name: format!("{}/blobs/{}/{}", INSTANCE_NAME, HASH1, VALUE1.len()),
read_offset: 0,
read_limit: VALUE1.len() as i64,
read_limit: VALUE1.len().try_into().unwrap_or(i64::MAX),
};
let mut read_stream = bs_server
.read(Request::new(read_request))
Expand Down Expand Up @@ -790,7 +790,7 @@ pub async fn chunked_stream_reads_10mb_of_data() -> Result<(), Box<dyn std::erro
let read_request = ReadRequest {
resource_name: format!("{}/blobs/{}/{}", INSTANCE_NAME, HASH1, raw_data.len()),
read_offset: 0,
read_limit: raw_data.len() as i64,
read_limit: raw_data.len().try_into().unwrap_or(i64::MAX),
};
let mut read_stream = bs_server
.read(Request::new(read_request))
Expand Down Expand Up @@ -920,14 +920,14 @@ pub async fn test_query_write_status_smoke_test() -> Result<(), Box<dyn std::err
assert_eq!(
data.into_inner(),
QueryWriteStatusResponse {
committed_size: write_request.data.len() as i64,
committed_size: write_request.data.len().try_into().unwrap_or(i64::MAX),
complete: false,
}
);
}

// Finish writing our data.
write_request.write_offset = BYTE_SPLIT_OFFSET as i64;
write_request.write_offset = BYTE_SPLIT_OFFSET.try_into().unwrap_or(i64::MAX);
write_request.data = raw_data[BYTE_SPLIT_OFFSET..].into();
write_request.finish_write = true;
tx.send(Frame::data(encode_stream_proto(&write_request)?))
Expand All @@ -942,7 +942,7 @@ pub async fn test_query_write_status_smoke_test() -> Result<(), Box<dyn std::err
assert_eq!(
data.into_inner(),
QueryWriteStatusResponse {
committed_size: raw_data.len() as i64,
committed_size: raw_data.len().try_into().unwrap_or(i64::MAX),
complete: true,
}
);
Expand Down
22 changes: 11 additions & 11 deletions nativelink-service/tests/cas_server_test.rs
Original file line number Diff line number Diff line change
Expand Up @@ -108,7 +108,7 @@ async fn store_one_item_existence() -> Result<(), Box<dyn std::error::Error>> {
instance_name: INSTANCE_NAME.to_string(),
blob_digests: vec![Digest {
hash: HASH1.to_string(),
size_bytes: VALUE.len() as i64,
size_bytes: VALUE.len().try_into().unwrap_or(i64::MAX),
}],
digest_function: digest_function::Value::Sha256.into(),
}))
Expand Down Expand Up @@ -136,15 +136,15 @@ async fn has_three_requests_one_bad_hash() -> Result<(), Box<dyn std::error::Err
blob_digests: vec![
Digest {
hash: HASH1.to_string(),
size_bytes: VALUE.len() as i64,
size_bytes: VALUE.len().try_into().unwrap_or(i64::MAX),
},
Digest {
hash: BAD_HASH.to_string(),
size_bytes: VALUE.len() as i64,
size_bytes: VALUE.len().try_into().unwrap_or(i64::MAX),
},
Digest {
hash: HASH1.to_string(),
size_bytes: VALUE.len() as i64,
size_bytes: VALUE.len().try_into().unwrap_or(i64::MAX),
},
],
digest_function: digest_function::Value::Sha256.into(),
Expand All @@ -169,7 +169,7 @@ async fn update_existing_item() -> Result<(), Box<dyn std::error::Error>> {

let digest = Digest {
hash: HASH1.to_string(),
size_bytes: VALUE2.len() as i64,
size_bytes: VALUE2.len().try_into().unwrap_or(i64::MAX),
};

store
Expand Down Expand Up @@ -226,11 +226,11 @@ async fn batch_read_blobs_read_two_blobs_success_one_fail() -> Result<(), Box<dy

let digest1 = Digest {
hash: HASH1.to_string(),
size_bytes: VALUE1.len() as i64,
size_bytes: VALUE1.len().try_into().unwrap_or(i64::MAX),
};
let digest2 = Digest {
hash: HASH2.to_string(),
size_bytes: VALUE2.len() as i64,
size_bytes: VALUE2.len().try_into().unwrap_or(i64::MAX),
};
{
// Insert dummy data.
Expand Down Expand Up @@ -584,11 +584,11 @@ async fn batch_update_blobs_two_items_existence_with_third_missing(

let digest1 = Digest {
hash: HASH1.to_string(),
size_bytes: VALUE1.len() as i64,
size_bytes: VALUE1.len().try_into().unwrap_or(i64::MAX),
};
let digest2 = Digest {
hash: HASH2.to_string(),
size_bytes: VALUE2.len() as i64,
size_bytes: VALUE2.len().try_into().unwrap_or(i64::MAX),
};

{
Expand Down Expand Up @@ -649,12 +649,12 @@ async fn batch_update_blobs_two_items_existence_with_third_missing(
blob_digests: vec![
Digest {
hash: HASH1.to_string(),
size_bytes: VALUE1.len() as i64,
size_bytes: VALUE1.len().try_into().unwrap_or(i64::MAX),
},
missing_digest.clone(),
Digest {
hash: HASH2.to_string(),
size_bytes: VALUE2.len() as i64,
size_bytes: VALUE2.len().try_into().unwrap_or(i64::MAX),
},
],
digest_function: digest_function::Value::Sha256.into(),
Expand Down
5 changes: 3 additions & 2 deletions nativelink-store/src/grpc_store.rs
Original file line number Diff line number Diff line change
Expand Up @@ -628,7 +628,7 @@ impl StoreDriver for GrpcStore {
};

let write_offset = local_state.bytes_received;
local_state.bytes_received += data.len() as i64;
local_state.bytes_received += data.len().try_into().unwrap_or(i64::MAX);

Some((
Ok(WriteRequest {
Expand Down Expand Up @@ -729,7 +729,8 @@ impl StoreDriver for GrpcStore {
))
}
};
let length = data.len() as i64;
let length = data.len().try_into().unwrap_or(i64::MAX);

// This is the usual exit from the loop at EOF.
if length == 0 {
let eof_result = local_state
Expand Down
7 changes: 5 additions & 2 deletions nativelink-store/src/s3_store.rs
Original file line number Diff line number Diff line change
Expand Up @@ -343,7 +343,10 @@ where
Ok(head_object_output) => {
if self.consider_expired_after_s != 0 {
if let Some(last_modified) = head_object_output.last_modified {
let now_s = (self.now_fn)().unix_timestamp() as i64;
let now_s = (self.now_fn)()
.unix_timestamp()
.try_into()
.unwrap_or(i64::MAX);
if last_modified.secs() + self.consider_expired_after_s <= now_s {
return Some((RetryResult::Ok(None), state));
}
Expand Down Expand Up @@ -449,7 +452,7 @@ where
.put_object()
.bucket(&self.bucket)
.key(s3_path.clone())
.content_length(sz as i64)
.content_length(sz.try_into().unwrap_or(i64::MAX))
.body(ByteStream::from_body_1_x(BodyWrapper {
reader: rx,
size: sz,
Expand Down
Loading
Loading