lib/siphash.c: mark expected switch fall-throughs

In preparation for enabling -Wimplicit-fallthrough, mark the switch cases
where we expect to fall through.

This patch aims to suppress up to 18 missing-break-in-switch false
positives on some architectures.

Cc: Gustavo A. R. Silva <gustavo@embeddedor.com>
Cc: Kees Cook <keescook@chromium.org>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Reviewed-by: Jason A. Donenfeld <Jason@zx2c4.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

--- a/lib/siphash.c
+++ b/lib/siphash.c
@@ -68,11 +68,11 @@ u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key)
 						  bytemask_from_count(left)));
 #else
 	switch (left) {
-	case 7: b |= ((u64)end[6]) << 48;
-	case 6: b |= ((u64)end[5]) << 40;
-	case 5: b |= ((u64)end[4]) << 32;
+	case 7: b |= ((u64)end[6]) << 48; /* fall through */
+	case 6: b |= ((u64)end[5]) << 40; /* fall through */
+	case 5: b |= ((u64)end[4]) << 32; /* fall through */
 	case 4: b |= le32_to_cpup(data); break;
-	case 3: b |= ((u64)end[2]) << 16;
+	case 3: b |= ((u64)end[2]) << 16; /* fall through */
 	case 2: b |= le16_to_cpup(data); break;
 	case 1: b |= end[0];
 	}
@@ -101,11 +101,11 @@ u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key)
 						  bytemask_from_count(left)));
 #else
 	switch (left) {
-	case 7: b |= ((u64)end[6]) << 48;
-	case 6: b |= ((u64)end[5]) << 40;
-	case 5: b |= ((u64)end[4]) << 32;
+	case 7: b |= ((u64)end[6]) << 48; /* fall through */
+	case 6: b |= ((u64)end[5]) << 40; /* fall through */
+	case 5: b |= ((u64)end[4]) << 32; /* fall through */
 	case 4: b |= get_unaligned_le32(end); break;
-	case 3: b |= ((u64)end[2]) << 16;
+	case 3: b |= ((u64)end[2]) << 16; /* fall through */
 	case 2: b |= get_unaligned_le16(end); break;
 	case 1: b |= end[0];
 	}
@@ -268,11 +268,11 @@ u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
 						  bytemask_from_count(left)));
 #else
 	switch (left) {
-	case 7: b |= ((u64)end[6]) << 48;
-	case 6: b |= ((u64)end[5]) << 40;
-	case 5: b |= ((u64)end[4]) << 32;
+	case 7: b |= ((u64)end[6]) << 48; /* fall through */
+	case 6: b |= ((u64)end[5]) << 40; /* fall through */
+	case 5: b |= ((u64)end[4]) << 32; /* fall through */
 	case 4: b |= le32_to_cpup(data); break;
-	case 3: b |= ((u64)end[2]) << 16;
+	case 3: b |= ((u64)end[2]) << 16; /* fall through */
 	case 2: b |= le16_to_cpup(data); break;
 	case 1: b |= end[0];
 	}
@@ -301,11 +301,11 @@ u32 __hsiphash_unaligned(const void *data, size_t len,
 						  bytemask_from_count(left)));
 #else
 	switch (left) {
-	case 7: b |= ((u64)end[6]) << 48;
-	case 6: b |= ((u64)end[5]) << 40;
-	case 5: b |= ((u64)end[4]) << 32;
+	case 7: b |= ((u64)end[6]) << 48; /* fall through */
+	case 6: b |= ((u64)end[5]) << 40; /* fall through */
+	case 5: b |= ((u64)end[4]) << 32; /* fall through */
 	case 4: b |= get_unaligned_le32(end); break;
-	case 3: b |= ((u64)end[2]) << 16;
+	case 3: b |= ((u64)end[2]) << 16; /* fall through */
 	case 2: b |= get_unaligned_le16(end); break;
 	case 1: b |= end[0];
 	}
@@ -431,7 +431,7 @@ u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
 		v0 ^= m;
 	}
 	switch (left) {
-	case 3: b |= ((u32)end[2]) << 16;
+	case 3: b |= ((u32)end[2]) << 16; /* fall through */
 	case 2: b |= le16_to_cpup(data); break;
 	case 1: b |= end[0];
 	}
@@ -454,7 +454,7 @@ u32 __hsiphash_unaligned(const void *data, size_t len,
 		v0 ^= m;
 	}
 	switch (left) {
-	case 3: b |= ((u32)end[2]) << 16;
+	case 3: b |= ((u32)end[2]) << 16; /* fall through */
 	case 2: b |= get_unaligned_le16(end); break;
 	case 1: b |= end[0];
 	}
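
For context, a minimal self-contained sketch (not kernel code; the function and
buffer names are made up for illustration) of what these annotations buy: the
switch below mimics the siphash tail-loading pattern, and with GCC's
-Wimplicit-fallthrough=3 the "fall through" comments keep the intentional
fall-throughs from being reported, while deleting them makes each unterminated
case warn.

/*
 * tail_bytes() is an illustrative stand-in, not code from lib/siphash.c:
 * it ORs the last 1-3 bytes of a buffer into b, relying on intentional
 * case fall-through just like the siphash tail handling above.
 */
#include <stdio.h>

static unsigned long tail_bytes(const unsigned char *end, int left)
{
	unsigned long b = 0;

	switch (left) {
	case 3: b |= ((unsigned long)end[2]) << 16; /* fall through */
	case 2: b |= ((unsigned long)end[1]) << 8;  /* fall through */
	case 1: b |= end[0];
	}
	return b;
}

int main(void)
{
	static const unsigned char buf[3] = { 0x01, 0x02, 0x03 };

	/* 0x03 << 16 | 0x02 << 8 | 0x01 = 0x030201 */
	printf("0x%lx\n", tail_bytes(buf, 3));
	return 0;
}

Level 3 is the strictness the kernel build used for this warning: unlike level
1, it only accepts a limited set of comment patterns as fall-through markers,
which is why the patch spells the annotation as /* fall through */ rather than
relying on an arbitrary comment.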