From 7db9a00c4d9bf4996177e4946241885234afb5c0 Mon Sep 17 00:00:00 2001
From: Steve French <stfrench@microsoft.com>
Date: Fri, 26 Jul 2024 16:30:23 -0500
Subject: [PATCH] smb3: mark compression as CONFIG_EXPERIMENTAL and fix missing
compression operation

Move the SMB3.1.1 compression code under an experimental config
option and fix the compress mount option. Implement the unchained
LZ77 "plain" compression algorithm as per the MS-XCA specification,
section "2.3 Plain LZ77 Compression Algorithm Details".
Signed-off-by: Enzo Matsumiya <ematsumiya@suse.de>
Signed-off-by: Steve French <stfrench@microsoft.com>
---
fs/smb/client/Kconfig | 14 ++
fs/smb/client/Makefile | 2 +
fs/smb/client/cifs_debug.c | 7 +-
fs/smb/client/cifsglob.h | 3 +-
fs/smb/client/compress.c | 50 ++++++
fs/smb/client/compress.h | 109 +++++++++++++
fs/smb/client/compress/lz77.c | 211 +++++++++++++++++++++++++
fs/smb/client/compress/lz77.h | 285 ++++++++++++++++++++++++++++++++++
fs/smb/client/fs_context.c | 7 +-
9 files changed, 684 insertions(+), 4 deletions(-)
create mode 100644 fs/smb/client/compress.c
create mode 100644 fs/smb/client/compress.h
create mode 100644 fs/smb/client/compress/lz77.c
create mode 100644 fs/smb/client/compress/lz77.h
@@ -204,4 +204,18 @@ config CIFS_ROOT
Most people say N here.
+config CIFS_COMPRESSION
+ bool "SMB message compression (Experimental)"
+ depends on CIFS
+ default n
+ help
+ Enables over-the-wire message compression for SMB 3.1.1
+ mounts when negotiated with the server.
+
+ Only write requests with data size >= PAGE_SIZE will be
+ compressed to avoid wasting resources.
+
+ Say Y here if you want SMB traffic to be compressed.
+ If unsure, say N.
+
endif
@@ -33,3 +33,5 @@ cifs-$(CONFIG_CIFS_SMB_DIRECT) += smbdirect.o
cifs-$(CONFIG_CIFS_ROOT) += cifsroot.o
cifs-$(CONFIG_CIFS_ALLOW_INSECURE_LEGACY) += smb1ops.o cifssmb.o
+
+cifs-$(CONFIG_CIFS_COMPRESSION) += compress.o compress/lz77.o
@@ -349,6 +349,9 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
seq_printf(m, ",ACL");
#ifdef CONFIG_CIFS_SWN_UPCALL
seq_puts(m, ",WITNESS");
+#endif
+#ifdef CONFIG_CIFS_COMPRESSION
+ seq_puts(m, ",COMPRESSION");
#endif
seq_putc(m, '\n');
seq_printf(m, "CIFSMaxBufSize: %d\n", CIFSMaxBufSize);
@@ -475,7 +478,9 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
}
seq_puts(m, "\nCompression: ");
- if (!server->compression.requested)
+ if (!IS_ENABLED(CONFIG_CIFS_COMPRESSION))
+ seq_puts(m, "no built-in support");
+ else if (!server->compression.requested)
seq_puts(m, "disabled on mount");
else if (server->compression.enabled)
seq_printf(m, "enabled (%s)", compression_alg_str(server->compression.alg));
@@ -556,7 +556,7 @@ struct smb_version_operations {
bool (*dir_needs_close)(struct cifsFileInfo *);
long (*fallocate)(struct file *, struct cifs_tcon *, int, loff_t,
loff_t);
- /* init transform request - used for encryption for now */
+ /* init transform (compress/encrypt) request */
int (*init_transform_rq)(struct TCP_Server_Info *, int num_rqst,
struct smb_rqst *, struct smb_rqst *);
int (*is_transform_hdr)(void *buf);
@@ -1899,6 +1899,7 @@ static inline bool is_replayable_error(int error)
#define CIFS_HAS_CREDITS 0x0400 /* already has credits */
#define CIFS_TRANSFORM_REQ 0x0800 /* transform request before sending */
#define CIFS_NO_SRV_RSP 0x1000 /* there is no server response */
+#define CIFS_COMPRESS_REQ 0x4000 /* compress request before sending */
/* Security Flags: indicate type of session setup needed */
#define CIFSSEC_MAY_SIGN 0x00001
new file mode 100644
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024, SUSE LLC
+ *
+ * Authors: Enzo Matsumiya <ematsumiya@suse.de>
+ *
+ * This file implements I/O compression support for SMB2 messages (SMB 3.1.1 only).
+ * See compress/ for implementation details of each algorithm.
+ *
+ * References:
+ * MS-SMB2 "3.1.4.4 Compressing the Message"
+ * MS-SMB2 "3.1.5.3 Decompressing the Chained Message"
+ * MS-XCA - for details of the supported algorithms
+ */
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/uio.h>
+
+#include "cifsglob.h"
+#include "../common/smb2pdu.h"
+#include "cifsproto.h"
+#include "smb2proto.h"
+
+#include "compress/lz77.h"
+#include "compress.h"
+
+int smb_compress(void *buf, const void *data, size_t *len)
+{
+ struct smb2_compression_hdr *hdr;
+ size_t buf_len, data_len;
+ int ret;
+
+ buf_len = sizeof(struct smb2_write_req);
+ data_len = *len;
+ *len = 0;
+
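+ /*
+ * The SMB2 WRITE request header stays uncompressed in front of the
+ * payload; hdr->Offset holds its length so the receiver knows where
+ * the compressed data begins (MS-SMB2 3.1.4.4).
+ */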
+ hdr = buf;
+ hdr->ProtocolId = SMB2_COMPRESSION_TRANSFORM_ID;
+ hdr->OriginalCompressedSegmentSize = cpu_to_le32(data_len);
+ hdr->Offset = cpu_to_le32(buf_len);
+ hdr->Flags = SMB2_COMPRESSION_FLAG_NONE;
+ hdr->CompressionAlgorithm = SMB3_COMPRESS_LZ77;
+
+ /* XXX: add other algs here as they're implemented */
+ ret = lz77_compress(data, data_len, buf + SMB_COMPRESS_HDR_LEN + buf_len, &data_len);
+ if (!ret)
+ *len = SMB_COMPRESS_HDR_LEN + buf_len + data_len;
+
+ return ret;
+}
new file mode 100644
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2024, SUSE LLC
+ *
+ * Authors: Enzo Matsumiya <ematsumiya@suse.de>
+ *
+ * This file implements I/O compression support for SMB2 messages (SMB 3.1.1 only).
+ * See compress/ for implementation details of each algorithm.
+ *
+ * References:
+ * MS-SMB2 "3.1.4.4 Compressing the Message" - for compression details
+ * MS-SMB2 "3.1.5.3 Decompressing the Chained Message" - for decompression details
+ * MS-XCA - for details of the supported algorithms
+ */
+#ifndef _SMB_COMPRESS_H
+#define _SMB_COMPRESS_H
+
+#include <linux/uio.h>
+#include <linux/kernel.h>
+#include "../common/smb2pdu.h"
+#include "cifsglob.h"
+
+/* sizeof(smb2_compression_hdr) - sizeof(OriginalPayloadSize) */
+#define SMB_COMPRESS_HDR_LEN 16
+/* sizeof(smb2_compression_payload_hdr) - sizeof(OriginalPayloadSize) */
+#define SMB_COMPRESS_PAYLOAD_HDR_LEN 8
+#define SMB_COMPRESS_MIN_LEN PAGE_SIZE
+
+struct smb_compress_ctx {
+ struct TCP_Server_Info *server;
+ struct work_struct work;
+ struct mid_q_entry *mid;
+
+ void *buf; /* compressed data */
+ void *data; /* uncompressed data */
+ size_t len;
+};
+
+#ifdef CONFIG_CIFS_COMPRESSION
+int smb_compress(void *buf, const void *data, size_t *len);
+
+/**
+ * smb_compress_alg_valid() - Validate a compression algorithm.
+ * @alg: Compression algorithm to check.
+ * @valid_none: Whether the NONE algorithm should be considered valid.
+ *
+ * If @alg is SMB3_COMPRESS_NONE, this function returns @valid_none.
+ *
+ * Note that 'NONE' (0) compressor type is considered invalid in protocol
+ * negotiation, as it's never requested to/returned from the server.
+ *
+ * Return: true if @alg is valid/supported, false otherwise.
+ */
+static __always_inline bool smb_compress_alg_valid(__le16 alg, bool valid_none)
+{
+ if (alg == SMB3_COMPRESS_NONE)
+ return valid_none;
+
+ if (alg == SMB3_COMPRESS_LZ77 || alg == SMB3_COMPRESS_PATTERN)
+ return true;
+
+ return false;
+}
+
+/**
+ * should_compress() - Determines if a request (write) or the response to a
+ * request (read) should be compressed.
+ * @tcon: tcon the request is being sent to
+ * @buf: buffer with an SMB2 READ/WRITE request
+ *
+ * Return: true iff all of the following hold:
+ * - compression was successfully negotiated with the server
+ * - the server has enabled compression for the share
+ * - it's a read or write request
+ * - for writes, the request length is >= SMB_COMPRESS_MIN_LEN
+ *
+ * Return false otherwise.
+ */
+static __always_inline bool should_compress(const struct cifs_tcon *tcon, const void *buf)
+{
+ const struct smb2_hdr *shdr = buf;
+
+ if (!tcon || !tcon->ses || !tcon->ses->server)
+ return false;
+
+ if (!tcon->ses->server->compression.enabled)
+ return false;
+
+ if (!(tcon->share_flags & SMB2_SHAREFLAG_COMPRESS_DATA))
+ return false;
+
+ if (shdr->Command == SMB2_WRITE) {
+ const struct smb2_write_req *req = buf;
+
+ return (le32_to_cpu(req->Length) >= SMB_COMPRESS_MIN_LEN);
+ }
+
+ return (shdr->Command == SMB2_READ);
+}
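+/*
+ * Sketch of how a sender might consume these helpers (hypothetical
+ * caller; this patch only adds the helpers and CIFS_COMPRESS_REQ):
+ *
+ * if (should_compress(tcon, rqst->rq_iov[0].iov_base))
+ * flags |= CIFS_COMPRESS_REQ;
+ */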
+
+#else /* !CONFIG_CIFS_COMPRESSION */
+/*
+ * Stubs for when CONFIG_CIFS_COMPRESSION is disabled; they keep the
+ * callers building without per-site #ifdefs and must never be reached
+ * on an actual compression path.
+ */
+static inline int smb_compress(void *unused1, const void *unused2, size_t *unused3)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline bool smb_compress_alg_valid(__le16 unused1, bool unused2)
+{
+ return false;
+}
+
+static inline bool should_compress(const struct cifs_tcon *unused1, const void *unused2)
+{
+ return false;
+}
+#endif /* !CONFIG_CIFS_COMPRESSION */
+#endif /* _SMB_COMPRESS_H */
new file mode 100644
@@ -0,0 +1,211 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024, SUSE LLC
+ *
+ * Authors: Enzo Matsumiya <ematsumiya@suse.de>
+ *
+ * Implementation of the LZ77 "plain" compression algorithm, as per MS-XCA spec.
+ */
+#include <linux/slab.h>
+#include "lz77.h"
+
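+/* hash the LZ77_MATCH_MIN_LEN (3) bytes at @ptr into a table index */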
+static __always_inline u32 hash3(const u8 *ptr)
+{
+ return lz77_hash32(lz77_read32(ptr) & 0xffffff, LZ77_HASH_LOG);
+}
+
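+/*
+ * Encode a match per MS-XCA "plain" LZ77: a 16-bit token holds
+ * ((dist - 1) << 3) | min(len - 3, 7); lengths that do not fit in
+ * those 3 bits overflow into a nibble shared by two matches (@nib),
+ * then an extra byte, then 16- or 32-bit fields for huge matches.
+ */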
+static u8 *write_match(u8 *dst, u8 **nib, u32 dist, u32 len)
+{
+ len -= 3;
+ dist--;
+ dist <<= 3;
+
+ if (len < 7) {
+ lz77_write16(dst, dist + len);
+ return dst + 2;
+ }
+
+ dist |= 7;
+ lz77_write16(dst, dist);
+ dst += 2;
+ len -= 7;
+
+ if (!*nib) {
+ *nib = dst;
+ lz77_write8(dst, min_t(unsigned int, len, 15));
+ dst++;
+ } else {
+ **nib |= min_t(unsigned int, len, 15) << 4;
+ *nib = NULL;
+ }
+
+ if (len < 15)
+ return dst;
+
+ len -= 15;
+ if (len < 255) {
+ lz77_write8(dst, len);
+ return dst + 1;
+ }
+
+ lz77_write8(dst, 0xff);
+ dst++;
+
+ len += 7 + 15;
+ if (len <= 0xffff) {
+ lz77_write16(dst, len);
+ return dst + 2;
+ }
+
+ lz77_write16(dst, 0);
+ dst += 2;
+ lz77_write32(dst, len);
+
+ return dst + 4;
+}
+
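+/*
+ * Copy literals verbatim, shifting a 0 bit into the flags word for
+ * each byte; when the word fills up (32 elements), it is flushed to
+ * its reserved slot and a new 4-byte slot is reserved at @dst.
+ */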
+static u8 *write_literals(u8 *dst, const u8 *dst_end, const u8 *src, size_t count,
+ struct lz77_flags *flags)
+{
+ const u8 *end = src + count;
+
+ while (src < end) {
+ size_t c = lz77_min(count, 32 - flags->count);
+
+ if (dst + c >= dst_end)
+ return ERR_PTR(-EFAULT);
+
+ if (lz77_copy(dst, src, c))
+ return ERR_PTR(-EFAULT);
+
+ dst += c;
+ src += c;
+ count -= c;
+
+ flags->val <<= c;
+ flags->count += c;
+ if (flags->count == 32) {
+ lz77_write32(flags->pos, flags->val);
+ flags->count = 0;
+ flags->pos = dst;
+ dst += 4;
+ }
+ }
+
+ return dst;
+}
+
+static __always_inline bool is_valid_match(const u32 dist, const u32 len)
+{
+ return (dist >= LZ77_MATCH_MIN_DIST && dist < LZ77_MATCH_MAX_DIST) &&
+ (len >= LZ77_MATCH_MIN_LEN && len < LZ77_MATCH_MAX_LEN);
+}
+
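+/*
+ * Look up the last position whose 3 leading bytes hashed to the same
+ * table slot; slots hold offsets from @base (0xffffffff when unused,
+ * which can never be a valid earlier offset). Returns @cur on a miss.
+ */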
+static __always_inline const u8 *find_match(u32 *htable, const u8 *base, const u8 *cur,
+ const u8 *end, u32 *best_len)
+{
+ const u8 *match;
+ u32 hash;
+ size_t offset;
+
+ hash = hash3(cur);
+ offset = cur - base;
+
+ if (htable[hash] >= offset)
+ return cur;
+
+ match = base + htable[hash];
+ *best_len = lz77_match(match, cur, end);
+ if (is_valid_match(cur - match, *best_len))
+ return match;
+
+ return cur;
+}
+
+int lz77_compress(const u8 *src, size_t src_len, u8 *dst, size_t *dst_len)
+{
+ const u8 *srcp, *src_end, *anchor;
+ struct lz77_flags flags = { 0 };
+ u8 *dstp, *dst_end, *nib;
+ u32 *htable;
+ int ret;
+
+ srcp = src;
+ anchor = srcp;
+ src_end = src + src_len;
+
+ dstp = dst;
+ dst_end = dst + *dst_len;
+ flags.pos = dstp;
+ nib = NULL;
+
+ memset(dstp, 0, *dst_len);
+ dstp += 4;
+
+ htable = kvmalloc_array(LZ77_HASH_SIZE, sizeof(u32), GFP_KERNEL);
+ if (!htable)
+ return -ENOMEM;
+
+ /* fill hashtable with invalid offsets */
+ memset(htable, 0xff, LZ77_HASH_SIZE * sizeof(u32));
+
+ /* from here on, any error is because @dst_len reached >= @src_len */
+ ret = -EMSGSIZE;
+
+ /* main loop */
+ while (srcp < src_end) {
+ u32 hash, dist, len;
+ const u8 *match;
+
+ while (srcp + 3 < src_end) {
+ len = LZ77_MATCH_MIN_LEN - 1;
+ match = find_match(htable, src, srcp, src_end, &len);
+ hash = hash3(srcp);
+ htable[hash] = srcp - src;
+
+ if (likely(match < srcp)) {
+ dist = srcp - match;
+ break;
+ }
+
+ srcp++;
+ }
+
+ dstp = write_literals(dstp, dst_end, anchor, srcp - anchor, &flags);
+ if (IS_ERR(dstp))
+ goto err_free;
+
+ if (srcp + 3 >= src_end)
+ goto leftovers;
+
+ dstp = write_match(dstp, &nib, dist, len);
+ srcp += len;
+ anchor = srcp;
+
+ flags.val = (flags.val << 1) | 1;
+ flags.count++;
+ if (flags.count == 32) {
+ lz77_write32(flags.pos, flags.val);
+ flags.count = 0;
+ flags.pos = dstp;
+ dstp += 4;
+ }
+ }
+leftovers:
+ if (srcp < src_end) {
+ dstp = write_literals(dstp, dst_end, srcp, src_end - srcp, &flags);
+ if (IS_ERR(dstp))
+ goto err_free;
+ }
+
+ /* flush the final, partially filled flags word; unused bits are 1s */
+ flags.val <<= (32 - flags.count);
+ flags.val |= ((u64)1 << (32 - flags.count)) - 1;
+ lz77_write32(flags.pos, flags.val);
+
+ *dst_len = dstp - dst;
+ ret = 0;
+err_free:
+ kvfree(htable);
+
+ return ret;
+}
new file mode 100644
@@ -0,0 +1,285 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2024, SUSE LLC
+ *
+ * Authors: Enzo Matsumiya <ematsumiya@suse.de>
+ *
+ * Definitions and optimized helpers for LZ77 compression.
+ */
+#ifndef _SMB_COMPRESS_LZ77_H
+#define _SMB_COMPRESS_LZ77_H
+
+#ifdef CONFIG_CIFS_COMPRESSION
+#include <asm/ptrace.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/bitops.h> /* __ffs64() */
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+#include <asm-generic/unaligned.h>
+#endif
+
+#define LZ77_HASH_LOG 13
+#define LZ77_HASH_SIZE (1 << LZ77_HASH_LOG)
+#define LZ77_HASH_MASK lz77_hash_mask(LZ77_HASH_LOG)
+
+/* We can increase this for better compression (but worse performance). */
+#define LZ77_MATCH_MIN_LEN 3
+/* The spec's 32-bit length encoding allows more; S32_MAX is an arbitrary cap. */
+#define LZ77_MATCH_MAX_LEN S32_MAX
+/*
+ * Check this to ensure we don't match the current position, which would
+ * end up doing a verbatim copy of the input, and actually overflowing
+ * the output buffer because of the encoded metadata.
+ */
+#define LZ77_MATCH_MIN_DIST 1
+/* How far back in the buffer can we try to find a match (i.e. window size) */
+#define LZ77_MATCH_MAX_DIST 8192
+
+#define LZ77_STEPSIZE_16 sizeof(u16)
+#define LZ77_STEPSIZE_32 sizeof(u32)
+#define LZ77_STEPSIZE_64 sizeof(u64)
+
+struct lz77_flags {
+ u8 *pos;
+ size_t count;
+ u64 val; /* u64 so that "val <<= 32" (count == 0 on flush) stays defined */
+};
+
+static __always_inline u32 lz77_hash_mask(const unsigned int log2)
+{
+ return ((1 << log2) - 1);
+}
+
+static __always_inline u32 lz77_hash64(const u64 v, const unsigned int log2)
+{
+ const u64 prime5bytes = 889523592379ULL;
+
+ return (u32)(((v << 24) * prime5bytes) >> (64 - log2));
+}
+
+static __always_inline u32 lz77_hash32(const u32 v, const unsigned int log2)
+{
+ return ((v * 2654435769LL) >> (32 - log2)) & lz77_hash_mask(log2);
+}
+
+static __always_inline u32 lz77_log2(unsigned int x)
+{
+ return x ? ((u32)(31 - __builtin_clz(x))) : 0;
+}
+
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+static __always_inline u8 lz77_read8(const void *ptr)
+{
+ return *(u8 *)ptr;
+}
+
+static __always_inline u16 lz77_read16(const void *ptr)
+{
+ /* the LZ77 stream is little-endian regardless of host endianness */
+ return le16_to_cpu(*(const __le16 *)ptr);
+}
+
+static __always_inline u32 lz77_read32(const void *ptr)
+{
+ return le32_to_cpu(*(const __le32 *)ptr);
+}
+
+static __always_inline u64 lz77_read64(const void *ptr)
+{
+ return le64_to_cpu(*(const __le64 *)ptr);
+}
+
+static __always_inline void lz77_write8(void *ptr, const u8 v)
+{
+ *(u8 *)ptr = v;
+}
+
+static __always_inline void lz77_write16(void *ptr, const u16 v)
+{
+ /* mirror the reads: emit little-endian on any host */
+ *(__le16 *)ptr = cpu_to_le16(v);
+}
+
+static __always_inline void lz77_write32(void *ptr, const u32 v)
+{
+ *(__le32 *)ptr = cpu_to_le32(v);
+}
+
+static __always_inline void lz77_write64(void *ptr, const u64 v)
+{
+ *(__le64 *)ptr = cpu_to_le64(v);
+}
+
+static __always_inline void lz77_write_ptr16(void *ptr, const void *vp)
+{
+ *(u16 *)ptr = *(const u16 *)vp;
+}
+
+static __always_inline void lz77_write_ptr32(void *ptr, const void *vp)
+{
+ *(u32 *)ptr = *(const u32 *)vp;
+}
+
+static __always_inline void lz77_write_ptr64(void *ptr, const void *vp)
+{
+ *(u64 *)ptr = *(const u64 *)vp;
+}
+
+static __always_inline long lz77_copy(u8 *dst, const u8 *src, size_t count)
+{
+ return copy_from_kernel_nofault(dst, src, count);
+}
+#else /* !CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
+static __always_inline u8 lz77_read8(const void *ptr)
+{
+ return get_unaligned((u8 *)ptr);
+}
+
+static __always_inline u16 lz77_read16(const void *ptr)
+{
+ return lz77_read8(ptr) | (lz77_read8(ptr + 1) << 8);
+}
+
+static __always_inline u32 lz77_read32(const void *ptr)
+{
+ return lz77_read16(ptr) | (lz77_read16(ptr + 2) << 16);
+}
+
+static __always_inline u64 lz77_read64(const void *ptr)
+{
+ return lz77_read32(ptr) | ((u64)lz77_read32(ptr + 4) << 32);
+}
+
+static __always_inline void lz77_write8(void *ptr, const u8 v)
+{
+ put_unaligned(v, (u8 *)ptr);
+}
+
+static __always_inline void lz77_write16(void *ptr, const u16 v)
+{
+ lz77_write8(ptr, v & 0xff);
+ lz77_write8(ptr + 1, (v >> 8) & 0xff);
+}
+
+static __always_inline void lz77_write32(void *ptr, const u32 v)
+{
+ lz77_write16(ptr, v & 0xffff);
+ lz77_write16(ptr + 2, (v >> 16) & 0xffff);
+}
+
+static __always_inline void lz77_write64(void *ptr, const u64 v)
+{
+ lz77_write32(ptr, v & 0xffffffff);
+ lz77_write32(ptr + 4, (v >> 32) & 0xffffffff);
+}
+
+static __always_inline void lz77_write_ptr16(void *ptr, const void *vp)
+{
+ const u16 v = lz77_read16(vp);
+
+ lz77_write16(ptr, v);
+}
+
+static __always_inline void lz77_write_ptr32(void *ptr, const void *vp)
+{
+ const u32 v = lz77_read32(vp);
+
+ lz77_write32(ptr, v);
+}
+
+static __always_inline void lz77_write_ptr64(void *ptr, const void *vp)
+{
+ const u64 v = lz77_read64(vp);
+
+ lz77_write64(ptr, v);
+}
+
+static __always_inline long lz77_copy(u8 *dst, const u8 *src, size_t count)
+{
+ memcpy(dst, src, count);
+ return 0;
+}
+#endif /* !CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
+
+static __always_inline unsigned int __count_common_bytes(const u64 diff)
+{
+ /*
+ * Callers guarantee @diff != 0. With little-endian words, the
+ * number of matching leading bytes is the number of trailing zero
+ * bits, converted from bits to bytes.
+ */
+ return (unsigned int)__ffs64(diff) >> 3;
+}
+
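+/*
+ * Count how many leading bytes of @cur and @match are equal: compare
+ * 8 bytes at a time, using the XOR of the two words to locate the
+ * first differing byte, then finish with 4-, 2- and 1-byte tails.
+ */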
+static __always_inline size_t lz77_match(const u8 *match, const u8 *cur, const u8 *end)
+{
+ const u8 *start = cur;
+
+ if (cur == match)
+ return 0;
+
+ if (likely(cur < end - (LZ77_STEPSIZE_64 - 1))) {
+ u64 const diff = lz77_read64(cur) ^ lz77_read64(match);
+
+ if (!diff) {
+ cur += LZ77_STEPSIZE_64;
+ match += LZ77_STEPSIZE_64;
+ } else {
+ return __count_common_bytes(diff);
+ }
+ }
+
+ while (likely(cur < end - (LZ77_STEPSIZE_64 - 1))) {
+ u64 const diff = lz77_read64(cur) ^ lz77_read64(match);
+
+ if (!diff) {
+ cur += LZ77_STEPSIZE_64;
+ match += LZ77_STEPSIZE_64;
+ continue;
+ }
+
+ cur += __count_common_bytes(diff);
+ return (size_t)(cur - start);
+ }
+
+ if (cur < end - 3 && !(lz77_read32(cur) ^ lz77_read32(match))) {
+ cur += LZ77_STEPSIZE_32;
+ match += LZ77_STEPSIZE_32;
+ }
+
+ if (cur < end - 1 && lz77_read16(cur) == lz77_read16(match)) {
+ cur += LZ77_STEPSIZE_16;
+ match += LZ77_STEPSIZE_16;
+ }
+
+ if (cur < end && *cur == *match)
+ cur++;
+
+ return (size_t)(cur - start);
+}
+
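+/*
+ * Branchless min/max: @m is 0 or all-ones depending on the compare,
+ * so the masking keeps exactly one of the two operands.
+ */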
+static __always_inline unsigned long lz77_max(unsigned long a, unsigned long b)
+{
+ int m = (a < b) - 1;
+
+ return (a & m) | (b & ~m);
+}
+
+static __always_inline unsigned long lz77_min(unsigned long a, unsigned long b)
+{
+ int m = (a > b) - 1;
+
+ return (a & m) | (b & ~m);
+}
+
+int lz77_compress(const u8 *src, size_t src_len, u8 *dst, size_t *dst_len);
+/* no stub needed: lz77_compress() is only called from code built under
+ * CONFIG_CIFS_COMPRESSION */
+#endif /* CONFIG_CIFS_COMPRESSION */
+#endif /* _SMB_COMPRESS_LZ77_H */
@@ -978,9 +978,12 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
switch (opt) {
case Opt_compress:
+ if (!IS_ENABLED(CONFIG_CIFS_COMPRESSION)) {
+ cifs_errorf(fc, "CONFIG_CIFS_COMPRESSION kernel config option is unset\n");
+ goto cifs_parse_mount_err;
+ }
ctx->compress = true;
- cifs_dbg(VFS,
- "SMB3 compression support is experimental\n");
+ cifs_dbg(VFS, "SMB3 compression support is experimental\n");
break;
case Opt_nodfs:
ctx->nodfs = 1;
--
2.43.0