Add multi_thread_streams/cutoff support and Samba performance tuning

- Add multi_thread_streams (default 4) and multi_thread_cutoff (default "50M")
  fields to ReadConfig, wired into rclone mount args
- Expose both fields in Web UI config editor under Read Tuning section
- Add Samba performance options: TCP_NODELAY, large readwrite, max xmit
- Update config.toml.default with new fields and sftp_connections guidance

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
grabbit 2026-02-19 14:15:23 +08:00
parent 078ab4505e
commit 5efef83a90
5 changed files with 51 additions and 1 deletion

View File

@ -118,6 +118,12 @@ pub struct ReadConfig {
/// In-memory buffer size (e.g. "256M").
#[serde(default = "default_buffer_size")]
pub buffer_size: String,
/// Number of parallel SFTP streams for single-file downloads (rclone --multi-thread-streams).
#[serde(default = "default_multi_thread_streams")]
pub multi_thread_streams: u32,
/// Minimum file size to trigger multi-thread download (e.g. "50M").
#[serde(default = "default_multi_thread_cutoff")]
pub multi_thread_cutoff: String,
}
/// Bandwidth control.
@ -292,6 +298,12 @@ fn default_read_ahead() -> String {
/// Serde default for `ReadConfig::buffer_size`: 256 MiB in-memory buffer.
fn default_buffer_size() -> String {
    String::from("256M")
}
/// Serde default for `ReadConfig::multi_thread_streams`: 4 parallel
/// download streams (matches rclone's own default).
fn default_multi_thread_streams() -> u32 {
    4u32
}
/// Serde default for `ReadConfig::multi_thread_cutoff`: files smaller
/// than 50 MiB are fetched with a single stream.
fn default_multi_thread_cutoff() -> String {
    String::from("50M")
}
/// Serde default for bandwidth limits: "0" means unlimited.
fn default_bw_zero() -> String {
    String::from("0")
}
@ -417,6 +429,8 @@ impl Config {
writeln!(out, "chunk_limit = {:?}", self.read.chunk_limit).unwrap();
writeln!(out, "read_ahead = {:?}", self.read.read_ahead).unwrap();
writeln!(out, "buffer_size = {:?}", self.read.buffer_size).unwrap();
writeln!(out, "multi_thread_streams = {}", self.read.multi_thread_streams).unwrap();
writeln!(out, "multi_thread_cutoff = {:?}", self.read.multi_thread_cutoff).unwrap();
writeln!(out).unwrap();
// --- Bandwidth ---
@ -690,6 +704,8 @@ mount_point = "/mnt/photos"
assert_eq!(config.read.chunk_limit, "1G");
assert_eq!(config.read.read_ahead, "512M");
assert_eq!(config.read.buffer_size, "256M");
assert_eq!(config.read.multi_thread_streams, 4);
assert_eq!(config.read.multi_thread_cutoff, "50M");
assert_eq!(config.bandwidth.limit_up, "0");
assert_eq!(config.bandwidth.limit_down, "0");
@ -994,6 +1010,8 @@ mount_point = "/mnt/photos"
assert_eq!(config.cache.dir, config2.cache.dir);
assert_eq!(config.cache.max_size, config2.cache.max_size);
assert_eq!(config.read.chunk_size, config2.read.chunk_size);
assert_eq!(config.read.multi_thread_streams, config2.read.multi_thread_streams);
assert_eq!(config.read.multi_thread_cutoff, config2.read.multi_thread_cutoff);
assert_eq!(config.bandwidth.adaptive, config2.bandwidth.adaptive);
assert_eq!(config.writeback.transfers, config2.writeback.transfers);
assert_eq!(config.protocols.enable_smb, config2.protocols.enable_smb);

View File

@ -66,6 +66,13 @@ pub fn build_mount_args(config: &Config, share: &ShareConfig, rc_port: u16) -> V
args.push("--vfs-read-ahead".into());
args.push(config.read.read_ahead.clone());
// Multi-thread download: splits large files across N parallel SFTP streams
args.push("--multi-thread-streams".into());
args.push(config.read.multi_thread_streams.to_string());
args.push("--multi-thread-cutoff".into());
args.push(config.read.multi_thread_cutoff.clone());
// Concurrent transfers for write-back
args.push("--transfers".into());
args.push(config.writeback.transfers.to_string());
@ -240,6 +247,8 @@ mount_point = "/mnt/photos"
assert!(args.contains(&"--dir-cache-time".to_string()));
assert!(args.contains(&"1h".to_string()));
assert!(args.contains(&"--buffer-size".to_string()));
assert!(args.contains(&"--multi-thread-streams".to_string()));
assert!(args.contains(&"--multi-thread-cutoff".to_string()));
assert!(args.contains(&"--transfers".to_string()));
assert!(args.contains(&"4".to_string()));
assert!(args.contains(&"--rc".to_string()));

View File

@ -53,6 +53,13 @@ pub fn generate(config: &Config) -> Result<String> {
writeln!(conf, " printcap name = /dev/null")?;
writeln!(conf, " disable spoolss = yes")?;
writeln!(conf)?;
writeln!(conf, " # Performance tuning")?;
writeln!(conf, " socket options = TCP_NODELAY IPTOS_THROUGHPUT SO_RCVBUF=131072 SO_SNDBUF=131072")?;
writeln!(conf, " read raw = yes")?;
writeln!(conf, " write raw = yes")?;
writeln!(conf, " large readwrite = yes")?;
writeln!(conf, " max xmit = 65535")?;
writeln!(conf)?;
// Share sections — each share points at its own mount_point
for share in &config.shares {
@ -274,6 +281,9 @@ mount_point = "/mnt/photos"
assert!(content.contains("server min protocol = SMB2_02"));
assert!(content.contains("map to guest = Bad User"));
assert!(content.contains("load printers = no"));
assert!(content.contains("socket options = TCP_NODELAY"));
assert!(content.contains("large readwrite = yes"));
assert!(content.contains("max xmit = 65535"));
}
#[test]

View File

@ -18,7 +18,7 @@ nas_user = "admin"
# nas_key_file = "/root/.ssh/id_ed25519"
# SFTP port
sftp_port = 22
# SFTP connection pool size
# SFTP connection pool size (if multi_thread_streams=4, recommend >= 16)
sftp_connections = 8
# --- Additional NAS (uncomment to add) ---
@ -49,6 +49,11 @@ chunk_limit = "1G"
read_ahead = "512M"
# In-memory buffer size
buffer_size = "256M"
# Number of parallel SFTP streams for single-file downloads (improves cold-read speed)
# If using multi_thread_streams=4, set sftp_connections >= 16 for multi-file concurrency
multi_thread_streams = 4
# Minimum file size to trigger multi-thread download
multi_thread_cutoff = "50M"
[bandwidth]
# Upload (write-back) speed limit ("0" = unlimited)

View File

@ -295,6 +295,14 @@ if (window.Alpine) {
<label>Buffer Size</label>
<input type="text" x-model="config.read.buffer_size" placeholder="e.g. 256M">
</div>
<div class="field-row">
<label>Multi-Thread Streams</label>
<input type="number" x-model.number="config.read.multi_thread_streams" min="1" max="64" placeholder="e.g. 4">
</div>
<div class="field-row">
<label>Multi-Thread Cutoff</label>
<input type="text" x-model="config.read.multi_thread_cutoff" placeholder="e.g. 50M">
</div>
</div>
</div>
</section>