Bug ID 1198972
Summary btrfs send slow
Classification openSUSE
Product openSUSE Distribution
Version Leap 15.3
Hardware x86-64
OS openSUSE Leap 15.3
Status NEW
Severity Normal
Priority P5 - None
Component Kernel
Assignee kernel-bugs@opensuse.org
Reporter jon.brightwell@zpg.co.uk
QA Contact qa-bugs@suse.de
Found By ---
Blocker ---

We have a btrfs send that starts fast (~25MiB/s) but quickly crawls (~100KiB/s).

example command:

/sbin/btrfs send -v "/sqldata/central/.snapshots/112/snapshot" | pv -btraT
-B50m | ssh backup3.repl.x.net sudo btrfs receive
"/backup/dbshard1a.prod.x.net/central/112"
At subvol /sqldata/central/.snapshots/112/snapshot
At subvol snapshot
777MiB {----} 0:02:55 [ 130KiB/s] [4.44MiB/s]

It slows down after about 400MiB. I added a buffer (-B50m) to pv to check whether
the network was the bottleneck, but the buffer never fills.
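
A quick way to take ssh and the receiving side out of the picture entirely would be to run the same send into /dev/null (sketch only, not yet run on this box):

# /sbin/btrfs send -v "/sqldata/central/.snapshots/112/snapshot" | pv -btraT > /dev/null

If the rate still collapses after ~400MiB with nothing on the other end, the slowdown has to be on the sending side.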

htop doesn't show any kernel threads struggling, and dstat and iostat don't show
much either.
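
For completeness, per-process CPU and disk numbers over time could be captured with sysstat's pidstat, e.g. (assuming the send is the only btrfs process running):

# pidstat -u -d -p $(pidof btrfs) 5

which would show whether the send process itself is CPU-bound or waiting on I/O.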

It's a live DB server, but there's plenty of I/O capacity, and we have 5 identical
servers whose sends do cap out the network.


# iostat -x 5
Linux 5.3.18-150300.59.54-default (dbshard1a.prod.x.net)    04/28/22        _x86_64_        (48 CPU)

avg-cpu:  %user   %nice %system %iowait  %steal   %idle
           2.22    0.00    1.27    0.09    0.00   96.43

Device            r/s     w/s     rkB/s     wkB/s   rrqm/s   wrqm/s  %rrqm  %wrqm r_await w_await aqu-sz rareq-sz wareq-sz  svctm  %util
nvme3n1         58.24  441.63    791.34   8244.19     5.67    51.31   8.88  10.41    0.18    0.10   0.06    13.59    18.67   0.55  27.45
nvme0n1         86.05  441.56   1379.26   8244.25     5.18    51.38   5.67  10.42    0.14    0.10   0.06    16.03    18.67   0.53  28.00
nvme1n1         58.27  441.53    791.50   8245.03     5.67    51.32   8.87  10.41    0.18    0.10   0.06    13.58    18.67   0.55  27.40
nvme2n1         86.08  441.46   1379.37   8244.97     5.18    51.38   5.67  10.43    0.14    0.10   0.06    16.02    18.68   0.53  28.04
sda             11.45   14.98    712.51     79.42     0.45     0.08   3.79   0.52    4.37    0.15   0.05    62.20     5.30   1.07   2.84
sdb             11.30   14.98    699.35     79.42     0.44     0.08   3.72   0.52    4.44    0.15   0.05    61.90     5.30   1.08   2.83

avg-cpu:  %user   %nice %system %iowait  %steal   %idle
           3.85    0.00    3.40    0.12    0.00   92.63

Device            r/s     w/s     rkB/s     wkB/s   rrqm/s   wrqm/s  %rrqm  %wrqm r_await w_await aqu-sz rareq-sz wareq-sz  svctm  %util
nvme3n1         88.80  867.80   1016.00  11651.20     0.00    68.00   0.00   7.27    0.10    0.08   0.07    11.44    13.43   0.51  48.56
nvme0n1         66.00  867.40    728.80  11651.20     0.00    68.40   0.00   7.31    0.10    0.08   0.07    11.04    13.43   0.52  48.88
nvme1n1         93.00  863.00   1048.00  11732.80     0.00    72.00   0.00   7.70    0.10    0.08   0.08    11.27    13.60   0.51  48.64
nvme2n1         65.20  862.80    768.80  11732.80     0.00    72.20   0.00   7.72    0.18    0.08   0.08    11.79    13.60   0.53  48.72
sda             41.00   72.20   3498.40    445.10     0.00     0.00   0.00   0.00    0.85    0.11   0.05    85.33     6.16   1.28  14.48
sdb             36.60   72.20   2855.20    445.10     0.00     0.00   0.00   0.00    0.79    0.10   0.04    78.01     6.16   1.31  14.24

avg-cpu:  %user   %nice %system %iowait  %steal   %idle
           4.43    0.00    3.30    0.12    0.00   92.15

Device            r/s     w/s     rkB/s     wkB/s   rrqm/s   wrqm/s  %rrqm  %wrqm r_await w_await aqu-sz rareq-sz wareq-sz  svctm  %util
nvme3n1         70.80  761.20    769.60  10829.60     0.00    63.40   0.00   7.69    0.10    0.09   0.08    10.87    14.23   0.50  41.92
nvme0n1         92.20  761.00    869.60  10829.60     0.00    63.60   0.00   7.71    0.09    0.09   0.08     9.43    14.23   0.52  44.64
nvme1n1         68.80  751.20    767.20  10528.80     0.00    61.80   0.00   7.60    0.10    0.13   0.10    11.15    14.02   0.51  42.08
nvme2n1         91.00  751.00    931.20  10528.80     0.00    62.00   0.00   7.63    0.09    0.13   0.11    10.23    14.02   0.53  44.56
sda             14.00   46.60   1089.60    297.40     0.00     0.20   0.00   0.43    0.69    0.11   0.02    77.83     6.38   1.48   8.96
sdb             10.00   46.60    552.80    297.40     0.00     0.20   0.00   0.43    0.66    0.11   0.01    55.28     6.38   1.55   8.80

# dstat 5
You did not select any stats, using -cdngy by default.
--total-cpu-usage-- -dsk/total- -net/total- ---paging-- ---system--
usr sys idl wai stl| read  writ| recv  send|  in   out | int   csw
2   1  96   0   0|5753k   32M|   0     0 |  86B  376B|  17k   14k
4   3  92   0   0|8436k   95M|2705k 3862k|   0     0 |  39k   29k
5   3  93   0   0|2510k   34M|2119k 3624k| 819B    0 |  30k   22k
3   3  94   0   0|2274k   39M|3914k 4882k|   0     0 |  37k   27k
3   3  94   0   0|1385k   34M|2466k 3345k|   0     0 |  32k   22k

# ps -aux | grep send
root      7557 94.8  0.0  24572   172 pts/1    Rl+  11:40   8:06 /sbin/btrfs send -v /sqldata/central/.snapshots/112/snapshot

It does seem to be maxing out one core.
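
A sketch of how that CPU time could be profiled, assuming perf is installed (7557 is the send process above):

# perf record -g -p 7557 -- sleep 30
# perf report --stdio | head -40

If most samples land inside the send ioctl on the kernel side, that would point at the send code itself rather than the disks or the network.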


# ps -auxf | grep "btrfs"
root      1679  0.0  0.0      0     0 ?        I<   Mar19   0:00  \_ [btrfs-worker]
root      1680  0.0  0.0      0     0 ?        I<   Mar19   0:00  \_ [btrfs-worker-hi]
root      1681  0.0  0.0      0     0 ?        I<   Mar19   0:00  \_ [btrfs-delalloc]
root      1682  0.0  0.0      0     0 ?        I<   Mar19   0:00  \_ [btrfs-flush_del]
root      1683  0.0  0.0      0     0 ?        I<   Mar19   0:00  \_ [btrfs-cache]
root      1684  0.0  0.0      0     0 ?        I<   Mar19   0:00  \_ [btrfs-submit]
root      1685  0.0  0.0      0     0 ?        I<   Mar19   0:00  \_ [btrfs-fixup]
root      1686  0.0  0.0      0     0 ?        I<   Mar19   0:00  \_ [btrfs-endio]
root      1687  0.0  0.0      0     0 ?        I<   Mar19   0:00  \_ [btrfs-endio-met]
root      1688  0.0  0.0      0     0 ?        I<   Mar19   0:00  \_ [btrfs-endio-met]
root      1689  0.0  0.0      0     0 ?        I<   Mar19   0:00  \_ [btrfs-endio-rai]
root      1690  0.0  0.0      0     0 ?        I<   Mar19   0:00  \_ [btrfs-endio-rep]
root      1691  0.0  0.0      0     0 ?        I<   Mar19   0:00  \_ [btrfs-rmw]
root      1692  0.0  0.0      0     0 ?        I<   Mar19   0:00  \_ [btrfs-endio-wri]
root      1693  0.0  0.0      0     0 ?        I<   Mar19   0:00  \_ [btrfs-freespace]
root      1694  0.0  0.0      0     0 ?        I<   Mar19   0:00  \_ [btrfs-delayed-m]
root      1695  0.0  0.0      0     0 ?        I<   Mar19   0:00  \_ [btrfs-readahead]
root      1696  0.0  0.0      0     0 ?        I<   Mar19   0:00  \_ [btrfs-qgroup-re]
root      1716  0.0  0.0      0     0 ?        S    Mar19   0:01  \_ [btrfs-cleaner]
root      1717  1.5  0.0      0     0 ?        S    Mar19 872:22  \_ [btrfs-transacti]
root     22056  0.8  0.0      0     0 ?        I    05:23   3:28  \_ [kworker/u96:10-btrfs-endio-write]
root     28815  0.0  0.0      0     0 ?        I<   06:41   0:00  \_ [kworker/u97:2-btrfs-worker-high]
root     43020  1.5  0.0      0     0 ?        I    09:22   2:22  \_ [kworker/u96:12-btrfs-endio-meta]
root     44734  1.5  0.0      0     0 ?        I    09:41   2:05  \_ [kworker/u96:2-btrfs-endio-meta]
root     45641  0.0  0.0      0     0 ?        I<   09:51   0:00  \_ [kworker/u97:6-btrfs-worker-high]
root       578  0.0  0.0      0     0 ?        I<   10:34   0:00  \_ [kworker/u97:1-btrfs-worker-high]
root      3129  0.0  0.0      0     0 ?        I<   10:59   0:00  \_ [kworker/u97:3-btrfs-worker-high]
root      4401  4.2  0.0      0     0 ?        I    11:06   1:56  \_ [kworker/u96:8-btrfs-endio]
root      5660  0.0  0.0      0     0 ?        I<   11:20   0:00  \_ [kworker/u97:4-btrfs-worker-high]
root      6567  7.8  0.0      0     0 ?        I    11:30   1:45  \_ [kworker/u96:14-btrfs-endio-meta]
root      6570  7.6  0.0      0     0 ?        I    11:30   1:42  \_ [kworker/u96:20-btrfs-endio-meta]
root      6574  4.6  0.0      0     0 ?        I    11:30   1:01  \_ [kworker/u96:24-btrfs-endio-meta]
root      6584  8.2  0.0      0     0 ?        I    11:30   1:49  \_ [kworker/u96:25-btrfs-endio-write]
root      6679  0.0  0.0      0     0 ?        I<   11:31   0:00  \_ [kworker/u97:5-btrfs-worker-high]
root      6681  7.4  0.0      0     0 ?        I    11:31   1:34  \_ [kworker/u96:27-btrfs-endio-meta]
root      7499  1.7  0.0      0     0 ?        I    11:40   0:13  \_ [kworker/u96:1-btrfs-endio-meta]
root      7500  2.7  0.0      0     0 ?        I    11:40   0:21  \_ [kworker/u96:3-btrfs-endio]
root      8460  0.1  0.0      0     0 ?        I    11:50   0:00  \_ [kworker/u96:4-btrfs-endio-meta]

# iotop
PID  PRIO  USER     DISK READ  DISK WRITE  SWAPIN      IO    COMMAND
7557 be/4 root      190.55 K/s    0.00 B/s  0.00 %  0.13 % btrfs send -v /sqldata/central/.snapshots/112/snapshot
6570 be/4 root        9.30 K/s   18.59 K/s  0.00 %  0.01 % [kworker/u96:20-bond0]
12922 be/4 mysql    1692.48 K/s   16.67 M/s  0.00 %  0.01 % mariadbd --defaults-file=/etc/my.appdata.cnf
7499 be/4 root        6.20 K/s    9.30 K/s  0.00 %  0.01 % [kworker/u96:1-btrfs-endio-write]
22056 be/4 root        3.10 K/s   15.49 K/s  0.00 %  0.00 % [kworker/u96:10-bond0]
7500 be/4 root        3.10 K/s    3.10 K/s  0.00 %  0.00 % [kworker/u96:3-btrfs-endio-write]
12761 be/4 mysql      24.79 K/s 1332.29 K/s  0.00 %  0.00 % mariadbd --defaults-file=/etc/my.central.cnf
6359 be/4 root        0.00 B/s    0.00 B/s  0.00 %  0.00 % [kworker/12:1-events]
9123 be/4 root        0.00 B/s    0.00 B/s  0.00 %  0.00 % [kworker/19:0-events]
3015 be/4 dd-agent    0.00 B/s    3.87 K/s  0.00 %  0.00 % agent run -p /opt/datadog-agent/run/agent.pid
6567 be/4 root        0.00 B/s   12.39 K/s  0.00 %  0.00 % [kworker/u96:14-btrfs-endio-write]
6584 be/4 root        0.00 B/s    9.30 K/s  0.00 %  0.00 % [kworker/u96:25-btrfs-endio-write]


What diagnostics are recommended to work out why this one server is such a slug
and possibly hitting a bug? :)
A scrub comes back clean, but we can't take the server offline to run a btrfs
check. There are no errors in dmesg or the journal.
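
If it helps, we can also sample the kernel stack of the send process while it's crawling, something along the lines of (7557 being the send PID):

# for i in $(seq 1 10); do cat /proc/7557/stack; echo ----; sleep 1; done

to see which btrfs code path it keeps sitting in.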

