Hello,
is it normal to see more and more <defunct> DRBD threads on the primary when resetting the
secondary node?
Look:
#node1 (primary) : ps -ef | grep drbd
root 97 1 0 15:16 pts/0 00:00:00 [drbdd_0]
root 100 1 0 15:16 pts/0 00:00:00 [drbdd_1]
root 107 97 0 15:16 pts/0 00:00:00 [drbd_asender_0]
root 108 100 0 15:16 pts/0 00:00:00 [drbd_asender_1]
#node2 : reset
#node1 : ps -ef | grep drbd
root 97 1 0 15:16 pts/0 00:00:00 [drbdd_0]
root 100 1 0 15:16 pts/0 00:00:00 [drbdd_1]
root 107 97 0 15:16 pts/0 00:00:00 [drbd_asender_0 <defunct>]
root 108 100 0 15:16 pts/0 00:00:00 [drbd_asender_1 <defunct>]
#node2 : /etc/init.d/drbd start
SyncingQuick: OK!
#node1: ps -ef | grep drbd
root 97 1 0 15:16 pts/0 00:00:00 [drbdd_0]
root 100 1 0 15:16 pts/0 00:00:00 [drbdd_1]
root 107 97 0 15:16 pts/0 00:00:00 [drbd_asender_0 <defunct>]
root 108 100 0 15:16 pts/0 00:00:00 [drbd_asender_1 <defunct>]
root 465 97 0 15:20 pts/0 00:00:00 [drbd_asender_0]
root 466 97 0 15:20 pts/0 00:00:00 [drbd_syncer_0 <defunct>]
root 467 100 0 15:20 pts/0 00:00:00 [drbd_asender_1]
root 468 100 0 15:20 pts/0 00:00:00 [drbd_syncer_1 <defunct>]
#node2 : reset
#node1 : ps -ef | grep drbd
root 97 1 0 15:16 pts/0 00:00:00 [drbdd_0]
root 100 1 0 15:16 pts/0 00:00:00 [drbdd_1]
root 107 97 0 15:16 pts/0 00:00:00 [drbd_asender_0 <defunct>]
root 108 100 0 15:16 pts/0 00:00:00 [drbd_asender_1 <defunct>]
root 465 97 0 15:20 pts/0 00:00:00 [drbd_asender_0 <defunct>]
root 466 97 0 15:20 pts/0 00:00:00 [drbd_syncer_0 <defunct>]
root 467 100 0 15:20 pts/0 00:00:00 [drbd_asender_1 <defunct>]
root 468 100 0 15:20 pts/0 00:00:00 [drbd_syncer_1 <defunct>]
#node2 : /etc/init.d/drbd start
#node1 : ps -ef | grep drbd
root 97 1 0 15:16 pts/0 00:00:00 [drbdd_0]
root 100 1 0 15:16 pts/0 00:00:00 [drbdd_1]
root 107 97 0 15:16 pts/0 00:00:00 [drbd_asender_0 <defunct>]
root 108 100 0 15:16 pts/0 00:00:00 [drbd_asender_1 <defunct>]
root 465 97 0 15:20 pts/0 00:00:00 [drbd_asender_0 <defunct>]
root 466 97 0 15:20 pts/0 00:00:00 [drbd_syncer_0 <defunct>]
root 467 100 0 15:20 pts/0 00:00:00 [drbd_asender_1 <defunct>]
root 468 100 0 15:20 pts/0 00:00:00 [drbd_syncer_1 <defunct>]
root 475 97 0 15:22 pts/0 00:00:00 [drbd_asender_0]
root 476 97 0 15:22 pts/0 00:00:00 [drbd_syncer_0 <defunct>]
root 477 100 0 15:22 pts/0 00:00:00 [drbd_asender_1]
root 478 100 0 15:22 pts/0 00:00:00 [drbd_syncer_1 <defunct>]
DRBD version used: pre5
kernel: 2.4.2
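
For what it's worth, the <defunct> entries look like plain zombies: each time the
secondary comes back, drbdd spawns fresh asender/syncer children, and the old ones
that died when the peer was reset are apparently never reaped. A minimal user-space
sketch of the same effect (plain fork(), nothing DRBD-specific; it is only my
assumption that the kernel threads behave like ordinary forked children here):

    #include <stdio.h>
    #include <sys/types.h>
    #include <unistd.h>

    int main(void)
    {
        pid_t pid = fork();     /* parent plays the role of drbdd */
        if (pid == 0)
            _exit(0);           /* child plays an asender/syncer that
                                 * dies when the peer disappears */
        /* No wait()/waitpid() here, so the dead child stays in the
         * process table as <defunct> until the parent reaps it or
         * exits (then init adopts and reaps it). */
        printf("check with: ps -ef | grep %d\n", (int)pid);
        pause();                /* keep the parent alive */
        return 0;
    }

If drbdd reaped its old children, e.g. with waitpid(-1, NULL, WNOHANG) in a loop,
the zombie entries would presumably go away on their own.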
Bye...
--
Jean-Yves BOUET
EADS Defence and Security Networks
jean-yves.bouet@example.com
01 34 60 86 36