I got a USB network card. To name it adsl I use udev and created /etc/udev/rules.d/local.rules:
SUBSYSTEMS=="usb", KERNEL=="eth*", ATTRS{idVendor}=="050d", \
ATTRS{idProduct}=="0121", \
NAME="adsl"
/etc/network/interfaces:

auto lo adsl lan dlanusb0 ppp0

iface lo inet loopback

iface adsl inet static
    address 10.0.0.140
    netmask 255.255.255.0
    network 10.0.0.0
    broadcast 10.0.0.255
    up /etc/network/firewall

iface ppp0 inet ppp
    provider adsl_provider

iface lan inet static
    address 192.168.0.254
    netmask 255.255.255.0
    network 192.168.0.0
    broadcast 192.168.0.255

iface dlanusb0 inet static
    address 192.168.1.254
    netmask 255.255.255.0
    network 192.168.1.0
    broadcast 192.168.1.255
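The interfaces listed in the auto line come up at boot; for testing, a single stanza can also be cycled by hand with ifupdown:

ifdown adsl && ifup adsl   # re-apply the adsl configuration
ifup ppp0                  # starts pppd for the adsl_provider peer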
/etc/network/firewall defines the chain ppp-forward:
iptables -N ppp-forward
iptables -A FORWARD -j ppp-forward
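The ip-up script below jumps to two more chains, good-bad and bad-good, which have to exist before the link comes up, so they belong in this file as well. They are not shown here, so the following is only a minimal sketch of a plausible policy (an assumption, adapt to your needs):

# hypothetical good-bad / bad-good chains, not from the original setup
iptables -N good-bad
iptables -A good-bad -j ACCEPT                 # inside may talk to outside
iptables -N bad-good
iptables -A bad-good -m state --state ESTABLISHED,RELATED -j ACCEPT  # replies only
iptables -A bad-good -j DROP                   # unsolicited traffic from outside is dropped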
Add USERNAME (with its password) to /etc/ppp/chap-secrets.
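chap-secrets entries have the form client, server, secret, allowed addresses; for this setup something like the following, with PASSWORD as a placeholder:

# client            server  secret      IP addresses
USERNAME@PROVIDER   *       "PASSWORD"  *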
/etc/ppp/peers/adsl_provider:
user USERNAME@PROVIDER
noauth
noipdefault
defaultroute
debug
persist
maxfail 0
holdoff 30
connect "while ! ping -c 1 10.0.0.138>/dev/null; do sleep 5; done "
pty "/usr/sbin/pptp 10.0.0.138 --nolaunchpppd --nobuffer"
/etc/ppp/ip-up.d/firewall:
#!/bin/sh
# pppd's ip-up exports PPP_IFACE (interface) and PPP_LOCAL (our public IP)
OUTSIDE_DEV=$PPP_IFACE
ADSL_DEV=adsl
INSIDE_DEV=lan
INSIDE_DEV2=dlanusb0
OUTSIDE_IP=$PPP_LOCAL
GOOD_ADDR=192.168.0.0/24
GOOD_ADDR2=192.168.1.0/24
# masquerade both internal networks behind the public address
iptables -t nat -A POSTROUTING -s $GOOD_ADDR -o $OUTSIDE_DEV -j SNAT --to $OUTSIDE_IP
iptables -t nat -A POSTROUTING -s $GOOD_ADDR2 -o $OUTSIDE_DEV -j SNAT --to $OUTSIDE_IP
# forwarding policy: inside to outside via good-bad, outside to inside via bad-good
iptables -A ppp-forward -s $GOOD_ADDR -i $INSIDE_DEV -o $OUTSIDE_DEV -j good-bad
iptables -A ppp-forward -s $GOOD_ADDR2 -i $INSIDE_DEV2 -o $OUTSIDE_DEV -j good-bad
iptables -A ppp-forward -i $OUTSIDE_DEV -o $INSIDE_DEV -j bad-good
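None of this routes anything unless the kernel forwards packets; if IP forwarding is not already enabled elsewhere (e.g. /etc/sysctl.conf), /etc/network/firewall is a reasonable place for it:

# enable IPv4 forwarding between the interfaces
echo 1 > /proc/sys/net/ipv4/ip_forward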
/etc/ppp/ip-down.d/firewall:
#!/bin/sh
# remove exactly the rules that ip-up.d/firewall added, otherwise
# duplicates pile up on every reconnect
OUTSIDE_DEV=$PPP_IFACE
ADSL_DEV=adsl
INSIDE_DEV=lan
INSIDE_DEV2=dlanusb0
OUTSIDE_IP=$PPP_LOCAL
GOOD_ADDR=192.168.0.0/24
GOOD_ADDR2=192.168.1.0/24
iptables -t nat -D POSTROUTING -s $GOOD_ADDR -o $OUTSIDE_DEV -j SNAT --to $OUTSIDE_IP
iptables -t nat -D POSTROUTING -s $GOOD_ADDR2 -o $OUTSIDE_DEV -j SNAT --to $OUTSIDE_IP
iptables -D ppp-forward -s $GOOD_ADDR -i $INSIDE_DEV -o $OUTSIDE_DEV -j good-bad
iptables -D ppp-forward -s $GOOD_ADDR2 -i $INSIDE_DEV2 -o $OUTSIDE_DEV -j good-bad
iptables -D ppp-forward -i $OUTSIDE_DEV -o $INSIDE_DEV -j bad-good
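To check that up and down really leave the tables clean, compare the rule listings across a reconnect; no rule should show up twice:

iptables -L ppp-forward -n -v
iptables -t nat -L POSTROUTING -n -v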
What I want from a RAID1:
notify if a disk fails
This is done by mdadm.
boot degraded with only one disk
Booting degraded does not work with Ubuntu, because mdadm does not start a degraded RAID. My workaround is to add a boot menu entry in GRUB:
title Ubuntu, kernel 2.6.20-16-generic (raid defect)
root (hd0,1)
kernel /boot/vmlinuz-2.6.20-16-generic root=/dev/md1 ro raid_degraded
initrd /boot/initrd.img-2.6.20-16-generic
and build an initrd (update-initramfs -u) with /etc/initramfs-tools/scripts/init-premount/raid_degraded:
#!/bin/sh
set -eu
PREREQ="udev"
prereqs()
{
echo "$PREREQ"
}
case ${1:-} in
prereqs)
prereqs
exit 0
;;
*)
. /scripts/functions
;;
esac
if [ -e /scripts/local-top/md ]; then
log_warning_msg "old md initialisation script found, getting out of its way..."
exit 1
fi
MDADM=$(command -v mdadm) || true
[ -x "$MDADM" ] || exit 0
if grep -q raid_degraded /proc/cmdline 2>/dev/null; then
echo "MD_DEGRADED_ARGS=' '" >> /conf/md.conf
fi
exit 0
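The hook has to be executable, otherwise initramfs-tools skips it; after rebuilding the image it is worth checking that the script actually got in (the initrd is a gzipped cpio archive):

chmod +x /etc/initramfs-tools/scripts/init-premount/raid_degraded
update-initramfs -u
zcat /boot/initrd.img-2.6.20-16-generic | cpio -t 2>/dev/null | grep raid_degraded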
Reported to Ubuntu: https://bugs.launchpad.net/ubuntu/+source/mdadm/+bug/120375
easy change disk if one disk fails
For that I made a script that partitions the new disk, adds it to the RAID and installs the boot loader on it:
#!/bin/sh
# figure out which disk is still in the array: it becomes the source,
# the other one is the (new, empty) destination
if grep -q sda /proc/mdstat; then
    SRC_DISK=/dev/sda
    DST_DISK=/dev/sdb
else
    SRC_DISK=/dev/sdb
    DST_DISK=/dev/sda
fi
# both disks must be visible to the kernel
if ! sfdisk -l 2>/dev/null | grep -q $SRC_DISK; then
    echo $SRC_DISK not found
    exit 1
fi
if ! sfdisk -l 2>/dev/null | grep -q $DST_DISK; then
    echo $DST_DISK not found
    exit 1
fi
# copy the partition table, then add both partitions back into the arrays
sfdisk -d $SRC_DISK | sfdisk $DST_DISK
mdadm /dev/md0 -a ${DST_DISK}1
mdadm /dev/md1 -a ${DST_DISK}2
cat /proc/mdstat
# install grub into the MBR of the new disk so either disk can boot
/usr/sbin/grub --batch --device-map=/dev/null <<EOF
device (hd0) $DST_DISK
root (hd0,1)
setup (hd0)
quit
EOF
exit 0
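After physically replacing the failed disk the script can be run as-is; the resync progress shows up in /proc/mdstat:

watch cat /proc/mdstat   # follow the rebuild until both arrays show [UU]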
set uuid
Ubuntu cannot boot if the UUID of any disk is different; the solution is to set the UUID:
mdadm --stop /dev/md0
mdadm --assemble --verbose /dev/md0 /dev/hda1 /dev/hdb1 \
--update=uuid --uuid=xxxxxxxx:xxxxxxxx:xxxxxxxx:xxxxxxxx
Newer versions of mdadm should be able to set the UUID with --create.
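The UUID on the array and on each member can be compared before and after the update:

mdadm --detail /dev/md0 | grep -i uuid    # UUID of the running array
mdadm --examine /dev/hda1 | grep -i uuid  # UUID in the member's superblock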
why is my raid1 so slow?
Reading from my RAID1 is only as fast as a single SATA disk:
hdparm -t /dev/sda1
/dev/sda1:
Timing buffered disk reads: 172 MB in 3.00 seconds = 57.27 MB/sec
hdparm -t /dev/sdb1
/dev/sdb1:
Timing buffered disk reads: 172 MB in 3.01 seconds = 57.20 MB/sec
hdparm -t /dev/md0
/dev/md0:
Timing buffered disk reads: 172 MB in 3.00 seconds = 57.29 MB/sec
I tried
* a different motherboard with a different onboard SATA controller, with the same results
* different Ubuntu kernels: 2.6.17, 2.6.20 and 2.6.22-rc from Gutsy
* different images:
linux-image-2.6.20-16-generic 2.6.20-16.29
linux-image-2.6.20-16-lowlatency 2.6.20-16.29
linux-image-2.6.20-16-server 2.6.20-16.29
* a RAID1 with PATA disks
Reported to Ubuntu: https://bugs.launchpad.net/ubuntu/+source/mdadm/+bug/120378
Can anybody explain to me why?
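One likely explanation: md RAID1 does not split a single sequential read across the mirrors, so one stream tops out at single-disk speed; the second disk only pays off with concurrent readers. A quick way to test that (block sizes and offsets are arbitrary):

# two simultaneous readers on different regions of the array; if raid1
# read balancing kicks in, the combined throughput should beat the
# single-stream hdparm numbers above
dd if=/dev/md0 of=/dev/null bs=1M count=512 &
dd if=/dev/md0 of=/dev/null bs=1M count=512 skip=4096 &
wait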