-rwxr-xr-x  html/files/00-desktop.jpg                          bin  0 -> 108600 bytes
-rwxr-xr-x  html/files/01-xfce-settings.jpg                    bin  0 -> 116907 bytes
-rwxr-xr-x  html/files/01_Open_Test.jpg                        bin  0 -> 34427 bytes
-rwxr-xr-x  html/files/02-xfce-user-actions.jpg                bin  0 -> 9663 bytes
-rwxr-xr-x  html/files/02_Edit_Test_Run_Configurations.jpg     bin  0 -> 64050 bytes
-rwxr-xr-x  html/files/03-xfce-window-resize-hot-edge.jpg      bin  0 -> 61044 bytes
-rwxr-xr-x  html/files/03_Select_Controller.jpg                bin  0 -> 45839 bytes
-rwxr-xr-x  html/files/04_Answer_Prompt.jpg                    bin  0 -> 14622 bytes
-rwxr-xr-x  html/files/Cc-sa_88x31.png                         bin  0 -> 5083 bytes
-rwxr-xr-x  html/files/MgmtStudio1.jpg                         bin  0 -> 28450 bytes
-rwxr-xr-x  html/files/MgmtStudio2.jpg                         bin  0 -> 17555 bytes
-rwxr-xr-x  html/files/pathauto-alias-strings0.jpg             bin  0 -> 28965 bytes
-rw-r--r--  src/About.ascii                                                      92
-rw-r--r--  src/Btrfs:RAID_5_Rsync_Freeze.ascii                                  91
-rw-r--r--  src/Building_an_Ejabberd_Server_with_MySql.ascii                    135
-rw-r--r--  src/Compiling_MariaDB_:_cannot_find_ncurses:_File_format_not_recognized.ascii  38
-rw-r--r--  src/Compiling_nginx_for_Solaris_10_-_Configure:_test:_argument_expected.ascii  54
-rw-r--r--  src/Configuring_Status.Net_for_NGINX_in_a_Subdirectory.ascii         63
-rw-r--r--  src/Creating_Search_Engine_Optimized_Drupal_URLS.ascii               55
-rw-r--r--  src/Dell_V305_Printer_on_Linux.ascii                                190
-rw-r--r--  src/Drupal,_Mod_rewrite,_Subdirectories,_and_Nginx.ascii            106
-rw-r--r--  src/Exim_Spam_Filtering_with_Bogofilter.ascii                       289
-rw-r--r--  src/Git:Care_Free_Committing.ascii                                   97
-rw-r--r--  src/Git_Basics.ascii                                                220
-rw-r--r--  src/Linux:Formatting_a_Hard_Drive.ascii                             108
-rw-r--r--  src/Linux:Luks_Password_Changing.ascii                               43
-rw-r--r--  src/Linux:RAID_Setup.ascii                                          253
-rw-r--r--  src/Linux:Secure_Authentication.ascii                               264
-rw-r--r--  src/Linux:System_Encryption.ascii                                   155
-rw-r--r--  src/Linux:Vpnc_Restart_Script.ascii                                  47
-rw-r--r--  src/Linux:dm-crypt_Encrypted_Home_Directories.ascii                 213
-rw-r--r--  src/Linux_Storage_Devices,_Partitions,_and_Mount_Points_Explained.ascii  131
-rw-r--r--  src/Migrating_from_Drupal_7_to_Habari_.8.ascii                       91
-rw-r--r--  src/Mutt:Sorting_Mail_Like_a_Boss.ascii                              61
-rw-r--r--  src/My_.bashrc.ascii                                                 40
-rw-r--r--  src/Running_Load_Tests_with_a_Remote_VSTS_Controller_and_Associated_Agents.ascii  51
-rw-r--r--  src/SQL_2008_Reinstall_Errors.ascii                                  91
-rw-r--r--  src/SQL_Server_2008_Memory_Management.ascii                          72
-rw-r--r--  src/Securing_a_Postfix_Smtp_Server.ascii                            226
-rw-r--r--  src/Server_Administration:Firewalls.ascii                            41
-rw-r--r--  src/Sidebar.ascii                                                     5
-rw-r--r--  src/Team_Password_Management.ascii                                  112
-rw-r--r--  src/Updating_SSH_Keys_Across_an_Environment.ascii                   347
43 files changed, 3781 insertions, 0 deletions
diff --git a/html/files/00-desktop.jpg b/html/files/00-desktop.jpg
new file mode 100755
index 0000000..ac8d307
--- /dev/null
+++ b/html/files/00-desktop.jpg
Binary files differ
diff --git a/html/files/01-xfce-settings.jpg b/html/files/01-xfce-settings.jpg
new file mode 100755
index 0000000..4377f13
--- /dev/null
+++ b/html/files/01-xfce-settings.jpg
Binary files differ
diff --git a/html/files/01_Open_Test.jpg b/html/files/01_Open_Test.jpg
new file mode 100755
index 0000000..6b46fb5
--- /dev/null
+++ b/html/files/01_Open_Test.jpg
Binary files differ
diff --git a/html/files/02-xfce-user-actions.jpg b/html/files/02-xfce-user-actions.jpg
new file mode 100755
index 0000000..50688a1
--- /dev/null
+++ b/html/files/02-xfce-user-actions.jpg
Binary files differ
diff --git a/html/files/02_Edit_Test_Run_Configurations.jpg b/html/files/02_Edit_Test_Run_Configurations.jpg
new file mode 100755
index 0000000..9659ba9
--- /dev/null
+++ b/html/files/02_Edit_Test_Run_Configurations.jpg
Binary files differ
diff --git a/html/files/03-xfce-window-resize-hot-edge.jpg b/html/files/03-xfce-window-resize-hot-edge.jpg
new file mode 100755
index 0000000..30ca230
--- /dev/null
+++ b/html/files/03-xfce-window-resize-hot-edge.jpg
Binary files differ
diff --git a/html/files/03_Select_Controller.jpg b/html/files/03_Select_Controller.jpg
new file mode 100755
index 0000000..e9600b9
--- /dev/null
+++ b/html/files/03_Select_Controller.jpg
Binary files differ
diff --git a/html/files/04_Answer_Prompt.jpg b/html/files/04_Answer_Prompt.jpg
new file mode 100755
index 0000000..b9cbc3a
--- /dev/null
+++ b/html/files/04_Answer_Prompt.jpg
Binary files differ
diff --git a/html/files/Cc-sa_88x31.png b/html/files/Cc-sa_88x31.png
new file mode 100755
index 0000000..f0a944e
--- /dev/null
+++ b/html/files/Cc-sa_88x31.png
Binary files differ
diff --git a/html/files/MgmtStudio1.jpg b/html/files/MgmtStudio1.jpg
new file mode 100755
index 0000000..7385d42
--- /dev/null
+++ b/html/files/MgmtStudio1.jpg
Binary files differ
diff --git a/html/files/MgmtStudio2.jpg b/html/files/MgmtStudio2.jpg
new file mode 100755
index 0000000..6862f91
--- /dev/null
+++ b/html/files/MgmtStudio2.jpg
Binary files differ
diff --git a/html/files/pathauto-alias-strings0.jpg b/html/files/pathauto-alias-strings0.jpg
new file mode 100755
index 0000000..bea7a49
--- /dev/null
+++ b/html/files/pathauto-alias-strings0.jpg
Binary files differ
diff --git a/src/About.ascii b/src/About.ascii
new file mode 100644
index 0000000..18ce3f0
--- /dev/null
+++ b/src/About.ascii
@@ -0,0 +1,92 @@
+About
+=====
+:author: Aaron Ball
+:email: nullspoon@iohq.net
+
+
+== {doctitle}
+
+link:https://iohq.net/[Iohq.net] is my attempt at contributing back to the
+world's largest knowledgebase in history: the internet. Here I document my
+various experiences with technology and anything else I care to blog about.
+
+link:https://github.com/nullspoon[GitHub page]
+
+
+== Who Am I
+
+How dramatic does that sound? My name is Aaron Ball, and as per my Twitter
+page, I am a nix engineer, Android tester, open source fanatic, and regional
+champion of rock paper scissors lizard spock (but only with my left hand). I
+thank God routinely for all the engineers He has put on this Earth to discover
+and make all the great things that have been made. The world is a complex and
+interesting place and there's nothing like poking at something to figure out
+how it works.
+
+
+== Contact
+
+You can send me an email at my username (nullspoon) at iohq.net (isn't
+obfuscation great?).
+
+If you are particularly concerned with security or just want to add a fellow
+PGP user to your list of security-minded friends, my public key is...
+
+Brace yourselves, it's the public key for a 4096 bit private key (hehe)...
+
+----
+-----BEGIN PGP PUBLIC KEY BLOCK-----
+Version: GnuPG v2.0.22 (GNU/Linux)
+
+mQINBFKjiVgBEADJ9jRiw9rT6r61eo432GRPCUAacYgCYrT8W8d8SY1DTUy16qyZ
+5mOqlVA1DN12n/pnPB7mgeD4csYstzl4k9dcG5206XC239JJbFB3ezB1P8VRI8VO
+k2iavV1ysYA1e/b4JMvzhQp/i9/JjeoJycLCDJz2ENl31hAsATCOQ+NjQ3Lk2c4R
+qNUJuxoapUn7NwxeY3zWx2nMlTcBVuSuzactnc62zMrB0fNfC13P59e6xiA0KZ2G
+Pbqi1L3Hk38biFLQvEXjAmx77FVpkiWfppuduavBrCXdBLukYeMv9PHR2cxriNWc
+QbZ2Df5Y2z0PrVkSOWSXOF81uNKP/9cJHoRvVOlRT4mejawaOQnrebTjy6xhGT37
+6Ve2eOJOgrTza16gHcysrePWemC0XQG25G1ZlaxYJarIGX7KrHh9pFCz51C29Eh9
+Bm6YS7S6+QmtIVpoMjfEpWZhwjWjYloPNzLiN3x37XUP231M4bJElXeEhOGZjMoG
+ltfPIheY+mWi7UozHdz5o6Kzubzc0xCkGANxIIvYs3Btj9e4n9lChNOS9eAspEoA
+iphYr09afyd/+y3qXotjGow1vmy7FdemZk0Z5MwuIj68W24TZQHPpdJBnFS9gSa4
+0YggSZEdLStMczujqeBXSL+Rznkyp+2XHaG4sn46BSsfVnlNiMgaBzyV8QARAQAB
+tDZOdWxsc3Bvb24gKFByaW1hcnkgbnVsbHNwb29uIGtleSkgPG51bGxzcG9vbkBp
+b2hxLm5ldD6JAjkEEwECACMFAlKjiVgCGwMHCwkIBwMCAQYVCAIJCgsEFgIDAQIe
+AQIXgAAKCRCHxh4JWkQTntI7D/9iPju0cG38YEMIzKJs9MXHvU9dzh8AdycxsRXy
+pJcQ12nb5RAAWrX8sM31US8V2BpuP/WI3HguzUYVomSPAWf3cjBjpioTA6oG24nW
+jfmoIzTmskrOvS5H4ulM9mSCe1n5UqoMFe/rKwN5wKpQDWKvEns9LEVmsRJ9i8dK
+o5lcIeZF963STKEUllAnPcNhLiaoGIjRScIb+CpUlerxSGFOivN2ksmnsOkTDmCf
+mbXsskqFVZBtO0vuCXsXaM/hcqnK/2PT+jxcaSAoAjPBrx07XQpJbs+Xq78OnVzt
+Xl9tLRsyG2QYxa+rPsZHqxvKyl8CP8oLPWuwTmny0+wXCA1jMhoGlPrJmAnXQiH+
+huyPdhZJZutZQ+aiEHT6Uvd3QKJsMQfZFBdJlyFWuA7k73Rg1zaBGIvPOKCIL71Y
+JqKd7VKAQtk97Tc6KcPE31FzDeFtE65LIj7Q9vW4qi2PI5QIs022aQdezLTFovsI
+y5Tvziq0war34YrmLmVb0R6IF5uNg9WKP7GL5njk2+Fpf9J8uYA49pqtR85Jymhn
+5GlUoi7opiCo/4PBdIBcYUF7DCaUQl5z8wHXGEuHssSjjOlT9EHMGVmVVGxC/yGJ
+TAiLEAFH0+cMlN1pzxZPcK/zDRzwkdwsdvdvNgqZorXH/I7zDc0ROg4vp/Q0Iwkz
+U4Gdf7kCDQRSo4lYARAAyl6iY6VOR/Amwmeu5j5gmZZ4T+ic0oyQ9ZyCzvGoL3KU
+IDrkIzzemyOnE92xfq9NgdNa+ITNHHTXXsp9Ayey8/h1W/twkJprAr9tP73tWraG
+swqnm+C3hs7k9ntkcXkHSonewHoK9Ro4Pki/MfW6YwwssNlmpPPz08Tnn/R8x+RK
+ApNmBjx7yRwmiyQlxXAaK/LSlVM62DwLEt24n+gcN7ukp+nGx63HaxpXGMmkDXIQ
+9AzidTnoKO2KpvYkkBkd/cF0XgXKJPqU1KV/gbb4uQ21Upiht+Stuqp0Zawq9F2u
+GUEFzviMqlT5dhh0T48YzJyCdeKxpkd5XLyOKnzCW2oXlvY5lBieIHjRBil7NkMy
+ezkgsy+S1+eQDtAdAVgQi+MeeXpI5k+o1nF0rl1ivnhJPYvQ8/4oOOwuK6FwWua4
+Sd057X53Bgp7xvZKxOlEYhskgyz9W0uocgX8DhhB49rfw9c3PgqagUrDuDGQW12l
+HkxYuLMtcc7N2jpI11VXfsGCnTxmFNWXSQjzbKh50egPp4d1C6osvwWMNDWCu3ry
+GMVYj5hdDqwLQtcGD1+9uw9IYEDO+pXvdRMrPvEdPfFDvsIWXKKMM4CH4fKuMZpL
+y4esJFy15ARLcrDhD2ceN5xVaPYuz374tAWngcn44GFt9B5H1ayxRRgV/ydV3S0A
+EQEAAYkCHwQYAQIACQUCUqOJWAIbDAAKCRCHxh4JWkQTnrPIEACP0ravu0uIVyg0
+21T9k+khF7nWBRgc3e8bX1uKOqC800RePXyu96wl7DA5agvf3271NtXFFfALwkzg
+NZ2d5+KNKzoz9zrz7txmEHcgNHrWeXY220YmhgEyDD/rDS6sjGn9O/Obb+f8mEoY
+XhWrSQkGWIgtY3Qb+wZnA6gA7VzmVgHxiKcM4XH6QhJol9mgCWZs7zxcHVz0mMNf
+fffyRuf4/JkyZ6WohsMPXL0vsSX9j49n3f7N/G1TBICTQ6qDvMeRMhaJpkliVsHR
+kVy/Oo+LWQ7wEy1OJB9Ey/KUIAKP481xcCIEquV7LHFzRuNf/hPE6A9iKGgAAN1z
+FAdCwe+8BDvybW8+xt+WdHULNnPcaEIIEJAeoRWg5yomJ5ObAkrcz/F+1VxUPTle
+t5X+P7KWk0pai2GBCKhyACHN7WKqxM8BE8qg+d5Xpg4RVkerFtEIKB1PwHcsFGXX
+9mFxHblNYJ/xxxX5MK1qKIKJoyFFhNR7sw8+SKg9gCrv8nwOz15gC+4cV0LHm7mg
+1CgwS4qiAmomWLogal7O+iV960usfSFEE4BSPq7JMIDn9ICfOYjeIAQ8wCw7ZjcN
+ykOycANTpIp7O7gnSNm2V9i1JTK4M9hX9DWtse7lA1YZiYqfRHWdKE6UCapiVKl+
+Ldfv23c2sRAxPA27rFmsgcOGz9iV8w==
+=OITG
+-----END PGP PUBLIC KEY BLOCK-----
+----
+
+// vim: set syntax=asciidoc:
diff --git a/src/Btrfs:RAID_5_Rsync_Freeze.ascii b/src/Btrfs:RAID_5_Rsync_Freeze.ascii
new file mode 100644
index 0000000..bcdcb84
--- /dev/null
+++ b/src/Btrfs:RAID_5_Rsync_Freeze.ascii
@@ -0,0 +1,91 @@
+Btrfs:RAID 5 Rsync Freeze
+=========================
+:author: Aaron Ball
+:email: nullspoon@iohq.net
+
+
+== {doctitle}
+
+My server's _/home/_ directory is a btrfs RAID 5, spanning three drives (I did
+a blog post about it Btrfs:RAID_Setup[here]). Everything worked fine until I
+used rsync to sync my files from my laptop to my server. At that point, the
+sync would go well for a little while and then slow to a crawl. I couldn't
+cancel the sync with a ctrl+c. If I could get on my server over ssh, I'd find
+that one of my cpus was pegged at 100%. Sometimes though it got so bogged down
+I couldn't even get to the server at all. If I were already on the server and I
+did a kill -9 on rsync, it'd go defunct.
+
+I checked my logs after trying to umount /home/ and found...
+
+----
+Nov 03 12:01:18 zion kernel: device label home devid 1 transid 1173 /dev/sdb
+Nov 03 12:01:19 zion kernel: btrfs: disk space caching is enabled
+Nov 03 12:11:53 zion kernel: INFO: task umount:1668 blocked for more than 120 seconds.
+Nov 03 12:11:53 zion kernel: "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
+Nov 03 12:11:53 zion kernel: umount D ffff880037afbc60 0 1668 1653 0x00000000
+Nov 03 12:11:53 zion kernel: ffff880037afbbd0 0000000000000086 0000000000014500 ffff880037afbfd8
+Nov 03 12:11:53 zion kernel: ffff880037afbfd8 0000000000014500 ffff8800aa0caa30 0000000000000010
+Nov 03 12:11:53 zion kernel: 000000000d6fffff ffff880037afbb98 ffffffff8113a911 ffff8800afedb728
+Nov 03 12:11:53 zion kernel: Call Trace:
+Nov 03 12:11:53 zion kernel: [<ffffffff8113a911>] ? free_pcppages_bulk+0x3b1/0x3f0
+Nov 03 12:11:53 zion kernel: [<ffffffff81132700>] ? filemap_fdatawait+0x30/0x30
+Nov 03 12:11:53 zion kernel: [<ffffffff814e1029>] schedule+0x29/0x70
+Nov 03 12:11:53 zion kernel: [<ffffffff814e12cf>] io_schedule+0x8f/0xe0
+Nov 03 12:11:53 zion kernel: [<ffffffff8113270e>] sleep_on_page+0xe/0x20
+Nov 03 12:11:53 zion kernel: [<ffffffff814ddb5b>] __wait_on_bit_lock+0x5b/0xc0
+Nov 03 12:11:53 zion kernel: [<ffffffff8113284a>] __lock_page+0x6a/0x70
+Nov 03 12:11:53 zion kernel: [<ffffffff81084800>] ? wake_atomic_t_function+0x40/0x40
+Nov 03 12:11:53 zion kernel: [<ffffffff81141fa3>] truncate_inode_pages_range+0x613/0x660
+Nov 03 12:11:53 zion kernel: [<ffffffff81142005>] truncate_inode_pages+0x15/0x20
+Nov 03 12:11:53 zion kernel: [<ffffffffa07df172>] btrfs_evict_inode+0x42/0x380 [btrfs]
+Nov 03 12:11:53 zion kernel: [<ffffffff811b97b0>] evict+0xb0/0x1b0
+Nov 03 12:11:53 zion kernel: [<ffffffff811b98e9>] dispose_list+0x39/0x50
+Nov 03 12:11:53 zion kernel: [<ffffffff811ba56c>] evict_inodes+0x11c/0x130
+Nov 03 12:11:53 zion kernel: [<ffffffff811a1cc8>] generic_shutdown_super+0x48/0xe0
+Nov 03 12:11:53 zion kernel: [<ffffffff811a1f22>] kill_anon_super+0x12/0x20
+Nov 03 12:11:53 zion kernel: [<ffffffffa07a8ee6>] btrfs_kill_super+0x16/0x90 [btrfs]
+Nov 03 12:11:53 zion kernel: [<ffffffff811a22fd>] deactivate_locked_super+0x3d/0x60
+Nov 03 12:11:53 zion kernel: [<ffffffff811a28e6>] deactivate_super+0x46/0x60
+Nov 03 12:11:53 zion kernel: [<ffffffff811bdeaf>] mntput_no_expire+0xef/0x150
+Nov 03 12:11:53 zion kernel: [<ffffffff811bf0b1>] SyS_umount+0x91/0x3b0
+Nov 03 12:11:53 zion kernel: [<ffffffff814ea5dd>] system_call_fastpath+0x1a/0x1f
+----
+
+The only way to solve the problem was to perform a restart. After that, the
+problem would come back as soon as I started rsync again.
+
+
+[[the-solution]]
+== The Solution
+
+I hunted around for a while until I finally just searched for the name of the
+pegged process, **btrfs-endio-wri**, and cpu time. It turns out, the btrfs
+folks have https://btrfs.wiki.kernel.org/index.php/Gotchas[a page] detailing a
+list of current "gotchas" btrfs has. This issue was one of them. They describe
+it as:
+
+----
+Files with a lot of random writes can become heavily fragmented (10000+
+extents) causing thrashing on HDDs and excessive multi-second spikes of CPU
+load on systems with an SSD or large amount of RAM. ... Symptoms include
+btrfs-transacti and btrfs-endio-wri taking up a lot of CPU time (in spikes,
+possibly triggered by syncs). You can use filefrag to locate heavily
+fragmented files.
+----
+
+One of the best parts of rsync is that it syncs deltas instead of resyncing the
+entire file. What does that result in? Lots of little random writes. Sounds
+like a match to me.
+
+**To fix this**, I defragged all of /home/ (with _compression=lzo_ of course :)
+), and remounted using the *autodefrag* option.
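+
+A rough sketch of the two commands involved (assuming /home is the mount
+point; flags and paths may differ on your system):
+
+----
+# Recursively defragment, compressing with lzo along the way
+btrfs filesystem defragment -r -clzo /home
+
+# Remount with autodefrag enabled
+mount -o remount,autodefrag /home
+----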
+
+Now I can run rsync with no problems.
+
+One last thing to note. Their gotchas page says that once they've worked out a
+few potential kinks with the autodefrag mount option, they'll make it the
+default, which should prevent this from being an issue in future versions.
+
+Category:Linux
+Category:Btrfs
+Category:Storage
+Category:RAID
+
+
+// vim: set syntax=asciidoc:
diff --git a/src/Building_an_Ejabberd_Server_with_MySql.ascii b/src/Building_an_Ejabberd_Server_with_MySql.ascii
new file mode 100644
index 0000000..dfea4d1
--- /dev/null
+++ b/src/Building_an_Ejabberd_Server_with_MySql.ascii
@@ -0,0 +1,135 @@
+Building an Ejabberd Server with MySql
+======================================
+:author: Aaron Ball
+:email: nullspoon@iohq.net
+
+
+== {doctitle}
+
+Yesterday I was upgrading my
+http://www.igniterealtime.org/projects/openfire/[OpenFire] server and thought
+it might be fun to learn something new and switch to a different server
+software. After doing some research, I decided upon
+http://www.ejabberd.im/[ejabberd] since it seems to be a popular solution
+(not to mention the specs, of course).
+
+I keep my jabber data in a MySql database and I don't really want to migrate
+away from that. That being said, I had a really difficult time finding any
+complete documentation on how to configure an ejabberd server to work with a
+MySql database. Here's how I did it.
+
+Firstly, you of course need to grab the binary installer
+http://www.process-one.net/en/ejabberd/archive/[here]. Once you have extracted
+and installed it, you'll need to edit your config file (conf/ejabberd.cfg).
+You'll see a section in the middle (or so) that looks like:
+
+----
+%%% ==============
+%%% AUTHENTICATION
+
+
+%%
+%% auth_method: Method used to authenticate the users.
+%% The default method is the internal.
+%% If you want to use a different method,
+%% comment this line and enable the correct ones.
+%%
+{auth_method, internal}.
+
+
+%%
+%% Authentication using external script
+%% Make sure the script is executable by ejabberd.
+%%
+%%{auth_method, external}.
+%%{extauth_program, "/path/to/authentication/script"}.
+
+
+%%
+%% Authentication using ODBC
+%% Remember to setup a database in the next section.
+%%
+%%{auth_method, odbc}.
+----
+
+Comment out the internal auth method line
+
+----
+%%{auth_method, internal}.
+----
+
+Now, skip down to the ODBC authentication line and uncomment the odbc auth
+method.
+
+----
+{auth_method, odbc}.
+----
+
+Lastly in the config file, we need to configure our database connection
+string. Head on down to the following location, uncomment the first
+odbc_server line and fill in your database connection information.
+
+----
+%%
+%% MySQL server:
+%%
+{odbc_server, {mysql, "MySqlServer", "MySqlDatabase", "MySqlUsername", "MySqlPassword"}}.
+----
+
+It's at this point that you might be thinking to yourself, "...but I don't have
+a database or tables configured". This is the part where I initially got stuck.
+All of the documentation I found pointed towards a sql file that could be found
+in the source code. Other sources indicated that ejabberd needs to be compiled
+with mysql support for this all to work. Thankfully, this is not the case (as
+per my experience at least). I can't say this about the deb or the rpm
+installs, but the gzipped binary at least has this.
+
+If you go into the install location and navigate on down to
+
+----
+<ejabberd-home>/lib/ejabberd-2.1.8/priv/odbc/mysql.sql
+----
+
+and run the mysql file in there on the database you have created, you will find
+yourself with a completely empty database structure (but a structure
+nonetheless).
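+
+For example, loading that schema might look like this (assuming a database
+named ejabberd and a MySQL user with rights to it; swap in your own names):
+
+----
+mysql -u ejabberd -p ejabberd < <ejabberd-home>/lib/ejabberd-2.1.8/priv/odbc/mysql.sql
+----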
+
+Finally, we have to go back and make a few more simple changes to our conf
+file. The config file references several modules that store their data to the
+internal database, unless otherwise specified. We are going to otherwise
+specify here.
+
+Crack open that config file again, located at conf/ejabberd.cfg. Navigate down to
+the section that looks like the following (I won't put the whole thing in here
+since it's a big section)
+
+----
+%%% =======
+%%% MODULES
+
+
+%%
+%% Modules enabled in all ejabberd virtual hosts.
+%%
+----
+
+Here you'll find a lot of lines starting with **mod_**. These are all the
+modules your ejabberd instance will load on startup. There are several in here
+that we need to add *_odbc* to the end of to make them talk to our MySql
+database instead of the internal database. Find the following listed modules
+and add _odbc to them (I've already done that in my list)
+
+----
+{mod_last_odbc, []},
+{mod_offline_odbc, []},
+{mod_privacy_odbc, []},
+{mod_private_odbc, []},
+{mod_pubsub_odbc, [ % requires mod_caps ...
+{mod_roster_odbc, []},
+{mod_vcard_odbc, []},
+----
+
+And finally, we're done. On a side note, you might want to uncomment the
+mod_proxy65 module to enable file transfers. You never know when you'll need to
+http://xkcd.com/949/[transfer a big file].
+
+
+Category:MySQL
+Category:XMPP
+Category:Linux
+
+
+// vim: set syntax=asciidoc:
diff --git a/src/Compiling_MariaDB_:_cannot_find_ncurses:_File_format_not_recognized.ascii b/src/Compiling_MariaDB_:_cannot_find_ncurses:_File_format_not_recognized.ascii
new file mode 100644
index 0000000..a30a6b6
--- /dev/null
+++ b/src/Compiling_MariaDB_:_cannot_find_ncurses:_File_format_not_recognized.ascii
@@ -0,0 +1,38 @@
+Compiling MariaDB:cannot find ncurses: File format not recognized
+=================================================================
+:author: Aaron Ball
+:email: nullspoon@iohq.net
+
+
+== {doctitle}
+
+This week I have been trying to upgrade my MariaDB instance to the latest
+version and have been having some problems getting it to compile right. My
+first issue was that it couldn't find the ncurses libraries, so I had to grab
+and compile the source for that (I'm trying to run everything in userspace).
+Once I did that, I specified the "--with-named-curses-libs=[DIR]" switch and
+began my re-configure, at which point I received the following error:
+
+----
+/usr/bin/ld: cannot find /home/mariadb/ncurses/: File format not recognized
+collect2: ld returned 1 exit status
+make[2]: *** [mysql] Error 1
+make[2]: *** Waiting for unfinished jobs....
+make[2]: Leaving directory `/home/mariadb/mariadb-5.3.3-rc-src/client'
+make[1]: *** [all] Error 2
+make[1]: Leaving directory `/home/mariadb/mariadb-5.3.3-rc-src/client'
+make: *** [all-recursive] Error 1
+----
+
+I searched around for some time and could not find the answer until I happened
+upon something unrelated that pointed me towards the
+*--with-client-ldflags=-all-static* switch. I threw that switch onto the end
+of my configure string and presto!
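+
+For reference, the relevant end of the configure invocation looked roughly
+like this (the ncurses path here is illustrative, not the exact one from my
+build):
+
+----
+./configure --with-named-curses-libs=/home/mariadb/ncurses/lib \
+            --with-client-ldflags=-all-static
+----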
+
+
+
+Category:MariaDB
+Category:MySQL
+
+
+// vim: set syntax=asciidoc:
diff --git a/src/Compiling_nginx_for_Solaris_10_-_Configure:_test:_argument_expected.ascii b/src/Compiling_nginx_for_Solaris_10_-_Configure:_test:_argument_expected.ascii
new file mode 100644
index 0000000..f9176f1
--- /dev/null
+++ b/src/Compiling_nginx_for_Solaris_10_-_Configure:_test:_argument_expected.ascii
@@ -0,0 +1,54 @@
+Compiling Nginx on Solaris 10 - Configure:test:argument expected
+================================================================
+:author: Aaron Ball
+:email: nullspoon@iohq.net
+
+
+== {doctitle}
+
+Yesterday I was working on compiling nginx on one of our Solaris boxes at work
+(someone please tell me why companies still choose Solaris over the various
+other unix or linux distros out there) and I ran into a problem. When I ran
+configure with any options, I saw the following error:
+
+----
+./configure: test: argument expected
+----
+
+And if you try to run make or gmake after that you get this error
+
+----
+make: Fatal error: Command failed for target `objs/src/core/nginx.o'
+----
+
+That's no fun, huh? Well, I searched around for a while and found the solution
+http://forum.nginx.org/read.php?21,220311,220313[here], which happened to be in
+Russian (Dear
+http://translate.google.com/translate?hl=en&sl=ru&tl=en&u=http%3A%2F%2Fforum.nginx.org%2Fread.php%3F21%2C220311%2C220313[Google
+Translate]).
+
+Basically, the problem was that the version of sh that Solaris 10 defaults to
+is very old and not POSIX compliant (go figure). The solution is to change the
+configure script to use a different version of sh. At the top of the
+configure script, change the following line from
+
+----
+#!/bin/sh
+----
+
+to
+
+----
+#!/usr/xpg4/bin/sh
+----
+
+Rerun your configure script with all of your switches and all should be well
+(yay!). Once you've done that, gmake/make should run without a hitch (at least
+not due to this problem we hope).
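+
+For example (the switches here are purely illustrative; use whatever set you
+were configuring with before):
+
+----
+./configure --prefix=/opt/nginx --with-http_ssl_module
+gmake
+----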
+
+
+Category:Solaris
+Category:Nginx
+
+
+// vim: set syntax=asciidoc:
diff --git a/src/Configuring_Status.Net_for_NGINX_in_a_Subdirectory.ascii b/src/Configuring_Status.Net_for_NGINX_in_a_Subdirectory.ascii
new file mode 100644
index 0000000..0a83bea
--- /dev/null
+++ b/src/Configuring_Status.Net_for_NGINX_in_a_Subdirectory.ascii
@@ -0,0 +1,63 @@
+Configuring Status.Net for NGINX in a Subdirectory
+==================================================
+:author: Aaron Ball
+:email: nullspoon@iohq.net
+
+
+== {doctitle}
+
+This morning I tried to get status.net to work from a subdirectory of my main
+site, a task which proved to be quite frustrating, especially for someone who's
+not too great at rewrite rules in Apache, let alone NGINX. Unfortunately,
+there is also not much documentation on this topic online since status.net does
+not officially support NGINX. That's okay though; while I don't know much about
+rewrites, since they use regex it seems you should be able to make just about
+anything work (I could be wrong about that though).
+
+To get this to work, we first need a location directive for our main site. That
+would look something like
+
+----
+location / {
+ index index.php;
+ try_files $uri $uri/ @rewriteSection;
+}
+location @rewriteSection {
+ rewrite (.*blah.*) index.php?q=$1;
+}
+----
+
+Now that we have that, we can go ahead and put our subdirectory directive in
+here. For the purposes of this demonstration, our status.net instance will be
+running in a directory called testsub.
+
+----
+location /testsub {
+ index index.php;
+ try_files $uri $uri/ @testsub;
+}
+location @testsub {
+ ## FOR FANCY URLS FALSE
+ ## rewrite ^/testsub/index.php/(.*)$ /testsub/index.php?p=$1 last;
+ ## FOR FANCY URLS TRUE
+ rewrite ^/testsub/(.*)$ /testsub/index.php?p=$1 last;
+}
+----
+
+
+To make this work for your instance, all you should need to do is swap out the
+testsub directory references for the directory your status.net instance is
+running in and you should be set. Keep in mind though that by default,
+status.net has fancy URLs disabled. That means you'll have to use the first
+rewrite line. If fancy URLs are turned on, then you should use the second
+rewrite line. That should be it!
+
+Yay microblogging!
+
+
+Category:Nginx
+Category:Status.Net
+Category:Blogging
+
+
+// vim: set syntax=asciidoc:
diff --git a/src/Creating_Search_Engine_Optimized_Drupal_URLS.ascii b/src/Creating_Search_Engine_Optimized_Drupal_URLS.ascii
new file mode 100644
index 0000000..3282b7f
--- /dev/null
+++ b/src/Creating_Search_Engine_Optimized_Drupal_URLS.ascii
@@ -0,0 +1,55 @@
+Creating Search Engine Optimized Drupal URLs
+============================================
+:author: Aaron Ball
+:email: nullspoon@iohq.net
+
+
+== {doctitle}
+
+A big piece to search engine optimization is how your URLs are structured. A
+ways back, I was talking to a buddy of mine who does SEO for a living and he
+suggested that I use WordPress' URL rewrites to make my URLs friendlier. I went
+ahead and set my blog up for a 'yyyy/mm/dd/title' format and it did wonders for
+my search rankings. Recently however, I moved to Drupal which sadly does not
+automagically create the friendly aliases to your posts. There is good news
+though. In typical Drupal fashion, there's a module for that (kind of like
+"there's an app for that") and it is very customizable.
+
+To set yourself up with article URLs (or blog URLs) that autoalias with a
+format that you want, you need to grab two modules. First you need the
+*Pathauto* module, and that depends on the *Token* module. Before we continue,
+I'm writing this to fit a Drupal 7 scenario, so likely some of the stuff will
+be in a slightly different place if you're running 6 or 5.
+
+Now, once you have those enabled, head on over to the Configuration->URL
+aliases section of your Drupal site. Once there, select the pattern tab.
+
+Where we put our aliasing string here depends on whether you're writing your
+content as a blog or an article content type.
+
+If you blog in article content types, put the following string in the
+*Pattern for All Article Paths* field:
+
+----
+[current-date:custom:Y]/[current-date:custom:m]/[current-date:custom:d]/[node:title]
+----
+
+If you blog in blog format, put the following string in the *Pattern for all
+Blog entry paths* field:
+
+----
+[current-date:custom:Y]/[current-date:custom:m]/[current-date:custom:d]/[node:title]
+----
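+
+With either pattern in place, a hypothetical post titled "My First Post"
+published on January 15, 2012 would end up with an alias along the lines of:
+
+----
+2012/01/15/my-first-post
+----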
+
+image:files/pathauto-alias-strings0.jpg[height=300]
+
+Keep in mind that I formatted those strings for blog entries. If you're doing
+basic pages or something like those, you likely won't want the format I used in
+this article. Just expand the *Replacement Patterns* section in your patterns
+tab to see what other options you have for formatting those URLs.
+
+
+Category:Drupal
+
+
+// vim: set syntax=asciidoc:
diff --git a/src/Dell_V305_Printer_on_Linux.ascii b/src/Dell_V305_Printer_on_Linux.ascii
new file mode 100644
index 0000000..16da7e8
--- /dev/null
+++ b/src/Dell_V305_Printer_on_Linux.ascii
@@ -0,0 +1,190 @@
+Dell V305 Printer on Linux
+==========================
+:author: Aaron Ball
+:email: nullspoon@iohq.net
+
+
+== {doctitle}
+
+I spent this week hanging out with my wife and grandparents-in-law and spent
+some of my time performing the obligatory family tech support (no complaining
+here, I love doing that most of the time). To sum up the beginning quickly
+because I really don't want to write the post in great detail, my grandfather's
+computer got temporarily hosed and the guy from Dell made it even worse (thanks
+Deepak). He actually wiped the computer after taking very minimal backups
+(thankfully just enough). Not only that, but the restore from the Dell image
+actually corrupted a bunch of the core system libraries making installing and
+updating Windows or Microsoft software impossible. After wasting an hour trying
+to fix this, I finally decided to reinstall a fresh copy of Windows. Then it
+hit me, my grandfather doesn't use his computer for much more than Word
+documents, PDFs, browsing the internet, and email - all things that Linux does
+very well. With that, I suggested to him that he try
+http://linuxmint.com/[Linux Mint] (my favorite ready-to-go Linux desktop
+distro). After he played around with the live version for a bit, he decided he
+really liked it (kudos to you Linux Mint guys), so I went ahead and installed it.
+
+I got everything working easily but one thing... his printer.
+
+[[the-dell-v305-printer]]
+The Dell V305 Printer
+--------------------
+
+The Dell V305 is actually a Lexmark printer rebranded as a Dell. Specifically,
+it is the Lexmark x4650. Thankfully, Lexmark makes
+http://support.lexmark.com/index?page=downloadFile&actp=CONTENT&productCode=LEXMARK_X4650&id=DR20523&segment=DOWNLOAD&userlocale=EN_US+&locale=en&oslocale=en_US[a
+linux driver] for this thing, but it is of course, very problematic. When I
+first ran the .sh with its embedded binary, it ran fine until I got to the
+install step, where it gave me an ambiguous "failed to install". When you click
+**OK**, it closes the window with the actual error text in it. While the
+"failed to install" dialog is up, you can't check the log because it won't let
+you select the background window. Also, the background window isn't resizable
+so you can't hope for a bigger window to compensate for no scrollback. Great
+design, huh?
+
+I did notice on the last three or so lines though that it was trying to remove
+a .deb file. With that, I set out to search for it.
+
+
+[[the-fun-begins]]
+The Fun Begins
+--------------
+
+If you run the
+'http://support.lexmark.com/index?page=downloadFile&actp=CONTENT&productCode=LEXMARK_X4650&id=DR20523&segment=DOWNLOAD&userlocale=EN_US+&locale=en&oslocale=en_US[lexmark-08z-series-driver-1.0-1.i386.deb.sh]'
+file with the _--keep_ switch, the script will not remove all files extracted
+to perform the install process. This will leave you with a nicely populated tmp
+folder.
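+
+For example (run from wherever you saved the driver script; --keep is the
+important part):
+
+----
+sh ./lexmark-08z-series-driver-1.0-1.i386.deb.sh --keep
+----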
+
+If you cd into the tmp directory, you will find a file called
+**installarchived_all**. This is actually an lzma archive file. What you want
+to do now is extract this file using the following command
+
+----
+tar -xvf ./installarchived_all --lzma
+----
+
+This will extract several files, one of which will be called
+__lexmark-08z-series-driver-1.0-1.i386.deb__. You might think that this is a
+time for rejoicing, but alas it is not. From this point we should be able to
+run _dpkg -i ./lexmark-08z-series-driver-1.0-1.i386.deb_ and it would work, but
+it won't. If you do that you will receive the following friendly error:
+
+----
+dpkg: error processing ./lexmark-08z-series-driver-1.0-1.i386.deb (--install):
+ parsing file '/var/lib/dpkg/tmp.ci/control' near line 9 package 'lexmark-08z-series-driver':
+ blank line in value of field 'Description'
+Errors were encountered while processing:
+ ./lexmark-08z-series-driver-1.0-1.i386.deb
+----
+
+What? The .deb file was constructed wrong? 'Tis a shame. Here's where it gets
+really fun. What we need to do now is extract the deb file, modify the contents
+of a single file, and repackage the whole thing back up.
+
+First, let's create a working directory and copy the bad .deb file into it.
+Then we'll extract it into a deb package folder structure.
+
+----
+mkdir ./working
+cp ./lexmark-08z-series-driver-1.0-1.i386.deb ./working/
+cd working
+----
+
+Extract the .deb file and clean up a bit (don't forget the period at the
+end of the dpkg-deb line).
+
+----
+dpkg-deb -R lexmark-08z-series-driver-1.0-1.i386.deb .
+rm ./lexmark-08z-series-driver-1.0-1.i386.deb
+----
+
+
+[[fixing-the-problem]]
+Fixing the Problem
+------------------
+
+The problem, as you likely noticed earlier, is that the .deb file has a file
+named _control_ that is improperly formatted. Specifically, control files
+cannot have blank lines in them. To have a "blank" line in a .deb control file,
+you must have a period instead. That said, here's how we fix the file.
+
+Open up the control file in the DEBIAN directory and put a ' .' (yes, with the
+space before it) on the otherwise-blank line in the Description field, like so:
+
+----
+Description:
+ Lexmark 08z Series Drivers Package
+ .
+ This package contains the Lexmark 08z Series Drivers. This is
+ a copyrighted package, please refer to the copyright notice
+ for details about using this product.
+----
+
+Now that that's done, we just need to repackage the .deb file and install it.
+To do that, cd out to one directory above the lexmark-08z directory (the
+working directory) and run **dpkg -b lexmark-08z**. This will take a few
+seconds (it's 22 megs) but it should create a file called lexmark-08z.deb. Now
+install this using **dpkg -i**.
+
+----
+dpkg -b lexmark-08z
+dpkg -i ./lexmark-08z.deb
+----
+
+_I'm too lazy to write the rest out right now so here's the shorthand_
+
+Now you have to edit a ton of files in __/usr/local/lexmark/08zero/etc/__.
+
+Firstly, we need to edit 99-lexmark-08z.rules and add the following
+line to the top so it looks like so:
+
+----
+ATTRS{idVendor}=="413c", ATTRS{idProduct}=="5305", MODE="666"
+
+ACTION!="add", GOTO="lexmark_custom_rules_end"
+ATTRS{idVendor}=="413c", ATTRS{idProduct}=="5305", MODE="666"
+ATTRS{idVendor}=="043d", ATTRS{idProduct}=="0142", MODE="666"
+ATTRS{idVendor}=="043d", ATTRS{idProduct}=="0150", MODE="666"
+ATTRS{idVendor}=="043d", ATTRS{idProduct}=="013f", MODE="666"
+ATTRS{idVendor}=="043d", ATTRS{idProduct}=="0151", MODE="666"
+ATTRS{idVendor}=="043d", ATTRS{idProduct}=="0116", MODE="666"
+LABEL="lexmark_custom_rules_end"
+
+----
+
+Now that we've updated the 99-lexmark-08z.rules file, we need to edit a load of
+the lxd*.conf files. I say we need to edit lots of them because I'm still not
+sure which one or combination of them actually did the trick. I can say though
+that just lxdm.conf wasn't enough.
+
+Now, edit the following files
+
+* lxdm.conf
+* lxdq.conf
+* lxdw.conf
+* lxdu.conf
+* lxdx.conf
+
+...and replace *all* instances of _0116_ with _5305_ and all instances of
+_043D_ with _413C_.
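+
+A quick way to make those replacements in one shot (assuming you're in
+/usr/local/lexmark/08zero/etc/ and have backups; depending on the case used in
+your files, you may need to repeat with lowercase patterns):
+
+----
+sed -i 's/0116/5305/g; s/043D/413C/g' lxdm.conf lxdq.conf lxdw.conf lxdu.conf lxdx.conf
+----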
+
+Once that is done, add your printer from the cups admin console
+(localhost:631). Once you get to the driver part, select Lexmark 3600-4600 and
+you should be set!
+
+**Whew**
+
+Finally, here are the resources I found to help me out with this
+solution.
+
+* http://ubuntuforums.org/showpost.php?p=7809488&postcount=1
+* http://ubuntuforums.org/archive/index.php/t-1243920.html
+* http://ubuntuforums.org/archive/index.php/t-1554718.html
+* http://ubuntuforums.org/showthread.php?t=1379902
+* http://ubuntuforums.org/showthread.php?t=1554718&page=1
+
+
+Category:Linux
+Category:Debian
+
+
+// vim: set syntax=asciidoc:
diff --git a/src/Drupal,_Mod_rewrite,_Subdirectories,_and_Nginx.ascii b/src/Drupal,_Mod_rewrite,_Subdirectories,_and_Nginx.ascii
new file mode 100644
index 0000000..ce8b379
--- /dev/null
+++ b/src/Drupal,_Mod_rewrite,_Subdirectories,_and_Nginx.ascii
@@ -0,0 +1,106 @@
+Drupal, Mod Rewrite, Subdirectories, and NGINX
+==============================================
+:author: Aaron Ball
+:email: nullspoon@iohq.net
+
+
+== {doctitle}
+
+A few days ago I started dabbling with nginx (many thanks for the
+http://arstechnica.com/business/news/2011/11/a-faster-web-server-ripping-out-apache-for-nginx.ars[article]
+from http://arstechnica.com/[arstechnica]) knowing I was getting myself into a
+world without htaccess files. They say that Nginx is easier to configure than
+Apache, but au contraire! If you're doing a simple setup, yes, Nginx is much
+easier than Apache. If you're even doing a slightly more complicated virtual
+host setup, Nginx is definitely much easier. However, if you do much with
+mod_rewrite in Apache, you'll likely find yourself confused a bit with all
+kinds of 404s on your subdirectories. Believe it or not though, with Nginx it
+is actually easier to configure URI rewriting as well, provided you know what
+you're doing...which I do not.
+
+My current setup has Drupal at the root directory, and various other tidbits
+hosted in subdirectories. These aren't anything fancy like subdomains, just
+directories beneath /.
+
+Pretty much any CMS/blog these days uses the .htaccess file to perform URI
+rewrites for search engine friendly URIs, which causes some complications for
+Nginx since you have one config file to set up all of that for your entire
+domain, rather than a config file per directory (if you wish) defining rewrite
+rules for each one. To get my Drupal instance back up and running, I took the
+location directive from the http://drupal.org/node/110224[Drupal Support page]
+for this issue. Specifically I used the following lines...
+
+----
+location / {
+ root /path/to/drupal;
+ index index.php index.html;
+ if (!-f $request_filename) {
+ rewrite ^(.*)$ /index.php?q=$1 last;
+ break;
+    }
+ if (!-d $request_filename) {
+ rewrite ^(.*)$ /index.php?q=$1 last;
+ break;
+ }
+}
+----
+
+The problem with using that configuration is that any time you try to hit a
+legitimate sub directory, you receive a 404. The reason for this is because the
+request_filename will end up going to
+http://yoursite.com/index.php?q=request_filename. An example
+of this would be... Say you go to your site at the following URI:
+http://blerdibler.com/chips. The previous configuration would
+send the request to http://blerdibler.com/index.php?q=chips,
+which of course doesn't exist, so we receive a 404. The fix for this is
+relatively simple, which is very unfortunate because I spent a long time
+finding this face-palmingly simple solution (mostly because once again, I do
+not know what I'm doing).
+
+The fix is to move the Drupal rewrite stuff to its own named location
+directive (I'll show what that looks like in a few), and reference that for the
+last-resort case. So, here's what my location directives look like that allow
+for me to hit up my sub directories as well as my rewritten Drupal pages.
+
+----
+location / {
+ index index.html index.htm index.php;
+ try_files $uri $uri/ @drupal;
+}
+location @drupal {
+ rewrite ^(.*)$ /index.php?q=$1 last;
+ break;
+}
+----
+
+So what we're doing here is trying all requests at face value. This means that
+Nginx tries to load http://blerdibler.com/anchovies when
+http://blerdibler.com/anchovies (a file called anchovies, not the directory) is
+called.
+
+If it can't load that, it tries http://blerdibler.com/anchovies/ (the directory
+called anchovies...consequently it searches for index.html/htm/php).
+
+Finally, if neither of those work, it calls the location directive called
+drupal (@drupal) which sends the request to
+http://blerdibler.com/index.php?q=anchovies. If that doesn't work, you're hosed
+and hopefully you've got an attractive 404 page set up. Incidentally, this also
+works for all nested Drupal/Wordpress instances as well (say, a drupal instance
+located at http://blerdibler.com/drupal2).
+
+Hopefully that helped someone out because I can't write anymore on this topic
+as I am now out of coffee. Sorry. If however, you have questions/comments/etc.,
+please leave them in the comments section and I will go brew up another cup o'
+joe and help you out (if I can...yet again...I still don't know what I'm
+doing).
+
+Thanks for reading!
+
+
+Category:nginx
+Category:Apache
+Category:Drupal
+
+
+// vim: set syntax=asciidoc:
diff --git a/src/Exim_Spam_Filtering_with_Bogofilter.ascii b/src/Exim_Spam_Filtering_with_Bogofilter.ascii
new file mode 100644
index 0000000..cb9578b
--- /dev/null
+++ b/src/Exim_Spam_Filtering_with_Bogofilter.ascii
@@ -0,0 +1,289 @@
+Exim Spam Filtering with Bogofilter
+===================================
+:author: Aaron Ball
+:email: nullspoon@iohq.net
+
+
+== {doctitle}
+
+I have been operating a personal email server for the past 4-ish years with
+very little trouble. My server itself received a truck-load of spam email, but
+none of it was delivered because every email was addressed to an account that
+didn't exist on my server (love that check_local_user filter). I received maybe
+one spam email every 3 - 6 months until recently when my email address was
+leaked in the link:Aol_Email_Hacked[Aol email breach]. While I'm a bit upset at
+Aol for that, I guess it was bound to happen sooner or later to one of the
+email providers, so I guess I can't be too upset. In the end, it's been a good
+experience because it forced me to [finally] learn to set up a spam filter with
+Exim.
+
+I searched the internet for several days weighing the pros and cons of each
+available spam filter (spamassassin, razor, dspam, bogofilter) until finally
+settling on http://bogofilter.sourceforge.net/[Bogofilter] due to its small
+size and the fact that it's written in C (might as well have something that
+_can_ handle a lot of spam, even if mine doesn't see much).
+
+Once I settled, I ran into the problem that spam filtering isn't a very well
+documented thing. All of its parts are _fairly_ well documented, but no one
+place really seems to put it all together with a good explanation of how each
+part interacts. Hopefully I can do that for you here.
+
+[[assumptions]]
+== Assumptions
+
+. Each user's mail is stored in *maildir* format
+. Each user's mail is stored in the *~/Mail* directory
+. Spam will be stored in a directory called *spam*
+. Less sure emails will be delivered to an *unsure* directory
+
+
+[[bogofilter-configuration]]
+== Bogofilter Configuration
+
+First, we need to set up the actual mail analysis software, Bogofilter. My
+bogofilter configuration is fairly simple. To keep things nicely relegated to
+one area of my server, I have my bogofilter logs and word databases stored in
+__/home/mail/bogofilter__.
+
+Regarding the configuration file (/etc/bogofilter/bogofilter.cf), I am using
+the following simple configuration.
+
+./etc/bogofilter/bogofilter.cf
+----
+bogofilter_dir = /home/mail/bogofilter/
+ham_cutoff = 0.60
+spam_cutoff = 0.80
+----
+
+To give you an idea of what that does, emails with a "spamicity" rank between
+60% and 80% are listed as *Unsure* (remember, ham is good email) and thus will
+be sent to the unsure mail directory. Emails with a "spamicity" rank of 80% or
+higher are marked as *Spam* and will be sent to the *spam* directory (see the
+#Assumptions section).
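+
+You can sanity-check the filter by hand before wiring it into Exim by piping a
+saved message through it (the -v flag prints the computed spamicity; the -d
+path matches the configuration above):
+
+----
+bogofilter -d /home/mail/bogofilter -v < /tmp/test-message.eml
+----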
+
+[[exim-configuration]]
+== Exim Configuration
+
+[[routers]]
+=== Routers
+
+Routers in Exim do just what their name indicates: route email.
+Specifically, they route email to transports, but more on those in the
+link:#Transports[next section]. One thing to note on these before we get
+to the actual configuration part, routers in Exim are all executed, in
+sequence, until the email is either denied or delivered.
+
+Note: To give the reader a better idea of where the spam-related routers go, I
+ have included the router names for the defaults to provide context.
+ Spam-related routers are listed in bold.
+
+./etc/mail/exim.conf
+----
+begin routers
+...
+dnslookup:
+...
+#
+# BOGOFILTER router
+#
+# Routes all mail to spam@domain.tld to the bogo_setspam_transport
+bogo_setspam_router:
+ driver = accept
+ condition = ${if eq {$local_part}{spam} {yes}{no}}
+  transport = bogo_setspam_transport
+
+# Runs the received email through as a neutral status to be scanned.
+bogo_check_router:
+ no_verify
+ check_local_user
+ domains = +local_domains
+ condition = ${if !eq {$received_protocol}{bogodone} {1}{0}}
+ driver = accept
+ transport = bogo_check_transport
+
+...
+system_aliases:
+...
+user_forward:
+...
+
+# Delivers bogo spam mail to the spam directory
+localuser_bogo_spam:
+ driver = accept
+ check_local_user
+ condition = ${if match{$h_X-Bogosity:}{Spam.*}{1}}
+ transport = local_delivery_spam
+ cannot_route_message = Unknown user
+
+# Delivers bogo unsure mail to the unsure directory
+localuser_bogo_unsure:
+ driver = accept
+ check_local_user
+ condition = ${if match{$h_X-Bogosity:}{Unsure.*}{1}}
+ transport = local_delivery_unsure
+ cannot_route_message = Unknown user
+
+...
+localuser:
+...
+----
+
+What we just did here is create four new routers. Here's what each does.
+
+bogo_setspam_router:: Sends emails sent to "spam@domain.tld" to the
+bogo_setspam_transport.
+
+bogo_check_router:: Sends _all_ emails to the bogo_check_transport.
+
+localuser_bogo_spam:: Sends email marked "X-Bogosity: Spam" to the
+local_delivery_spam transport.
+
+localuser_bogo_unsure:: Sends email marked "X-Bogosity: Unsure" to the
+local_delivery_unsure transport.
+
+Those explanations make routers seem like they don't do much at all, and
+without corresponding transports, that would be true. Routers only serve to
+route mail that matches certain criteria to the appropriate transports.
+
+
+[[transports]]
+=== Transports
+
+Transports in Exim perform actions (you might also call these __drivers__).
+They are not processed unless an email is sent to them by a router.
+Consequently, they can be placed anywhere and in any order within the
+*transports* section of the Exim config file.
+
+./etc/mail/exim.conf
+----
+begin transports
+...
+# Bogofilter will add X-Bogosity header to all incoming mail. This can go
+# anywhere in the transport section, usually at the very end after
+# address_reply
+bogo_check_transport:
+ driver = pipe
+ command = /usr/bin/exim -oMr bogodone -bS
+ use_bsmtp = true
+ headers_add = X-Bogofilterd: true
+ transport_filter = /usr/bin/bogofilter -d /home/mail/bogofilter -l -p -e -u
+ return_fail_output = true
+ group = mail
+ user = exim
+ home_directory = "/home/mail/bogofilter"
+ current_directory = "/home/mail/bogofilter"
+ log_output = true
+ return_path_add = false
+
+# This updates the bogofilter database with this email explicitly set as
+# spam (intended for spam@domain.tld)
+bogo_setspam_transport:
+ driver = pipe
+ command = /usr/bin/bogofilter -d /home/mail/bogofilter -s -l
+ use_bsmtp = true
+ return_fail_output = true
+ group = mail
+ user = exim
+ home_directory = "/home/mail/bogofilter"
+ current_directory = "/home/mail/bogofilter"
+ log_output = true
+
+
+# Called when delivering mail to the spam directory
+local_delivery_spam:
+ driver = appendfile
+ directory = $home/Mail/.spam
+ maildir_format
+ maildir_use_size_file
+ delivery_date_add
+ envelope_to_add
+ return_path_add
+
+# Called when delivering mail to the unsure directory
+local_delivery_unsure:
+ driver = appendfile
+ directory = $home/Mail/.unsure
+ maildir_format
+ maildir_use_size_file
+ delivery_date_add
+ envelope_to_add
+ return_path_add
+----
+
+We just added four transports.
+
+bogo_check_transport:: Uses the _pipe_ driver. Essentially, this one is a
+ passthrough transport. It takes the email text and sends it through the
+ bogofilter binary with a neutral status. The bogofilter binary inserts a few
+ headers into the email as it processes, and then returns. The most important
+ of these headers for our purposes is the X-Bogosity header. This one will be
+ used later on for delivering mail to the correct directory.
+
+bogo_setspam_transport:: This transport also uses the _pipe_ driver. It is
+ called by the bogo_setspam_router, which only catches email sent to
+ "spam@domain.tld". The intent of this router is to mark all emails sent
+  through it explicitly as spam. This is so users can forward a spam email the
+ filters missed to "spam@domain.tld" and the filter will update itself to
+ assume the text in the received email is "spammy".
+
+local_delivery_spam:: This transport is a final delivery transport (the
+ appendfile driver). All email sent through this transport will be delivered
+ to the destination user's "spam" directory.
+
+local_delivery_unsure:: This transport is a final delivery transport (the
+ appendfile driver). All email sent through this transport will be delivered
+ to the destination user's "unsure" directory.
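+
+For initial training, you can also feed bogofilter existing mail by hand,
+outside of Exim entirely (-s registers a message as spam, -n as ham; the -d
+path matches the configuration above):
+
+----
+bogofilter -d /home/mail/bogofilter -s < saved-spam.eml
+bogofilter -d /home/mail/bogofilter -n < saved-ham.eml
+----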
+
+
+[[a-few-examples]]
+== A Few Examples
+
+There are a few possible paths a given email could take through this system.
+
+
+[[a-spammy-email]]
+=== A Spammy Email
+
+Say you get, for instance, an email that bogofilter would indicate is spam.
+Here's how its path would go using the previous configurations.
+
+. Exim receives the email. The bogo_setspam_router is skipped because the email
+  was sent to you, not spam@domain.tld.
+
+. The next router in line, bogo_check_router, is used because it catches all
+ email. It routes the email through the bogo_check_transport transport.
+
+. The bogo_check_transport has been called and thus pipes the email through
+ the bogofilter binary
+
+. The bogofilter binary inserts the *X-Bogosity* header. In the case of this
+ email which is most likely spam, it will insert "X-Bogosity: Spam".
+
+. Exim continues through the routers since the email still has not been
+ delivered.
+
+. The next router in line is localuser_bogo_spam. It checks that the email
+ header "X-Bogosity" is equal to "Spam". In this case, the
+ bogo_check_transport inserted this header and value, and so this router sends
+  the email through the local_delivery_spam transport.
+
+. The local_delivery_spam transport (having been called by the
+  localuser_bogo_spam router) delivers the email to the user's spam directory.
+
+
+[[an-aspiring-spammy-email]]
+=== An Aspiring Spammy Email
+
+An email bogofilter isn't sure about (spamicity between the 60% ham cutoff and
+the 80% spam cutoff) takes the same path, except that bogofilter inserts
+"X-Bogosity: Unsure". The localuser_bogo_spam router doesn't match, but the
+localuser_bogo_unsure router does, so the email is handed to the
+local_delivery_unsure transport and lands in the user's unsure directory.
+
+[[a-hammy-good-email]]
+=== A Hammy (Good) Email
+
+A good email gets an "X-Bogosity: Ham" header, so neither of the bogo delivery
+routers matches. Exim falls through to the standard localuser router and the
+email is delivered to the user's inbox as usual.
+
+If anyone has questions about this post, please ask your question on the
+discussion page and I'll try to get this updated with
+explanations. Setting up a mail server is hard enough for new folks, without
+adding the extra complication of spam filtering (I'm fairly new to this
+myself), so please ask any and all questions.
+
+
+
+Category:Mail
+Category:Linux
+
+
+// vim: set syntax=asciidoc:
diff --git a/src/Git:Care_Free_Committing.ascii b/src/Git:Care_Free_Committing.ascii
new file mode 100644
index 0000000..2e518c3
--- /dev/null
+++ b/src/Git:Care_Free_Committing.ascii
@@ -0,0 +1,97 @@
+Git:Care-free Committing
+========================
+:author: Aaron Ball
+:email: nullspoon@iohq.net
+
+
+== {doctitle}
+
+In the past, I have found myself a bit afraid to fully use git, because git
+history is something so painful to rewrite, especially when the repo is shared
+by other users. Besides, it just seems bad practice (and it is) to rewrite
+history.
+
+With true code, my concerns are a bit alleviated because most of the time you
+can test that locally. The situation I'm referring to is using git as a
+deployment mechanism for servers. Let me walk you through my old thought
+process.
+
+I want to try a new change for a particular server type. I have two options. I
+can just log into the server and try out my change, hoping that the version I
+later commit as a "copy-paste" into the git repo works identically, or I can
+make the change inside the git repo, push it upstream, triggering the test
+deployment, which I can (and should) test with. However, what if the change
+doesn't work? I can fix it sure, but I'll muck up the history with unnecessary
+"Broke it...fixed it" commits, and removing those will require rewriting
+history.
+
+
+== Branching
+
+Git is well known for its "cheap branching", because it makes it so easy to
+create a throwaway branch, do the messy trial-and-error work there, and then
+rebase and merge the cleaned-up result onto any given branch.
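+
+A quick sketch of that workflow (the branch name here is arbitrary):
+
+----
+git checkout -b ifcfg-testing   # branch off the current branch and switch to it
+# ...hack, commit, break it, fix it, commit again...
+----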
+
+
+== Squashing Commits
+
+Firstly, find the first commit of your branch. We'll assume that this branch
+came off of master and that we are currently working inside this branch (if
+not, run +git checkout <branchname>+)
+
+----
+git log master..HEAD
+----
+
+That command will give you a list of all commits that have happened on your
+feature branch ahead of the master branch. Assuming someone hasn't rewritten
+history (which has happened to me before...ugh), you should be looking at only
+your branch's commits. Scroll to the bottom and copy the commit id for the very
+first commit in the series.
+
+Now run...
+
+----
+git rebase -i <commit_id>^1
+----
+
+Don't forget the "caret 1" (+^1+) at the end there, as it is very important.
+We just told git to rebase the commit series on top of the most recent commit
+from master (the "caret 1" says "one commit before this commit", hence one
+commit before your work started since you selected your first branch commit),
+interactively. Interactive mode gives us a chance to tell git how to handle
+each commit, be it picking, squashing, editing, rewording, etc.
+
+Running the interactive rebase should bring you into an editor with text that
+looks something like...
+
+----
+pick e57d408 Implemented new ifcfg profile functionality
+pick cd476e8 Fixed minor issue
+pick 96a112b Fixed another stupid issue
+pick 9741e2c Testing a small change
+pick ec32a51 Revert "Testing a small change"
+pick 5d61d26 Revert "Fixed another stupid issue"
+...
+----
+
+Here we can change what we want to do with each commit as the rebase proceeds.
+In this case, I want to reduce my commit set down to one commit, the most
+recent (note in your set, the most recent is on the bottom).
+
+----
+pick e57d408 Implemented new ifcfg profile functionality
+s cd476e8 Fixed minor issue
+s 96a112b Fixed another stupid issue
+s 9741e2c Testing a small change
+s ec32a51 Revert "Testing a small change"
+s 5d61d26 Revert "Fixed another stupid issue"
+...
+----
+
+It doesn't matter what the commit messages are at this point. When the time
+comes to merge the commits, you'll get a chance to rewrite the commit message.
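+
+Once the rebase completes, the branch holds a single clean commit, which can
+then be merged back (assuming the branch came off of master):
+
+----
+git checkout master
+git merge <branchname>
+----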
+
+Category:Git
+Category:Drafts
+
+// vim: set syntax=asciidoc:
diff --git a/src/Git_Basics.ascii b/src/Git_Basics.ascii
new file mode 100644
index 0000000..03af5d3
--- /dev/null
+++ b/src/Git_Basics.ascii
@@ -0,0 +1,220 @@
+Git Basics
+==========
+:author: Aaron Ball
+:email: nullspoon@iohq.net
+
+
+== {doctitle}
+
+Git can be a very complicated thing. Someone once told me that we mere humans
+have a very difficult time with it at first. I myself have had a
+tremendous[ly difficult] time learning how to use Git (many
+thanks to http://marktraceur.info/[marktraceur] for all the help). It is an
+incredibly robust, and so a very complicated, solution. What source code
+management system isn't though (especially one that is command line)? This
+document should serve as a very high level view of how to use Git. It will not
+cover advanced functionality such as
+http://git-scm.com/docs/git-cherry-pick[cherry-picking],
+http://git-scm.com/docs/git-merge[merging],
+http://git-scm.com/docs/git-rebase[rebasing], etc. If something is not
+documented here, please see the http://git-scm.com/docs[Git docs] or suggest it
+on the discussion page.
+
+[[working-with-branches]]
+Working with Branches
+---------------------
+
+Branches in Git are like tree branches. The Git repository itself is the
+trunk and the branches are the various projects in the repository. Typically
+(hopefully) these projects are related to each other. In the case of a
+development project with a frequently changing database schema that you wanted
+to back up, the repository would have two branches: the files branch where the
+code files are stored, and the database branch where the database dumps are
+stored.
+
+[[viewing-branches]]
+Viewing Branches
+~~~~~~~~~~~~~~~~
+
+Viewing branches is simple. Type *git branch* and you should see output
+similar to the following:
+
+----
+$ git branch
+
+* database
+ master
+----
+
+To use a different branch, the checkout command is required. In this case, we
+will switch from the _database_ branch to the _master_ branch.
+
+Note: Some decompression happens here, so if the branch to be checked out is
+ very large, this will likely take a few seconds.
+
+----
+$ git checkout master
+
+Checking out files: 100% (6110/6110), done.
+Switched to branch 'master'
+----
+
+[[commits]]
+Commits
+-------
+
+Git does not have commitmentphobia. In fact, it loves commits as if committing
+were its only purpose in life.
+
+In most if not all source code management software, a commit is essentially a
+set of changes to be merged into the master repository.
+
+To create a commit, there are several steps that need to take place.
+
+Firstly, the changed files to be pushed to the repository need to be added. For
+this, we use the _git add_ command.
+
+----
+$ git add ./ex1.blah
+$ git add ./example2.blah
+----
+
+One handy bit for this is the _-A_ switch. If used, git will recursively add
+all files in the specified directory that have been changed for the commit.
+This is very handy if many files were changed.
+
+----
+$ git add -A .
+----
+
+Once the changed files are staged for commit, we just need one more step. Run
+_git commit_ and you will be taken to a text editor (likely vi, though this
+can be specified in the repository configuration) to add a message to your
+commit so you and other developers know what was changed, in case something
+breaks or someone wants to revert.
+
+_This piece is key if you are using the git repository as a code repository
+rather than a versioning repository for backups. Please write meaningful
+commit messages._
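+
+If you'd rather skip the editor, the commit message can also be supplied
+inline with the _-m_ switch:
+
+----
+$ git commit -m "Fixed minor issue"
+----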
+
+There is actually one more piece to committing a change if you have a remote
+repository, whether on another box or in a different location on the local
+box. So that other developers can pull the repository and get your changes,
+you need to _push_ your changes to the remote repository using the _git push_
+command. Please see the
+link:#Pushing_Changes_to_the_Remote_Repository[Pushing Changes to a Remote
+Repository] section for more information on this.
+
+
+[[logs]]
+Logs
+----
+
+All of this commit business is a bit worthless if we can't look at the logs.
+To do that, we use the _git log_ command. This will open the logs for the
+current branch in your system's pager (typically less). If you wish to view
+the logs on a different branch, you can either check out that branch, or you
+can type __git log BranchName__.
+
+A handy option for the _git log_ command is the _--name-status_ switch. If you
+use this switch, git will list all of the commit logs along with all of the
+files affected and what was done (modified, deleted, created, renamed) in each
+individual commit.
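+
+For example, to page through the current branch's history with those per-file
+summaries:
+
+----
+$ git log --name-status
+----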
+
+
+[[remote-repositories]]
+Remote Repositories
+-------------------
+
+Git is a distributed code versioning system which means that every person that
+has pulled the repository has a complete copy of the original. This is really
+great for working remotely because you don't have to be online and able to talk
+to the remote repository to see change history.
+
+
+[[adding-a-remote-repository]]
+Adding a Remote Repository
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Git needs several things to add a remote repository. Firstly, it needs a
+local alias for the remote repository. It also needs a username to log
+in to the repo with, as well as the IP address or hostname of the
+repository, and the path to the actual repo directory on the remote
+server. With that, to add a remote repository the command looks somewhat
+like this:
+
+----
+git remote add origin gitman@someserver.org:repos/CleverProjectName
+----
+
+Now, let's break down what that all means since it seems a tad complicated.
+
+[cols=",,,,,",options="header",]
+|===========================================================================
+|git remote |add |origin |gitman |@someserver.org | :repos/CleverProjectName
+|This is the command to work with remote servers in git.
+|Tells git we are adding a remote
+|The local alias for the remote. Origin is typically used here.
+|The username to log in to the remote server with.
+|This is the server where the repo is stored
+|This is the path to the actual repository directory. Since it does not start
+ with a / it starts in the home directory of gitman (~/).
+|=======================================================================
+
+[[fetching-a-remote-repository]]
+Fetching a Remote Repository
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Now that we have a remote repository added to our local git repository, we
+simply need to fetch the repo. To do this we use the _git fetch_ command. Here
+is where that alias from the remote add command comes in handy.
+
+----
+git fetch origin
+----
+
+This command will fetch all branches of the origin repository.
+
+[[pushing-changes-to-the-remote-repository]]
+Pushing Changes to the Remote Repository
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Now that we have a local copy of a repository to work on and have made some
+changes, some amount of code synchronization needs to take place with an origin
+repository so each of the developers can have the latest-and-greatest. With
+that, a commit only records changes in your local copy of the repository. What
+needs to happen after a commit is to push the change to the origin repository
+so everyone else will also have access to your change set. To do this, we use
+the _git push_ command.
+
+There are two parameters for this though. The first is the local alias for the
+remote repository (typically referred to as origin since presumably the remote
+server is where your repository originated). The second parameter is the branch
+name. Since we often have more than one branch, this is a good piece to pay
+attention to so you don't submit a database dump file to the code branch.
+
+----
+git push origin master
+----
+
+
+[[dealing-with-size-issues]]
+Dealing with Size Issues
+------------------------
+
+Since git is a code versioning system that contains as many versions of a file
+as the number of commits, its size can grow out of hand rather quickly,
+especially when dealing with binaries. Luckily, there is a handy command for
+this very situation: **git gc**.
+
+This command packs the objects in your repository, delta-compressing similar
+content against itself. This can reduce the size of your local and/or remote
+repositories very effectively. I have a repository that would otherwise be
+several gigabytes, with about 60 commits per branch (it's a repo used for
+versioned backups), and _git gc_ reduced it to about 370 megabytes.
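+
+Running it takes no arguments. If the default pass doesn't reclaim enough
+space, the _--aggressive_ switch does a slower but more thorough repack:
+
+----
+$ git gc
+$ git gc --aggressive
+----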
+
+
+Category:Git
+
+
+// vim: set syntax=asciidoc:
diff --git a/src/Linux:Formatting_a_Hard_Drive.ascii b/src/Linux:Formatting_a_Hard_Drive.ascii
new file mode 100644
index 0000000..51ef09b
--- /dev/null
+++ b/src/Linux:Formatting_a_Hard_Drive.ascii
@@ -0,0 +1,108 @@
+Linux:Formatting a Hard Drive
+=============================
+:author: Aaron Ball
+:email: nullspoon@iohq.net
+
+
+== {doctitle}
+
+Good afternoon everyone or good evening/morning, depending on which time zone
+you're reading this from...
+
+*Ahem*
+
+Good afternoon from GMT -7 everyone (much better),
+
+If you've done anything with Linux-based servers, you have most likely at one
+time or another had to format a hard drive, which unfortunately can be quite
+the feat in Linux if you're not too comfortable with the command line (which,
+if you're a Linux sysadmin, you shouldn't be). In this post, I will describe
+how to format an ENTIRE drive (doing a portion is a bit more complicated...post
+in the comments section if you want to see a post on how to do a partial
+format).
+
+[[finding-which-drive-to-format]]
+== Finding which drive to format
+
+To start off, we need to find the disk that needs to be formatted. Do this by
+typing
+
+----
+sudo fdisk -l
+----
+
+If the disk has not been formatted, you should see
+
+----
+Disk /dev/ doesn't contain a valid partition table.
+----
+
+If the drive has already been formatted, you need to identify it by the
+amount of space (the blocks column...it's in kilobytes. For example:
+249023502 is roughly 250 gigabytes). Another method is to use
+
+----
+mount -l
+----
+
+The drive should show up as **/dev/ on /media/**.
+
+
+[[formatting-the-drive]]
+== Formatting the drive
+
+To start up the format process, let's type
+
+----
+fdisk /dev/sdc
+----
+
+(sdc is our example drive. The drive you want to format was found in the
+previous step).
+
+If your drive already has a partition table, you need to delete that. Do this
+by typing the letter *"d"* and pressing enter.
+
+If the drive is NOT formatted yet, all you need to do here is press the letter
+**"n"**.
+
+Fdisk will now prompt you to give it a start and end block for the partition
+(this is essentially how much of the drive to create the partition table for).
+If you want to format the entire drive, just hit enter twice to select the
+defaults (the first and the last blocks...the entire drive).
+
+Now that we've selected which parts of the drive to format, press *"w"* to
+write the changes to the disk (up to this point, no changes have been made so
+if you want to get out, now is the time).
+
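+One thing fdisk does not do is create a filesystem; it only writes the
+partition table. Before the new partition can hold files, it needs a
+filesystem. A minimal sketch, assuming ext4 and our example drive from above:
+
+----
+mkfs.ext4 /dev/sdc1
+----
+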
+Now that we've partitioned the drive and given it a filesystem, we can mount
+it. To mount the drive, there are two options.
+
+First, the drive can be removed and plugged back in. This will cause an
+auto-mount (if that's enabled on your machine). The other way is to use the
+mount command. To do this, we need a mount point. This can simply be a folder
+where your drive will show up (without getting too complicated). For this
+example, I'll put a folder at /media/mydrive.
+
+Earlier, when we partitioned the hard drive, we created the partition located
+at /dev/sdc1 (drive sdc, partition 1). With that out of the way, let's mount
+partition one.
+
+Type +mount /dev/sdc1 /media/mydrive+
+
+What that does is mount partition one (/dev/sdc1) at /media/mydrive.
+
+Many people say practice makes perfect. With that, go practice formatting on
+all of your hard drives and usb sticks.   :)
+
+Once again...
+
+*Disclaimer: I am not responsible for any loss of data or damage to
+personal property due to attempting the contents of this article.*
+
+
+Category:Linux
+
+
+// vim: set syntax=asciidoc:
diff --git a/src/Linux:Luks_Password_Changing.ascii b/src/Linux:Luks_Password_Changing.ascii
new file mode 100644
index 0000000..96e3790
--- /dev/null
+++ b/src/Linux:Luks_Password_Changing.ascii
@@ -0,0 +1,43 @@
+Linux:Luks Password Changing
+============================
+:author: Aaron Ball
+:email: nullspoon@iohq.net
+
+
+== {doctitle}
+
+Given my most recent posts about Linux Linux:System_Encryption[system
+encryption] and Linux:dm-crypt_Encrypted_Home_Directories[encrypted home
+directories], I think this post is a good followup since account passwords
+should be changed routinely.
+
+I use http://code.google.com/p/cryptsetup/wiki/DMCrypt[dm-crypt] with a
+http://code.google.com/p/cryptsetup/[LUKS header] for my work computer's
+encryption. It uses my Active Directory password for the luks password. While
+my Windows password is a very safe one, Windows NTLM
+https://securityledger.com/2012/12/new-25-gpu-monster-devours-passwords-in-seconds/[is
+not the most secure hashing algorithm] on the planet. But I digress.
+
+I just changed my password at work after 3 months of use, which means I've got
+to update my LUKS header with the new key and remove the old one (it still
+works fine, I just want one password for my logins). Yes, this is in the man
+page, but I thought I'd post this here for anyone too lazy (like myself) to
+hunt through the man page. It turns out there is a change key feature of
+cryptsetup.
+
+----
+cryptsetup luksChangeKey <device>
+----
+
+If you run that command, it will ask you for the old password. Type that in and
+if it matches, you will be prompted to enter the new password twice. Once
+that's done, there's no need to unmount and remount. The next time the volume is
+remounted though, it will require the new password.
+
+
+Category:Linux
+Category:Security
+Category:Encryption
+
+
+// vim: set syntax=asciidoc:
diff --git a/src/Linux:RAID_Setup.ascii b/src/Linux:RAID_Setup.ascii
new file mode 100644
index 0000000..e9455cc
--- /dev/null
+++ b/src/Linux:RAID_Setup.ascii
@@ -0,0 +1,253 @@
+After fighting with the problem detailed in my Btrfs:RAID_Setup[last
+post] about this, I decided to go hunting for information about RAID 5
+implementation in btrfs. It turns out that it hasn't been completely
+implemented yet. Given the status verbiage on their wiki page, I'm
+surprised it works at all. I suspect the wiki isn't entirely up to date
+though, since it does seem to work to a certain extent. I still need to
+do more research to hunt this down.
+
+You can find that wiki post
+https://btrfs.wiki.kernel.org/index.php/Project_ideas#Raid5.2F6[here].
+
+[[the-new-new-solution]]
+== The NEW New Solution
+
+Since RAID 5/6 is not yet completely implemented in Btrfs, I need to find
+another solution. Given that I still want redundancy, the only other obvious
+option I thought I had here was a
+http://en.wikipedia.org/wiki/Standard_RAID_levels#RAID_1[RAID 1] configuration.
+However, as many Google searches do, searching for something leads to something
+else very interesting. In this case, my search for Linux RAID setups sent me
+over to the official kernel.org
+https://raid.wiki.kernel.org/index.php/Linux_Raid[RAID page], which details how
+to use http://en.wikipedia.org/wiki/Mdadm[mdadm]. This might be a better option
+for any RAID level, regardless of Btrfs support, since it detaches the RAID
+layer from the filesystem. Everyone loves a layer of abstraction.
+
+[[setup---raid-5]]
+=== Setup - RAID 5
+
+Let's get the RAID array set up.
+
+----
+mdadm -C /dev/md0 -l raid5 -n 3 /dev/sdb1 /dev/sdc1 /dev/sdd1
+# Or the long version so that makes a little more sense...
+mdadm --create /dev/md0 --level raid5 --raid-devices 3 /dev/sdb1 /dev/sdc1 /dev/sdd1
+----
+
+
+[[setup---raid-1]]
+=== Setup - RAID 1
+
+----
+mdadm -C /dev/md0 -l raid1 -n 3 /dev/sdb1 /dev/sdc1 /dev/sdd1
+# Or the long version so that makes a little more sense...
+mdadm --create /dev/md0 --level raid1 --raid-devices 3 /dev/sdb1 /dev/sdc1 /dev/sdd1
+----
+
+
+[[what-just-happened]]
+=== What Just Happened?
+
+[cols=",,,",options="header",]
+|=======================================================================
+|mdadm |-C,--create /dev/md0 |-l,--level raid5 |-n,--raid-devices 3 /dev/sdb1 /dev/sdc1 /dev/sdd1
+|
+|Create a virtual block device at /dev/md0
+|Set the raid level to RAID 5 for our new device
+|The number of RAID devices is 3 - /dev/sdb1, /dev/sdc1, and /dev/sdd1.
+|=======================================================================
+
+
+[[the-rest]]
+=== The Rest
+
+We did just create a RAID array and a virtual device to map to it, but that's
+all. We still need a filesystem. Given that this whole series of posts has been
+about using Btrfs, we'll create one of those. You can still use whatever
+filesystem you want though.
+
+----
+mkfs.btrfs /dev/md0
+mount /dev/md0 /mnt/home/
+----
+
+
+[[mounting-at-boot]]
+=== Mounting at Boot
+
+Mounting at boot with mdadm is a tad more complicated than mounting a typical
+block device. Since an array is just that, an array, it must be assembled on
+each boot. Thankfully, this isn't hard to do. Simply run the following command
+and it will be assembled automatically.
+
+----
+mdadm -D --scan >> /etc/mdadm.conf
+----
+
+That will append your current mdadm setup to the mdadm config file in /etc/.
+Once that's done, you can just add /dev/md0 (or your selected md device) to
+/etc/fstab like you normally would.
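+
+For example, a minimal fstab entry for the array might look like the following
+(adjust the mount point and filesystem type to your setup):
+
+----
+/dev/md0    /mnt/home    btrfs    defaults    0 0
+----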
+
+
+[[simple-benchmarks]]
+== Simple Benchmarks
+
+Here are some simple benchmarks on my RAID setup. For these I have three
+1TB Western Digital Green drives with 64MB cache each.
+
+
+[[single-drive-baseline]]
+=== Single Drive Baseline
+
+[[ext4]]
+==== Ext4
+
+1GB Block Size 1M (1000 blocks)
+
+----
+[root@zion home]# dd if=/dev/zero of=./test.img bs=1M count=1000
+1000+0 records in
+1000+0 records out
+1048576000 bytes (1.0 GB) copied, 4.26806 s, 246 MB/s
+----
+
+1GB Block Size 1K (1000000 blocks)
+
+----
+[root@zion home]# dd if=/dev/zero of=./test2.img bs=1K count=1000000
+1000000+0 records in
+1000000+0 records out
+1024000000 bytes (1.0 GB) copied, 6.93657 s, 148 MB/s
+----
+
+
+[[raid-5]]
+=== RAID 5
+
+[[btrfs]]
+==== Btrfs
+
+1GB Block Size 1M (1000 blocks)
+
+----
+[root@zion home]# dd if=/dev/zero of=./test.img bs=1M count=1000
+1000+0 records in
+1000+0 records out
+1048576000 bytes (1.0 GB) copied, 3.33709 s, 314 MB/s
+----
+
+
+1GB Block Size 1K (1000000 blocks)
+
+----
+[root@zion home]# dd if=/dev/zero of=./test2.img bs=1K count=1000000
+1000000+0 records in
+1000000+0 records out
+1024000000 bytes (1.0 GB) copied, 7.99295 s, 128 MB/s
+----
+
+[[ext4-1]]
+==== Ext4
+
+1GB Block Size 1M (1000 blocks)
+
+----
+[root@zion home]# dd if=/dev/zero of=./test.img bs=1M count=1000
+1000+0 records in
+1000+0 records out
+1048576000 bytes (1.0 GB) copied, 12.4808 s, 84.0 MB/s
+----
+
+1GB Block Size 1K (1000000 blocks)
+
+----
+[root@zion home]# dd if=/dev/zero of=./test2.img bs=1K count=1000000
+1000000+0 records in
+1000000+0 records out
+1024000000 bytes (1.0 GB) copied, 13.767 s, 74.4 MB/s
+----
+
+[[raid-1]]
+=== RAID 1
+
+[[btrfs-1]]
+==== Btrfs
+
+1GB Block Size 1M (1000 blocks)
+
+----
+[root@zion home]# dd if=/dev/zero of=./test.img bs=1M count=1000
+1000+0 records in
+1000+0 records out
+1048576000 bytes (1.0 GB) copied, 3.61043 s, 290 MB/s
+----
+
+1GB Block Size 1K (1000000 blocks)
+
+----
+[root@zion home]# dd if=/dev/zero of=./test2.img bs=1K count=1000000
+1000000+0 records in
+1000000+0 records out
+1024000000 bytes (1.0 GB) copied, 9.35171 s, 109 MB/s
+----
+
+
+[[ext4-2]]
+==== Ext4
+
+1GB Block Size 1M (1000 blocks)
+
+----
+[root@zion home]# dd if=/dev/zero of=./test.img bs=1M count=1000
+1000+0 records in
+1000+0 records out
+1048576000 bytes (1.0 GB) copied, 8.00056 s, 131 MB/s
+----
+
+1GB Block Size 1K (1000000 blocks)
+
+----
+[root@zion home]# dd if=/dev/zero of=./test2.img bs=1K count=1000000
+1000000+0 records in
+1000000+0 records out
+1024000000 bytes (1.0 GB) copied, 9.3704 s, 109 MB/s
+----
+
+
+Those aren't exactly dazzling write speeds, but they're also not too bad, given
+what's happening in the background and that I'm using three standard 7200 rpm
+desktop drives with 64MB of cache apiece. Later down the line I might test
+this with a RAID 0 to see what the max speed of these drives is (though it
+should predictably be three times the single-drive speed).
+
+
+[[final-thoughts]]
+== Final Thoughts
+
+My favorite thing about this at this point is the layer of abstraction doing
+RAID through mdadm provides (we all know how much Linux folk love modularity).
+Using the RAID functionality in Btrfs means I am tied to using that filesystem.
+If I ever want to use anything else, I'm stuck unless what I want to move to
+has its own implementation of RAID. However, using mdadm, I can use any
+filesystem I want, whether it supports RAID or not. Additionally, the setup
+wasn't too difficult either. Overall, I think (like anyone cares what I think
+though) that they've done a pretty great job with this.
+
+Many thanks to the folks who contributed to mdadm and the Linux kernel that
+runs it all (all 20,000-ish of you). I and many many other people really
+appreciate the great work you do.
+
+With that, I'm going to sign off and continue watching my cat play with/attack
+the little foil ball I just gave her.
+
+
+
+Category:Linux
+Category:Btrfs
+Category:Ext4
+Category:Storage
+Category:RAID
+
+
+// vim: set syntax=asciidoc:
diff --git a/src/Linux:Secure_Authentication.ascii b/src/Linux:Secure_Authentication.ascii
new file mode 100644
index 0000000..9b21934
--- /dev/null
+++ b/src/Linux:Secure_Authentication.ascii
@@ -0,0 +1,264 @@
+Linux:Secure Authentication
+===========================
+:author: Aaron Ball
+:email: nullspoon@iohq.net
+:github: https://github.com/nullspoon/
+
+
+== {doctitle}
+
+**Edit**: I wrote the script for automating this finally. It can be found on my
+link:{github}/keymanage[GitHub].
+
+In my experience, Linux authentication seems to be one of those problems with
+so many answers. It's hard to define even a range of methodologies that could
+be considered right, let alone narrowing it down to one or two. I've been
+dealing with this one quite a bit recently at work and would like to
+post here an idea I had. Just to be warned, this idea was not accepted for our
+solution, despite no one being able to give me more than one reason to not use
+it, which I will detail at the end of this post along with any other exploits I
+can imagine for this authentication methodology.
+
+[[in-a-perfect-world...]]
+== In a perfect world...
+
+In a perfect world, chroot environments would work securely and our app
+developers and third party vendors would write code on par with apache or
+openssh which could be started as root and spawn child processes in user space
+for security. All application files would fit nicely into the defined standards
+for Linux filesystem organization so we could package everything up nicely and
+deploy using repo servers. To top it all off, all applications would roll their
+own logs instead of filling up /var/log or somewhere on / since they rarely
+follow standards. However, this is rarely if ever the case (I've never seen it
+at least).
+
+What I've seen up to this point is third party applications that install
+themselves exclusively in /opt; applications that are hard coded to not start
+unless running as uid 0 (root); binary startup scripts that situate themselves
+in /etc/rc.d/init.d/ (wtf guys?), and just general stubbornness as to where
+the program is located.
+
+[[securing-an-application-server]]
+== Securing an Application Server
+
+The first step I typically take to securing applications is to run them in user
+space as a service account with access only to its directory in the /apps mount
+point. I put that one to use on my own servers and it has served me very well.
+However, with this we have a few problems.
+
+[[accessing-service-accounts]]
+== Accessing Service Accounts
+
+While security does tend to introduce complications and interruptions into
+workflow, it shouldn't be catastrophic. If your security measures are so
+strict that your users can't do what they need to, you're doing it wrong.
+Simply running in userspace introduces several problems. A few for example...
+
+1. How do your users get to their service accounts in a secure way (no shared
+ passwords or keys)?
+
+2. How do your users transfer files to and from their servers since they can't
+ directly access the service accounts?
+
+3. How do you manage this web of shared account access without it consuming
+ much of your time?
+
+Specifically, a solution is needed for the users to access their service
+accounts in an accountable and auditable way without hindering their ability to
+do their jobs [too much].
+
+This has been a problem that I and some fellow engineers have struggled with
+for a while now. Here's a few common service account authentication mechanisms
+that I'm sure we've all seen that aren't necessarily the greatest.
+
+
+[[service-account-passwords]]
+=== Service Account Passwords
+
+1. They need to be shared for multiple users to have access
+
+2. They can be shared without the admins knowing (no accountability)
+
+3. They have to be routinely changed which causes a huge headache for everyone
+ involved, os and app admins alike
+
+
+[[service-account-keys]]
+=== Service Account Keys
+
+1. They need to be shared for multiple users to have access
+
+2. They can be shared without the admins knowing (no accountability)
+
+3. They have to be routinely changed which causes a slightly lesser headache
+ than passwords for everyone involved, os and app admins alike
+
+
+[[sudo]]
+=== Sudo
+
+Sudo provides a pretty clean solution to the problem. It allows you to
+limit who has access to the service account, as well as log who uses it and
+when. Just put your application admins into their own group and give
+that group explicit access to run ONE command...
+
+[[sudo-su---service_account]]
+==== sudo su - service_account
+
+This one is tremendously popular for very obvious reasons. However, despite
+using sudo, this one still has problems
+
+1. Your end users can't perform file transfers between their boxes since they
+ can't directly access their service accounts without a key or password
+
+2. We still lack accountability. Once the user is in a sudo'd shell, their
+ commands are no longer logged.
+
+3. Managing this across an environment can be a very time consuming thing
+ unless you have a source on a server that you propagate out, but then you
+ have to deal with server compliance.
+
+Granted, there is a pretty obvious _Unixy_ solution to this, but it involves
+your users all being in the same group as your service account, mucking around
+with umasks that unset themselves on reboot unless explicitly set, and making
+sure your sticky bit sticks.
+
+There is another way though.
+
+[[my-poorly-formed-idea]]
+== My Poorly Formed Idea
+
+My idea uses a combination of the crontab, jump hosts, ssh keys, and segregated
+networks.
+
+Start with two (or more) segregated networks: one for administration, and
+several for operations. You will probably want three for operations:
+production, QA, and dev.
+
+From there, you put your servers in your operations networks and set up
+firewall or routing rules to only allow ssh (port 22 or whatever port you
+prefer) traffic between the administration network and the operations networks.
+Your operations networks should now only be accessible for users using the
+applications and admins coming in from the administration network using ssh.
+
+Next, build out a jump box on your administration network. One per application
+would be ideal for separation of concerns, but one for all apps should work
+well also. For sake of simplicity, we'll assume a single jump host.
+
+Next, put all of your service accounts on that jump host with their own home
+directories in /apps. This assumes you have defined and reserved UIDs and GIDs
+for each of your service accounts so they can be on one system without
+conflicts. Provide sudo access to each user group to _sudo su -
+<service_account>_ into their respective service accounts on the jump host.
+
+At this point, the application admins/owners still don't have access to their
+service accounts on the operations servers. Here's where they get that access
+using rotating ssh keys. Write a script to generate ssh keys (I'll post the
+source for mine later), ssh out to a box using the key to be replaced, push the
+new key, and remove the old key and any others while using the new key. This
+allows you to schedule key changes automatically using cron. With that in
+place, just have the script swap out each service account's key every x minutes
+(15 or 30 is what I have in mind). Once you've got the key exchange working,
+modify the sshd_config files throughout your environment to disallow all user
+login over ssh with passwords, that way if your users do set a password to try
+to circumvent your security, it won't be accepted anyways. You can also just
+disable password changing.
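+
+For illustration, here is a minimal sketch of one rotation cycle for a single
+service account on a single host (the path and hostname are hypothetical; my
+actual script is the one linked at the top of this post):
+
+----
+#!/usr/bin/env bash
+# Hypothetical example values
+key=/apps/myapp/.ssh/id_rsa
+target=myapp@ops-server.example.com
+
+# Generate the replacement key pair alongside the old one
+ssh-keygen -q -t rsa -b 4096 -N '' -f "${key}.new"
+
+# Using the old key, replace authorized_keys with only the new public key,
+# which both installs the new key and removes the old one (and any others)
+ssh -i "$key" "$target" 'cat > ~/.ssh/authorized_keys' < "${key}.new.pub"
+
+# Verify the new key works before promoting it locally
+ssh -i "${key}.new" "$target" true && {
+  mv "${key}.new" "$key"
+  mv "${key}.new.pub" "${key}.pub"
+}
+----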
+
+[[pros]]
+== Pros
+
+[[operations-networks-become-a-black-box]]
+=== Operations Networks Become a Black Box
+
+With this method, there is only one way in to every single operations
+box. That one way in is in a secured subnet, presumably accessible only
+through a vpn or when on site.
+
+[[file-transfers-are-seamless]]
+=== File Transfers are Seamless
+
+Users can use scp or sftp to transfer files seamlessly using the jump host as
+the medium. If the keys are always regenerated as id_rsa, or the ssh config
+file is set up for each account, key regeneration won't affect anyone because
+it takes milliseconds to overwrite the old key with the new one, so any new
+connections out will use the new key. End users shouldn't even see an effect.
+
+[[safety-despite-turnover]]
+=== Safety Despite Turnover
+
+If your company has any measure of turnover, you've undoubtedly gone through
+the password and key change process after an employee leaves the team. With
+this method, you're automatically changing the key every X minutes, so even if
+they do get the key, it'll only be valid for a very short while.
+
+[[lower-licensing-costs]]
+=== Lower Licensing Costs
+
+Many companies, through the use of additional software such as Open LDAP,
+Samba, or some other third party product, put their Linux/Unix servers on their
+Windows Domain. A perk of this is it provides access to Linux to your AD users
+without having to manage a few hundred or thousand passwd, group, and shadow
+files. The downside to this is that if a third party product is used, it costs
+a lot of money in licenses. With the jump host rotating key model, you can put
+just the jump host(s) on the domain, and leave all operations servers off of
+the domain. It saves on licensing costs, maintenance time, and software
+installs. It also removes yet one more service running on your operations boxes
+which removes one more access point for exploitation. Additionally, the fewer
+pieces of software running on a server, the less chance an update will break
+the applications it's hosting.
+
+
+[[clean-home-directories]]
+=== Clean Home Directories
+
+Next up, clean home directories. If you have an entire team of developers
+and/or application admins logging into every operations system, /home is going
+to be very large on lots of systems, costing money for backups (if you back
+home directories up that is), wasting storage space (which is fairly cheap
+these days though), and adding spread to your users' files, making it
+cumbersome for everyone to manage, including non system admins. With the jump
+host rotating key method, all of your home directories are on one host, so file
+management for the support staff is much easier.
+
+
+[[cons]]
+== Cons
+
+
+[[single-point-of-failure]]
+=== Single Point of Failure
+
+This is the one objection I heard from people at work. This can be
+mitigated in at least two ways. One is by having one jump host per
+application. It still beats putting hundreds or thousands of systems in
+AD and all the management and licensing costs that go with that.
+Another way to mitigate this is to have a secondary jump host and set up
+rsync to synchronize the primary jump host with the backup, using the
+backup as a hot standby.
+
+
+[[single-point-of-global-access]]
+=== Single Point of Global Access
+
+This is the one problem with this idea that I think is most relevant and
+potentially exploitable. However, if your administration boxes are on a network
+that is not reachable from anywhere but controlled locations, this shouldn't be
+too big of a deal. That said, if a mistake is made in the networking security or
+routing and a malicious user gets to a jump host, they still have to get into
+the service accounts which are inaccessible except through sudo, which means
+the malicious user has to exploit an existing account. Without that account's
+password though, they can't sudo so they would only have access to that one
+user's files. Even if they could sudo though, they will still only have access
+to the service accounts that user works with, so their impact would be minimal
+unless that user works on very high profile applications. To sum it up, there
+are three very solid security measures in place (network segregation, user
+accounts, limited sudo access requiring passwords) that the malicious user has
+to get through before having any really impacting access.
+
+
+Category:Linux
+Category:Security
+Category:Authentication
+
+
+// vim: set syntax=asciidoc:
diff --git a/src/Linux:System_Encryption.ascii b/src/Linux:System_Encryption.ascii
new file mode 100644
index 0000000..e9ff71b
--- /dev/null
+++ b/src/Linux:System_Encryption.ascii
@@ -0,0 +1,155 @@
+Linux:System Encryption
+=======================
+:author: Aaron Ball
+:email: nullspoon@iohq.net
+
+
+== {doctitle}
+
+As mentioned in a Linux:dm-crypt_Encrypted_Home_Directories[previous post], I
+use dm-crypt with a luks header and the pam-mount module to encrypt and mount
+the home directories on my laptop and server. While this works fantastically,
+it does have a potential fatal flaw, which is that my operating system is
+readily available to a would-be attacker. For instance, if they were skilled
+enough (which I am not), they could modify any number of applications on my
+system to quietly dump or send my encryption password the next time I
+mount my home directory, thus defeating my security. Further, my system is
+readily available to any linux user with good mounting and chroot knowledge
+(which is probably most of us), and thus one could do all kinds of mischief on
+the unencrypted system partition of my computer.
+
+I'm sure this is a bit tin-foil hatted of me. I have nothing to hide (though
+it's not about that, it's a matter of principle). Further, there is no one
+[_that I know of_] who would be *that* interested in me or my data. Despite
+all that, this is a very cool thing that I am doing purely because it can be
+done (in slang I believe the term is "the cool factor").
+
+[[a-preliminary-note]]
+== A Preliminary Note
+
+I would not recommend this be done for servers or multi-user laptops or
+desktops. This process requires that a password be typed or a key be available
+every time the system is booted, which requires physical presence to do so.
+Since most servers are administered and used remotely over a network, a reboot
+would mean a service outage until someone was able to open a local terminal to
+type the password (to say nothing of having to share the password with
+multiple people).
+
+[[overview]]
+== Overview
+
+Due to the scope of this post, and because I don't want to focus on
+documenting other tasks that are more generic and less related to the actual
+encryption of the system, I will not be covering how to back up your system or
+how to partition your drive. However, please heed the warning below about
+backups before proceeding.
+
+During the installation process we will...
+
+. Set up encryption
+. Modify the grub defaults so it properly sets up the loop device on boot
+. Modify the Initramfs Configuration (this one is Arch Linux specific)
+
+[[setting-up-encryption]]
+Setting Up Encryption
+~~~~~~~~~~~~~~~~~~~~~
+
+We're going to assume here that the system partition will be installed
+on sda2. With that, let's "format" that with luks/dm-crypt.
+
+WARNING: Again, back up your data if you haven't already. This will irrevocably
+ destroy any data on the partition [unless you are good with data
+ recovery tools].
+
+----
+cryptsetup luksFormat /dev/sda2
+----
+
+And so our installation can continue, the loop device needs to be set up and a
+filesystem created
+
+----
+# Open the encrypted container to the system map device (though you can name it whatever you want)
+cryptsetup luksOpen /dev/sda2 system
+# ...Type the password
+# Create the filesystem here - I use btrfs
+mkfs.your_choice /dev/mapper/system
+# Mount the filesystem
+mount /dev/mapper/system /mnt/ # Or wherever your distro's installation mount point is
+----
+
+Now that this is done, it's time to re-install or copy from backups your system
+to the new encrypted container.
+
+[[modifying-the-grub-defaults]]
+Modifying the Grub Defaults
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Now that the system partition is set up and our system re-installation is
+complete, it's time to configure Grub so it knows the system partition is
+encrypted. Without this step, you won't get past the initramfs since an
+encrypted system partition without a password is effectively useless. Here I
+will again assume your system partition is on /dev/sda2.
+
+Change...
+
+./etc/default/grub
+----
+...
+GRUB_CMDLINE_LINUX_DEFAULT="quiet"
+...
+----
+
+...to ...
+
+./etc/default/grub
+----
+...
+GRUB_CMDLINE_LINUX_DEFAULT="cryptdevice=/dev/sda2:system quiet"
+...
+----
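+
+Grub reads its boot entries from the generated grub.cfg, not from
+/etc/default/grub directly, so the config must be regenerated after this
+change. On Arch, that is typically
+
+----
+grub-mkconfig -o /boot/grub/grub.cfg
+----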
+
+
+[[modifying-the-initramfs-configuration]]
+Modifying the Initramfs Configuration
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This part is oriented towards https://archlinux.org[Arch Linux]. Modifying the
+initramfs generation configuration is something that varies from distribution
+to distribution. I run Arch, so Arch it is! (let me know though if you want to
+know how to do it on another distribution and I'll figure it out and update the
+post).
+
+This is actually very simple on Arch. Simply open _/etc/mkinitcpio.conf_
+and edit the *HOOKS* line. What matters here is that the *encrypt* hook
+occurs _before_ the *filesystems* hooks.
+
+./etc/mkinitcpio.conf
+----
+...
+HOOKS="base udev autodetect modconf block encrypt filesystems keyboard fsck"
+...
+----
+
+Once you've done that, save and close the config file and run
+
+----
+mkinitcpio -p linux
+----
+
+You should now be able to reboot your system and it will prompt you for a
+password immediately after grub. If you were successful, you should be brought
+to a screen that looks something like...
+
+[role="terminal"]
+----
+A password is required to access the sda volume:
+
+Enter passphrase for /dev/sda2:_
+----
+
+
+Category:Encryption
+Category:Security
+
+
+// vim: set syntax=asciidoc:
diff --git a/src/Linux:Vpnc_Restart_Script.ascii b/src/Linux:Vpnc_Restart_Script.ascii
new file mode 100644
index 0000000..d7f7d0a
--- /dev/null
+++ b/src/Linux:Vpnc_Restart_Script.ascii
@@ -0,0 +1,47 @@
+Linux: VPNC Restart Script
+==========================
+:author: Aaron Ball
+:email: nullspoon@iohq.net
+
+
+== {doctitle}
+
+The VPN at my company is very... spotty... at best. When working from home, it
+used to boot you about once every hour. For whatever reason though, it has
+recently started booting sessions every five minutes. Now, the solution is of
+course to speak with our networking folks rather than to write a script to fix
+the issue on a client by client basis. Unfortunately, due to the culture and
+various political situations, the networking folks will not fix this because
+they don't believe it's an issue. All opinions aside, this sounds like an
+opportunity for a nice shell script.
+
+To start things off, on my Linux box I use vpnc from the command line as I
+don't want to install network manager due to additional resource consumption
+(albeit a very small amount). That said, throw the following script in
++~/bin/vpnconnect+ and include +~/bin+ in your PATH variable (+export
+PATH=~/bin:$\{PATH}+).
+
+[[source]]
+== Source
+
+_Edit_: Found a pretty sizeable flaw in my script. Pulled the source until I
+can sort it out.
+
+
+[[order-of-operations]]
+== Order of Operations
+
+. Check if vpnc is already running
+* Start it if it is not running
+. Start an infinite loop
+.. Sleep 5 seconds to keep from using too many resources
+.. Check cpu time on the pid - if it is greater than 1 minute, kill the pid
+   and restart vpnc (see the sketch below)
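+
+Until the fixed script is back up, here is a minimal sketch of that loop. This
+is not the pulled script; it assumes vpnc starts without prompting (via its
+config file) and that cpu time is a good-enough proxy for a hung session:
+
+----
+#!/usr/bin/env bash
+# Start vpnc if it isn't already running
+pgrep vpnc > /dev/null || vpnc
+
+while true; do
+  sleep 5
+  pid=$(pgrep -o vpnc) || { vpnc; continue; }
+  # cputime is [dd-]hh:mm:ss; grab the minutes field
+  mins=$(ps -o cputime= -p "$pid" | awk -F: '{print $(NF-1)+0}')
+  if [ "$mins" -ge 1 ]; then
+    kill "$pid"
+    vpnc
+  fi
+done
+----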
+
+
+
+Category:Linux
+Category:Drafts
+
+
+// vim: set syntax=asciidoc:
diff --git a/src/Linux:dm-crypt_Encrypted_Home_Directories.ascii b/src/Linux:dm-crypt_Encrypted_Home_Directories.ascii
new file mode 100644
index 0000000..8ee0e94
--- /dev/null
+++ b/src/Linux:dm-crypt_Encrypted_Home_Directories.ascii
@@ -0,0 +1,213 @@
+Linux:dm-crypt Encrypted Home Directories
+=========================================
+:author: Aaron Ball
+:email: nullspoon@iohq.net
+
+
+== {doctitle}
+
+There are three primary methods for encrypting one's home directory seamlessly
+in Linux: http://en.wikipedia.org/wiki/Dm-crypt[dm-crypt],
+http://ecryptfs.org/[eCryptFS], and http://www.arg0.net/encfs[EncFS]. All
+differences aside, this post will cover dm-crypt (as indicated by the title of
+course). A few things to note before going forwards though. First, this method
+is by no means the standard. I'm not even sure if there is a standard way to do
+this. This is just the way I've done it and it has worked out swimmingly thus
+far on more than one computer. Secondly, my method detailed here will use
+something called http://code.google.com/p/cryptsetup/[LUKS]. I highly recommend
+this, if not just for convenience. While it does have its pitfalls, they
+shouldn't be too bad if you keep a backup of your data. Really though, when
+encrypting, you should _always_ keep more than one copy of your data in case
+something goes awry.
+
+Before proceeding, here is a list of what this will give you once completed, so
+you can decide if this is what you want before reading this monolithic post.
+
+. Users will each have their own encrypted home directory.
+ * Each home directory will be unlocked using the user's own password.
+ * Users have complete storage anonymity. Even root can't tell how many
+ files they are storing, filenames, or even how much data they have unless
+ the user is logged in at the time of inspection.
+. User's home directories will be seamlessly decrypted and mounted at login.
+. Users will have their own virtual device, so they will have a storage
+ "quota". To expand it, the virtual device needs to be extended on its own
+ (some might consider this cumbersome).
+
+
+[[setup]]
+== Setup
+
+This should be relatively simple. Install a package likely called *cryptsetup*
+(most of the mainstream distros should have it). This is the utility we will be
+using to manage dm-crypt volumes. Note also that cryptsetup can be used for
+managing more than just dm-crypt and luks. It also works with Truecrypt (much
+to my excitement a few months ago when I needed to extract some data from a
+Truecrypt volume, but didn't want to install it because of all the suspicion
+surrounding it lately).
+
+[[modifying-pam]]
+=== Modifying PAM
+
+[[etcpam.dsystem-auth]]
+==== /etc/pam.d/system-auth
+
+This piece assumes your distribution puts this file here and that it is named
+this. Unfortunately, I can't really write this part to be distribution-agnostic
+as most of them do this differently to an extent. The contents of the file
+will likely look similar, despite its name. For anyone wondering though, this
+section is written from an Arch Linux instance.
+
+Open /etc/pam.d/system-auth in your favorite editor. Be sure to do this either
+with sudo or as root or you won't be able to save your changes.
+
+Here we need to put in calls to a module called pam_mount.so so it will be
+called at the right time to pass the user's password to the mount command,
+allowing for seamless encrypted home directory mounting. Pay attention to where
+the calls to pam_mount.so are. Order is very important in this file.
+
+NOTE: Many distributions use eCryptFS as their default encryption for home
+ directories. They do it this way as well, but using pam_ecryptfs.so
+ instead of pam_mount.so.
+
+./etc/pam.d/system-auth
+----
+#%PAM-1.0
+
+auth required pam_unix.so try_first_pass nullok
+auth optional pam_mount.so
+auth optional pam_permit.so
+auth required pam_env.so
+
+account required pam_unix.so
+account optional pam_permit.so
+account required pam_time.so
+
+password optional pam_mount.so
+password required pam_unix.so try_first_pass nullok sha512 shadow
+password optional pam_permit.so
+
+session optional pam_mount.so
+session required pam_limits.so
+session required pam_unix.so
+
+session optional pam_permit.so
+----
+
+
+[[etcsecuritypam_mount.conf.xml]]
+==== /etc/security/pam_mount.conf.xml
+
+This is the configuration file used by pam_mount when the user logs in.
+Depending on your distribution, it may or may not already be set up the way we
+need for this.
+
+Just before the +</pam_mount>+ at the end of the xml file, insert the following
+lines.
+
+./etc/security/pam_mount.conf.xml
+----
+...
+
+<volume fstype="crypt" path="/home/.%(USER)" mountpoint="/home/%(USER)" options="space_cache,autodefrag,compress=lzo" />
+<mkmountpoint enable="1" remove="true" />
+
+</pam_mount>
+----
+
+Before proceeding, there are a couple of assumptions that I need to mention
+about the way I do this here.
+
+. My home directories are all formatted with btrfs. If you're not using that,
+ then remove the *autodefrag,compress=lzo* piece in the options section.
+
+. The encrypted block device files are located at */home/.USERNAME* (note the
+ dot).
+
+
+[[creating-an-encrypted-home-per-user]]
+=== Creating an Encrypted Home Per User
+
+The creation of each user's home directory has a few fairly simple steps [if
+you've been using linux command line for a bit]. For the sake of more succinct
+directions, here we will assume a username of __kevin__.
+
+. Allocate user's encrypted home space (assuming 15 gigs)
+ * +dd if=/dev/zero of=/home/.kevin bs=1G count=15+
+ * This command writes 15 gigabytes of zeros to one file, /home/.kevin
+
+. Encrypt the user's home device
+ * +cryptsetup luksFormat /home/.kevin+
+ * This command will require the user to enter _their_ password when
+ prompted after running the command, as that will be what is passed to
+ the file container on login.
+
+. Open the user's new home device (you'll need the user to enter their password
+ again)
+ * +cryptsetup luksOpen /home/.kevin kevin+
+ * This will only be needed the first time around. Kevin can't use this
+ yet because it doesn't have a filesystem and it can't be mounted for the
+ same reason.
+
+. Format the opened dm-crypt device
+ * +mkfs.btrfs /dev/mapper/kevin+
+ * This is assuming you want to use btrfs. Otherwise you'd use mkfs.ext4
+ or some other filesystem of choice.
+
+. Cleanup
+ * +cryptsetup luksClose kevin+
+ * In this case, _kevin_ can be the alias given to the opened device on
+ luksOpen. You can also provide its path at /dev/mapper/kevin.
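+
+As mentioned at the top of this post, expanding a user's "quota" later means
+extending the virtual device on its own. A rough sketch of growing kevin's
+container by 5 gigabytes (run while kevin is logged out):
+
+----
+# Append 5 more gigabytes of zeros to the container file
+dd if=/dev/zero bs=1G count=5 >> /home/.kevin
+# Open the container; the loop device will pick up the new size
+cryptsetup luksOpen /home/.kevin kevin
+cryptsetup resize kevin
+# Grow the filesystem to fill the device
+mount /dev/mapper/kevin /mnt
+btrfs filesystem resize max /mnt
+umount /mnt
+cryptsetup luksClose kevin
+----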
+
+
+[[how-it-works]]
+== How it Works
+
+When a user logs in, they type their username and password. Those are passed to
+pam, which verifies the user's identity using the _pam_unix.so_ module. If the
+credentials provided by the user are correct, the next step is to pass that
+username and password to the _pam_mount.so_ module. This module runs the
+commands dictated in the pam_mount.conf.xml. The commands pam mount runs (as
+per our earlier configuration) are effectively
+
+----
+cryptsetup luksOpen /home/.${username} _home_${username}
+mount /dev/mapper/_home_${username} /home/${username}
+----
+
+Those commands open the dot file (/home/.username) for the given user with the
+recently provided password. It then mounts that user's decrypted dot file at
+the user's home directory (/home/username).
+
+
+[[backups]]
+== Backups
+
+This kind of encryption makes backups a bit difficult to pull off as the
+administrator. Because you don't have each user's password, you can't back up
+their data. This leaves you with one option - back up the encrypted block
+devices themselves. Depending on how much space each user is given, this can
+take a long time (though rsync helps significantly with that) and a lot of
+space. This is the downside to
+https://wiki.archlinux.org/index.php/Encryption#Block_device_encryption[block
+device encryption].
+https://wiki.archlinux.org/index.php/Encryption#Stacked_filesystem_encryption[Stacked
+encryption] though, while rumored to be less secure for various reasons, allows
+administrators access to encrypted versions of each user's data. With stacked
+encryption, each individual file's contents are encrypted, but the user's
+filenames, paths, and file sizes are still accessible to the administrator(s)
+(hence the rumored security flaw).
+
+As a user though (if you're using this on your laptop for instance), backups
+are simple because the data itself is available to you (you have the password
+after all). This however assumes you have user rights on a remote server to
+rsync your data to. Even if the remote server has the same dm-crypt setup,
+rsync still authenticates with your credentials, so your data can go from an
+encrypted laptop/desktop to an encrypted server.
+
+
+
+Category:Storage
+Category:Security
+Category:Linux
+
+
+// vim: set syntax=asciidoc:
diff --git a/src/Linux_Storage_Devices,_Partitions,_and_Mount_Points_Explained.ascii b/src/Linux_Storage_Devices,_Partitions,_and_Mount_Points_Explained.ascii
new file mode 100644
index 0000000..58d6a70
--- /dev/null
+++ b/src/Linux_Storage_Devices,_Partitions,_and_Mount_Points_Explained.ascii
@@ -0,0 +1,131 @@
+Linux Storage Devices, Partitions, and Mount Points Explained
+=============================================================
+:author: Aaron Ball
+:email: nullspoon@iohq.net
+
+
+== {doctitle}
+
+Earlier today I was talking with a guy in the Ubuntu IRC channel (shout out to
+codemagician!) who was asking how to format a USB stick via the command line.
+Through explaining how it worked, I realized that to someone who isn't already
+very familiar with Linux, the way Linux handles drives can be very confusing,
+especially since you can control almost every step of the mounting process,
+unlike with Windows, which is why Windows is so easy (albeit less
+functional).
+
+What do you say I do a post on how Linux handles storage devices? Yes? No?
+Great!
+
+[[the-quick-overview]]
+The Quick Overview
+------------------
+
+When you plug in a USB stick (for example) to your linux machine, it is
+assigned a device location (/dev/sd<something>). From there, that new device is
+assigned a mount point (assuming we are using Ubuntu here; many Linux distros
+won't auto-mount a storage device, even if it is internal). This mount point
+can be located anywhere, but typically is located in /media/. From the folder
+created in /media (or wherever the mountpoint is located), you can indirectly
+read and write data.
+
+[[the-dev-directory]]
+The /dev/ Directory
+-------------------
+
+The /dev/ directory is an interesting one to explain. I probably won't do it
+right, but I'll give it a shot either way. Dev is short for devices. If you
+run ls from within /dev/ you will likely see things like sda, sdb, hda, and
+many more devices.
+
+What do these mean? Basically, each of the files listed in /dev/ is a direct
+pointer to either a physical or a virtual device. This part is actually super
+cool I think. Basically, when you transfer say, a picture, to your usb stick,
+the operating system literally writes the instructions for writing the file (in
+binary) to the device location/file (/dev/sdb for instance), which in turn
+writes it to the USB stick. You may say that's not that neat, but consider your
+audio device. When your music player (amarok, rhythmbox, etc) plays music, it
+literally streams the music file's uncompressed binary audio to the audio
+device file and that is in turn translated by the hardware driver and converted
+into speaker vibrations.
+
+You can actually try this by running a quickie command in the command line. The
+audio device is typically located at /dev/dsp. Pick a file on your hard drive
+that you want to "listen" to (it is likely going to sound like static), and run
+the following command. For this example, I'm going to use a jpeg image.
+
+----
+cat /home/username/Desktop/background.jpeg > /dev/dsp
+----
+
+What we just did there was to redirect the file contents of background.jpeg
+into the device pointed to by /dev/dsp. Mine sounds like static for some time
+(it's a really high resolution jpeg).
+
+If THAT isn't cool, I don't know what is.
+
+
+[[mount-points]]
+Mount Points
+------------
+
+Once your storage device is assigned a device location (IE: /dev/sdb), it then
+needs a mount point that interfaces with the device location. In a less
+complicated fashion, you need a folder that represents the drive. For
+instance, say you plug in your usb stick named Maverick (that's one I formatted
+last night). Ubuntu creates a temporary folder located at /media/Maverick/.
+That became the mount point for my usb stick. All a mount point is, generally
+speaking (I'll get into the technicalities of it in the next paragraph), is
+simply a folder that points to a device location. Ubuntu, Mint, as well as
+Debian all default to creating folders in /media/.
+
+
+[[so-what-do-mount-points-and-device-locations-have-anything-to-do-with-each-other]]
+So what do mount points and device locations have anything to do with each other?
+---------------------------------------------------------------------------------
+
+Here's where it gets pretty technical (so much so that I don't fully know how
+this works). Succinctly, a mount point provides an interface that your
+operating system uses to convert data into binary for writing directly to the
+device location. That means that when you copy a picture file to your usb stick
+(IE: /media/Maverick), your operating system converts it to binary and streams
+said binary to the device location associated (IE: /dev/sdb1) with that mount
+point.
+
+[[why-sdabcdef...]]
+Why sda,b,c,d,e,f...?
+---------------------
+
+The sd part of that stands for SCSI disk, a naming convention that now covers
+most drives, including SATA and USB. The a, b, c, etc. is simply an
+incrementing value assigned to your drive. If you plug in a usb drive, it will
+be assigned sdb. If you plug in a second, it will be assigned sdc. If you plug
+in a third, it will be assigned sdd, and so on.
+
+[[how-do-you-explain-the-number-at-the-end-of-my-device-locations-ie-devsdb1]]
+How do you explain the number at the end of my device locations (IE: /dev/sdb1)?
+--------------------------------------------------------------------------------
+
+That number represents the partition. For instance, your local hard drive is
+device sda (presumably it was the first drive to be plugged in since your
+computer is running off of it). Your hard drive has partitions; these are like
+virtual sections of your hard drive with which you can divide it into one or
+more pieces (mine is divided into 8, actually). Typically usb sticks
+only have one partition.
+
+That's all for now. I think I covered just about everything. If I missed
+anything, please let me know in the comments section and I'll add it on as soon
+as I get the chance.
+
+Now if you will all excuse me, I was at work at 2:00 this morning and
+desperately need sleep. Don't break too many things on your computer by
+redirecting file output to random devices now. I'm watching you...really.
+http://xkcd.com/838/[So is Santa].
+
+:)
+
+
+
+Category:Linux
+
+
+// vim: set syntax=asciidoc:
diff --git a/src/Migrating_from_Drupal_7_to_Habari_.8.ascii b/src/Migrating_from_Drupal_7_to_Habari_.8.ascii
new file mode 100644
index 0000000..1d2e5cf
--- /dev/null
+++ b/src/Migrating_from_Drupal_7_to_Habari_.8.ascii
@@ -0,0 +1,91 @@
+Migrating from Drupal 7 to Habari .8
+====================================
+:author: Aaron Ball
+:email: nullspoon@iohq.net
+
+
+== {doctitle}
+
+Lately I've been trying out the latest release of
+http://habariproject.org/[Habari] and I really like it. They have created a
+very simple, yet functional and very clean interface with which to blog (not to
+mention its code implements the newest of just about everthing). With that,
+bitnode used to be run on Drupal, so converting from elenty billion 'articles'
+(that's the technical number) to posts in Habari was not looking too easy.
+After some searching, I found that the best way to convert without having to
+write some sql statements would be to migrate from Drupal 7 to Drupal 6, then
+from Drupal 6 to Wordpress 2.3; then from Wordpress 2.3 to Habari .8.
+
+What?
+
+So it seemed that manually copying the data from column to column with sql
+statements would be my best route. After some time (and so so many browser
+tabs), I finally came up with some queries that would migrate you from Drupal 7
+to Habari .8. Please keep in mind that these will not migrate all of your data.
+These are only for migrating your posts and their related comments.
+
+Assumptions:
+
+* Habari instance table prefix is habari_
+* Drupal instance table prefix is drupal_
+* Our author user id is 2
+
+
+----
+-- Move our posts over using the drupal ids so we can relate our comments later
+insert into `habari_posts` (id, title, slug, content, user_id, status, pubdate, updated) select nid,title,title,body_value, 2, status, created, changed from drupal_node join drupal_field_data_body on drupal_node.nid=drupal_field_data_body.entity_id;
+----
+
+Here we are doing a simple insert into habari_posts from another table.
+However, due to Drupal's robust database structure (not sure if it's 3NF), we
+have to query another table for our remaining post data as the meta-data (post
+subject, various dates, status, etc) is stored in the drupal_node table and the
+actual post is stored in the drupal_field_data_body table.
+
+Once again, in this query I have statically defined user id 2. You will need to
+change this to the ID of the Habari user who should show up as having posted
+everything. If you need to import multiple users' posts, you will need to query
+for the Drupal user IDs and change the Habari user IDs to match the posts
+(that's the easiest way, as shown in the sketch below).
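+
+For that multi-user case, a hypothetical follow-up update might look like this
+(the Drupal uid 15 to Habari user id 3 mapping is made up; substitute your
+own):
+
+----
+-- Re-point posts imported from drupal uid 15 at habari user id 3
+update habari_posts set user_id = 3
+  where id in (select nid from drupal_node where uid = 15);
+----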
+
+----
+-- update our drupal published status to the habari version
+update habari_posts set status=2 where status=1;
+-- update our drupal draft status to the habari version
+update habari_posts set status=1 where status=0;
+----
+
+Here we are just converting our post statuses from Drupal values to Habari
+values. In Drupal, status 1 is published and status 0 is draft; in Habari (as
+of 2011.12.30), 2 is published and 1 is draft.
+
+----
+-- Now we migrate our comments
+insert into habari_comments (post_id, name, email, url, ip, content, status, date) select nid, name, mail, homepage, hostname, comment_body_value, status, created from drupal_comment join drupal_field_data_comment_body on drupal_comment.cid=drupal_field_data_comment_body.entity_id;
+----
+
+Here we are grabbing the comments for each of the posts. Since we pulled in all
+the post IDs from the Drupal tables in our first query, we can do the same here
+and everything should line up perfectly. Once again, like with the posts,
+Drupal stores comment data in more than one table. In Drupal, the comment
+meta-data is stored in the drupal_comment table and the actual comment data is
+stored in the drupal_field_data_comment_body table.
+
+And that should be it. You've just migrated all of your post and comment data
+to Habari .8. If you have any images used in your posts, you will also need to
+copy Drupal's *sites/default/files/* directory to the root directory of your
+Habari instance.
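+
+For example (both paths are hypothetical; adjust them to your document
+roots):
+
+----
+cp -r /var/www/drupal/sites/default/files /var/www/habari/
+----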
+
+If anyone tries this out, please let me know how it worked for you. It worked
+fine for me (evidenced by the fact that bitnode is still viewable), but I'd
+like some input on how to better write these queries in case there are any
+additional fields I may have missed that people would be interested in.
+Thanks for reading!
+
+
+Category:Drupal
+Category:Habari
+Category:Blogging
+
+
+// vim: set syntax=asciidoc:
diff --git a/src/Mutt:Sorting_Mail_Like_a_Boss.ascii b/src/Mutt:Sorting_Mail_Like_a_Boss.ascii
new file mode 100644
index 0000000..ecf31f7
--- /dev/null
+++ b/src/Mutt:Sorting_Mail_Like_a_Boss.ascii
@@ -0,0 +1,61 @@
+Mutt:Sorting Mail Like a Boss
+=============================
+:author: Aaron Ball
+:email: nullspoon@iohq.net
+
+
+== {doctitle}
+
+Mutt is a relatively difficult mail client to learn. However, as most if not
+all mutt users will tell you, once you've got the hang of it, no other mail
+client can come close to matching the efficiency and speed of command line
+mail (or anything else on the command line, for the most part). I recently
+started using mutt myself and just can't get over how efficient it is once
+you've got your configuration sorted out. Yesterday I easily cleaned out 800
+emails in about five minutes using some very simple search terms (and no, I
+didn't just randomly delete 800 emails). Despite what the title of this post
+implies, I am not amazing with mutt, but what I do know can get me around
+very quickly. Here's what I use nearly every day.
+
+
+[[tags]]
+=== Tags
+
+Mutt supports this neat thing called tagging. It's basically the command line
+equivalent of multiselect (ctrl+click or shift+click).
+
+**To tag a message**, move the cursor to it and hit the _t_ key.
+
+**To tag a group of emails based on a pattern**, for example "Change
+Requests", hit capital __T__. You will see __Tag messages matching:__ at the
+bottom of your mutt window. Type your pattern, hit enter, and you should now
+see the matching messages highlighted.
+
+Finally, **to perform an action on all tagged messages**, precede the command
+letter (s for save/move, d for delete, N for new, etc.) with a semicolon
+( ; ). For instance, to delete all tagged messages, type __;d__.
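+
+For instance (search term hypothetical), archiving everything with "Change
+Request" in the subject looks like this; the text after each # is
+annotation, not something you type:
+
+----
+T                       # prompt: Tag messages matching:
+~s "Change Request"     # tag everything with a matching subject
+;s                      # prompt for a mailbox to save (move) tagged mail to
+=archive                # move all tagged messages to the archive folder
+----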
+
+
+[[limit-filter]]
+=== Limit (Filter)
+
+In mutt, you can do this really cool (though not novel in the slightest)
+thing wherein you filter the displayed messages by a regular expression. You
+don't have to use a regular expression of course, but nevertheless it can be
+done.
+
+**To limit/filter the currently displayed emails**, head over to a directory
+you want a better filtered look at and press the _l_ key. You will see
+__Limit to messages matching:__ at the bottom of your mutt window. Type
+whatever you want to limit to.
+
+Note here though that limit by default only searches mail metadata unless
+otherwise specified. This makes searches go much faster, since most of the
+time you're just cleaning up your inbox by subject, recipients, and date. If
+you do want to search the body of your emails, precede your search term with
+__~B__, and mutt will go the extra mile and search email bodies for you.
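+
+A few patterns to get you started (all values hypothetical; the #
+annotations again are not typed):
+
+----
+~f boss@example.com            # messages from a particular sender
+~s "Change Request"            # subject contains "Change Request"
+~d 01/01/2011-31/12/2011       # sent within a date range
+~B invoice                     # full message search (slower)
+----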
+
+
+Category:Linux
+
+
+// vim: set syntax=asciidoc:
diff --git a/src/My_.bashrc.ascii b/src/My_.bashrc.ascii
new file mode 100644
index 0000000..938e44b
--- /dev/null
+++ b/src/My_.bashrc.ascii
@@ -0,0 +1,40 @@
+Not that any of you care that much, but I thought I might post my .bashrc
+file as it evolves, in case anyone is looking for something I have in mine. I
+have written quite a few of these, and on occasion I end up cleaning them out
+entirely and starting over, so I can keep things lean with only what I need.
+
+That said, here's what we've got so far. The initial contents at the top are
+from the Arch Linux skel file.
+
+I'll keep updating this as I make changes.
+
+----
+#
+# ~/.bashrc
+#
+
+# If not running interactively, don't do anything
+[[ $- != *i* ]] && return
+
+alias ls='ls --color=auto'
+PS1='[\u@\h \W]\$ '
+
+# My modifications
+export TERM=xterm-256color
+
+# This will spawn a new tmux instance as our "shell" if tmux isn't already
+# running
+if [[ -z $TMUX ]]; then
+  # Not inside tmux yet: replace this shell with a tmux session
+  exec tmux
+else
+  # Already inside tmux, so just print a blank line
+  echo
+fi
+----
+
+
+
+Category:SSH
+Category:Bash
+
+
+// vim: set syntax=asciidoc:
diff --git a/src/Running_Load_Tests_with_a_Remote_VSTS_Controller_and_Associated_Agents.ascii b/src/Running_Load_Tests_with_a_Remote_VSTS_Controller_and_Associated_Agents.ascii
new file mode 100644
index 0000000..b696e5b
--- /dev/null
+++ b/src/Running_Load_Tests_with_a_Remote_VSTS_Controller_and_Associated_Agents.ascii
@@ -0,0 +1,51 @@
+Running Load Tests with a Remote VSTS Controller and Associated Agent
+=====================================================================
+:author: Aaron Ball
+:email: nullspoon@iohq.net
+
+
+== {doctitle}
+
+Running a multi-agent load test isn't too complicated, compliments of Visual
+Studio Team Suite. Assuming you have a controller/agent environment set up,
+running the actual test requires a bit of modification to the test project so
+that the local test machine doesn't run the tests itself; rather, they run on
+a remote machine (the controller and its agents). To set this up...
+
+[[load-in-your-test-project]]
+Load in your test project
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+image:files/01_Open_Test.jpg[height=300]
+
+* At the top of the window, select *Test -> Edit Test Run Configurations ->
+ Local Test Run (localtestrun.testrunconfig)*
+ +
+ image:files/02_Edit_Test_Run_Configurations.jpg[height=250]
+
+* Select *Controller and Agent* at the top left. Select the *Remote* radio
+  button. Select the controller.
+ +
+ image:files/03_Select_Controller.jpg[height=350]
+
+* Click **Apply**. Once you have selected Apply, you will receive a prompt
+  like the one shown below
+ +
+ image:files/04_Answer_Prompt.jpg[height=140]
+
+* Click *Ok*
+
+* Click *Close*
+
+Once all of that is done, it's time to run your test. You'll notice that once
+your test has been run, at the bottom left side of the results you'll see a
+reference to the remote controller and the number of agents it controlled.
+
+Happy Testing!
+
+
+Category:Microsoft
+Category:Visual_Studio
+
+
+// vim: set syntax=asciidoc:
diff --git a/src/SQL_2008_Reinstall_Errors.ascii b/src/SQL_2008_Reinstall_Errors.ascii
new file mode 100644
index 0000000..2cb6715
--- /dev/null
+++ b/src/SQL_2008_Reinstall_Errors.ascii
@@ -0,0 +1,91 @@
+SQL 2008 Reinstall Errors
+=========================
+:author: Aaron Ball
+:email: nullspoon@iohq.net
+
+
+== {doctitle}
+
+Hello again all. Recently, after the server build was 'finished', I
+discovered that the SQL install was not configured to use the proper
+authentication method or service accounts (oops), and without mixed mode
+authentication enabled, Windows authentication could not be used to log in to
+SQL to fix these things. That being said, I had to uninstall SQL 2008
+(Standard Edition) and do a reinstall to correct these issues. Time to grab
+some popcorn and a drink and sit back to watch that entertaining progress bar
+as it slowly creeps across the tiny 800x600 virtual console window.
+
+I configured the SQL install and ran into an ambiguous error (how typical).
+
+----
+This access control list is not in canonical form and therefore cannot be modified.
+----
+
+How quaint. Thankfully, after searching for a few minutes with our friend
+Google, I stumbled upon a Microsoft feedback article that seemed to contain my
+answer.
+
+Here's what needs to be done.
+
+Navigate in an explorer window to
+
+C:\Program Files\Microsoft SQL Server\100\Setup Bootstrap\Log
+
+The link at the end of this post to the Microsoft feedback article says that
+from there you open up the "**details.txt**" file. I found that that folder
+actually contained 11 folders (from the current install and the previous
+install) and a file called "summary.txt". I found the right "**details.txt**"
+file in the most recently created folder.
+
+Once you've located the right "details.txt" file, open it up in notepad (or
+your editor of choice) and scroll to the end of the file (it's pretty big, so
+use the scroll bar). Near the end, you should see some text that looks
+similar to...
+
+----
+2009-05-30 18:02:40 Slp: Sco: Attempting to set directory full path
+2009-05-30 18:02:40 Slp: Sco: Attempting to normalize directory path C:\Program Files\Microsoft SQL Server\100\COM
+2009-05-30 18:02:40 Slp: Sco: Attempting to check if directory C:\Program Files\Microsoft SQL Server\100\COM exists
+2009-05-30 18:02:40 Slp: Sco: Attempting to set security descriptor for directory C:\Program Files\Microsoft SQL Server\100\COM, security descriptor D:(A;OICI;FRFX;;;S-1-5-80-3263513310-3392720605-1798839546-683002060-3227631582)
+2009-05-30 18:02:40 Slp: Sco: Attempting to check if directory C:\Program Files\Microsoft SQL Server\100\COM exists
+2009-05-30 18:02:40 Slp: Sco: Attempting to normalize security descriptor D:(A;OICI;FRFX;;;S-1-5-80-3263513310-3392720605-1798839546-683002060-3227631582)
+2009-05-30 18:02:40 Slp: Sco: Attempting to replace account with sid in security descriptor D:(A;OICI;FRFX;;;S-1-5-80-3263513310-3392720605-1798839546-683002060-3227631582)
+2009-05-30 18:02:40 Slp: ReplaceAccountWithSidInSddl -SDDL to be processed: D:(A;OICI;FRFX;;;S-1-5-80-3263513310-3392720605-1798839546-683002060-3227631582)
+2009-05-30 18:02:40 Slp: ReplaceAccountWithSidInSddl -SDDL to be returned: D:(A;OICI;FRFX;;;S-1-5-80-3263513310-3392720605-1798839546-683002060-3227631582)
+2009-05-30 18:02:40 Slp: Prompting user if they want to retry this action
+----
+
+The text you're looking for is the directory path listed after the text
+
+----
+Attempting to normalize directory path
+----
+
+Open up another explorer window and navigate to (not inside) the directory
+that is specified after the previous quote. Right click the directory (in
+this case, the directory is COM within the directory 100), select
+*Properties*, and open the *Security* tab. Windows should give you an error
+that says something along the lines of the permissions being out of order and
+might not be effective (sorry...I forgot to copy that error).
+
+Click "*" window to close it out as well.
+
+Go back to your installer now and click *Retry* on the error window.
+
+I had to fix two directories. The guy in the Microsoft feedback article said he
+had to fix five directories. That being said, this may need to be done more
+than once.
+
+That about sums this up. The article I found that helped me get started fixing
+this can be found here:
+
+http://connect.microsoft.com/SQLServer/feedback/ViewFeedback.aspx?FeedbackID=355216
+
+Dirk
+
+
+Category:Microsoft
+Category:MsSQL
+
+
+// vim: set syntax=asciidoc:
diff --git a/src/SQL_Server_2008_Memory_Management.ascii b/src/SQL_Server_2008_Memory_Management.ascii
new file mode 100644
index 0000000..4125534
--- /dev/null
+++ b/src/SQL_Server_2008_Memory_Management.ascii
@@ -0,0 +1,72 @@
+SQL Server 2008 Memory Management
+=================================
+:author: Aaron Ball
+:email: nullspoon@iohq.net
+
+
+== {doctitle}
+
+Once again, hello all. Recently I had a problem with SQL Server. I was
+sifting through the processes in Task Manager a few days ago, ordered by
+memory consumption. At the top of the list was SQL Server (sqlservr.exe),
+weighing in at 200 megabytes of memory. I decided to look past that one since
+200 megabytes isn't too unreasonable for SQL, especially when it's hosting
+the data for quite a few SharePoint web applications.
+
+Today, I checked again. After my server had been online for two and a half
+days, SQL Server had grown to over 650 megabytes of memory (653,224 KB to be
+specific). Seeing as I have not made any changes to my local SharePoint
+environment in that time (I'm currently developing a non-SharePoint related
+project), I decided to look into putting a cap on SQL's memory consumption.
+Originally I had 2 gigabytes of RAM for my server; I added an extra gigabyte
+to that and SQL took up the additional space.
+
+As it turns out, one can put a maximum and a minimum limit on SQL. Here's how.
+
+Open up SQL Server Management Studio 2008
+
+Type in the information to connect to the server that has SQL server running on
+it and click connect.
+
+Right click the server name
+
+image:files/MgmtStudio1.jpg[height=400]
+
+Click Properties
+
+Select Memory on the left side of the window that comes up
+
+image:files/MgmtStudio2.jpg[height=400]
+
+Under Server Memory Options, adjust the minimum and maximum memory settings
+to what you need.
+
+Click OK
+
+Right Click the server name again
+
+Select Stop from the menu
+
+Click the necessary buttons to get through the prompts
+
+Right Click the server name yet again
+
+Select Start from the menu
+
+Click the necessary buttons to get through the prompts
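+
+If you would rather script the change than click through Management Studio,
+the same settings can also be set with T-SQL in a query window. A sketch,
+where the 1024 and 256 megabyte values are just examples:
+
+----
+-- Enable advanced options so the memory settings are visible
+EXEC sys.sp_configure 'show advanced options', 1;
+RECONFIGURE;
+
+-- Cap SQL Server at 1 GB and reserve a 256 MB floor
+EXEC sys.sp_configure 'max server memory (MB)', 1024;
+EXEC sys.sp_configure 'min server memory (MB)', 256;
+RECONFIGURE;
+----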
+
+And that's it. Mine (as in the screenshots) has yet to go over 300 megabytes of
+memory consumption.
+
+Thanks for reading.
+
+
+Dirk
+
+
+
+Category:Microsoft
+Category:MsSQL
+
+
+// vim: set syntax=asciidoc:
diff --git a/src/Securing_a_Postfix_Smtp_Server.ascii b/src/Securing_a_Postfix_Smtp_Server.ascii
new file mode 100644
index 0000000..79f20f5
--- /dev/null
+++ b/src/Securing_a_Postfix_Smtp_Server.ascii
@@ -0,0 +1,226 @@
+Securing a Postfix Smtp Server
+==============================
+:author: Aaron Ball
+:email: nullspoon@iohq.net
+
+
+== {doctitle}
+
+I must start this post with the acknowledgement that I know only what I've
+experienced on this topic.
+
+I recently set up my own mail server for the fun of it. I figured it was
+something I'd never done, so why not, right?
+
+Well, one day later, spammers discovered my server and began using it to send
+out spam mail (curse you spammers!). I didn't notice this until I received a
+notification from my hosting provider that my network IO was over the threshold
+I had set. I promptly logged in, tailed the mail logs and discovered
+unbelievable amounts of mail being rejected by Google, Yahoo, Aol, and Hotmail.
+Why? Spam.
+
+With that, I spent the next day figuring out how to better secure my smtp
+server. I'd like to detail some of the exploits that the spammers used to get
+in to my server, how I failed in configuring my server properly, and how I
+fixed it.
+
+[[leaving-an-open-relay]]
+Leaving an Open Relay
+~~~~~~~~~~~~~~~~~~~~~
+
+An open relay is basically an SMTP server that requires no authentication
+and/or allows connections from outside IP addresses, so anyone can send
+emails from anywhere to anywhere. The settings in question specific to this
+issue in my configuration were the following:
+
+----
+smtpd_recipient_restrictions = permit_mynetworks, check_relay_domains
+...
+mynetworks = 0.0.0.0/0 127.0.0.0/8 [::fff:127.0.0.0]/104 [::1]/128
+----
+
+Basically that is an open relay. Here's why.
+
+* Firstly, *smtpd_recipient_restrictions = permit_mynetworks* allows any
+ email to be sent without any restrictions as long as the email originated
+ from a box in the IP ranges specified in the mynetworks variable.
+
+* Secondly, *mynetworks = 0.0.0.0/0* allows emails to be sent through my
+  SMTP server from any client within the IP range of 0.0.0.0-255.255.255.255.
+  This is bad because any computer can try to send emails through my SMTP
+  server and succeed because of the permit_mynetworks restriction (or lack
+  thereof). A safer baseline is sketched below.
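+
+For reference, a minimal sketch of a safer starting point, assuming mail
+should only be relayed for the local box and for authenticated users (the
+rest of this post fills in the details):
+
+----
+mynetworks = 127.0.0.0/8 [::1]/128
+smtpd_recipient_restrictions = permit_mynetworks, permit_sasl_authenticated, reject_unauth_destination
+----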
+
+[[specifying-incorrect-configuration-parameters]]
+Specifying Incorrect Configuration Parameters
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+One of my first mistakes when configuring Postfix was misspelling some smtpd_
+parameters, using smtp_ instead of smtpd_ to prefix them. As it turns out, if
+you do this, Postfix ignores your attempted configuration without a peep.
+This one went on for a long time before I noticed that two of my smtpd_
+fields were missing the 'd'. As soon as I put those in, everything started
+working as it should. It was still insecure, but at least it was following
+the specifications of my config file.
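+
+One way to catch this class of typo is postconf, which prints only the
+parameters explicitly set in main.cf, so a misspelled smtp_/smtpd_ name is
+much easier to spot:
+
+----
+# Print only explicitly-set parameters and eyeball them for typos
+postconf -n
+
+# Or narrow the output down to, say, the sasl-related lines
+postconf -n | grep sasl
+----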
+
+
+[[not-specifying-a-correct-smtpd_sasl_path]]
+Not Specifying a Correct smtpd_sasl_path
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This one took me a while. The *smtpd_sasl_path* is a path to the socket file
+for your SASL server. In my case, this is Dovecot.
+
+As it turns out, Postfix defaults to running in chroot mode which makes its
+root directory /var/spool/postfix/. This was my first mistake. I was specifying
+
+----
+smtpd_sasl_path = /var/spool/postfix/private/auth-client
+----
+
+and it was not starting up because it couldn't find the socket file. This was
+because it was looking for the file at
+/var/spool/postfix/var/spool/postfix/private/auth-client, a path which
+clearly does not exist. The solution is to simply specify a relative path.
+
+----
+smtpd_sasl_path = private/auth-client
+----
+
+I decided that I would get smart though and shave off some text from the field
+value by configuring Dovecot to place the socket file at
+/var/spool/postfix/auth-client rather than at
+/var/spool/postfix/private/auth-client (speaking in absolute terms despite
+running in chroot mode). This returned the following error
+
+----
+warning: when SASL type is "dovecot", SASL path "auth-client" should be a socket pathname
+----
+
+As it turns out, postfix won't operate with the SASL socket file path outside
+of the private directory. So with that, I placed my auth-client file back in
+the private directory and Postfix started up fine.
+
+
+[[not-specifying-the-allowed-senders-file]]
+Not Specifying the Allowed Senders File
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Even if you do require authentication, you still need to specify which users
+can send email with which addresses. This was a bit of a surprise to me
+initially because I was under the impression that a password is associated
+with an email address, not that email addresses are associated with a
+username and password. To keep users from being able to send email from
+addresses that are not theirs (specifically, randomly generated addresses in
+my case), you need to create a mapping file that maps usernames to the
+addresses they are authorized to send mail as. In my case, this is a
+one-to-one relationship (one address per username). Before my example I'd
+like to note that the filename is not required to be the one I use (though my
+filename is the one used in the Postfix setup documentation).
+
+Okay. Let's create the map file. To do this, open up and edit
+/etc/postfix/controlled_envelope_senders (this file likely doesn't exist yet)
+
+----
+vim /etc/postfix/controlled_envelope_senders
+----
+
+Once you've got that open, you simply need to put the maps in there.
+
+----
+# envelope sender        owners
+jcricket@example0.com    jimminey
+----
+
+Now that we've done that, we need to turn it into a binary. Run the following
+command and it will generate a <filename>.db binary map file in the same
+directory as the original file.
+
+----
+postmap /etc/postfix/controlled_envelope_senders
+----
+
+Presto! Now the user jimminey can send email as jcricket@example0.com. However,
+so can everyone else...still.
+
+Now that we have our controlled envelope senders file, we need to reference
+it in our postfix main.cf and set postfix up to restrict access to the maps
+specified in that file. Crack er open in your favorite editor and put the
+following lines in somewhere after *smtpd_sasl_auth_enable*
+
+----
+smtpd_sasl_auth_enable = yes
+...
+# This line specifies our map file for use by postfix
+# Note that this does NOT reference controlled_envelope_senders.db
+smtpd_sender_login_maps = hash:/etc/postfix/controlled_envelope_senders
+# This line sets postfix to reject anyone who authenticates but tries to send email as an address they aren't permitted to use
+smtpd_recipient_restrictions = reject_sender_login_mismatch, permit_sasl_authenticated, reject_unauth_destination
+----
+
+So what we've just done is tell Postfix where our map file is
+(smtpd_sender_login_maps). After that, we tell Postfix to reject any users
+who have been authenticated but are trying to send with an address they
+aren't authorized to use per our map file (smtpd_recipient_restrictions).
+Please note that *reject_sender_login_mismatch* comes at the beginning of the
+smtpd_recipient_restrictions field. This is key. It is so key, in fact, that
+I missed it (I only miss the key stuff of course, thanks Murphy). This was
+the fourth exploit attempt that got me.
+
+
+[[misordering-smtpd_recipient_restrictions]]
+Misordering smtpd_recipient_restrictions
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This one is the final bit that let the spammers in (so far at least).
+
+The smtpd_recipient_restrictions are restrictions that you can place on
+the users and their emails based on various things. In my case, I had
+the following restrictions string
+
+----
+smtpd_recipient_restrictions = permit_mynetworks, permit_sasl_authenticated, reject_sender_login_mismatch, reject_unauth_destination
+----
+
+Postfix applies these restrictions in the order in which they are specified.
+As the documentation puts it, "Restrictions are applied in the order as
+specified; the first restriction that matches wins." As soon as one
+restriction matches, the ones that follow don't get applied. This was very
+problematic because in my case permit_mynetworks came first. So that I could
+log in from my cell phone, which has an IP address that changes, I had set
+
+----
+mynetworks = 0.0.0.0/0 127.0.0.0/8 [::fff:127.0.0.0]/104 [::1]/128
+----
+
+which matches any IP address that connects to my SMTP server. Since Postfix
+takes the first match and goes no further, and every IP address is in
+0.0.0.0/0, anyone can send mail through my SMTP server. This = bad.
+
+What you should do is start your restrictions with the most strict and follow
+with the less strict. In my case, that looks like
+
+----
+smtpd_recipient_restrictions = reject_sender_login_mismatch, permit_sasl_authenticated, reject_unauth_destination
+----
+
+In the event someone tries to send an email, first they must log in. If they
+don't log in, they are rejected due to reject_sender_login_mismatch (we can't
+do a match if we don't have a sender username). Secondly, once logged in, the
+user must be authorized to use the address they are trying to send as, per
+the smtpd_sender_login_maps line. Finally, once the user has been
+authenticated and has permission to use the address they are trying to send
+as, their email is not rejected; they are then filtered through
+permit_sasl_authenticated. This basically runs a check to see if they are
+authenticated (which we know they already are because of the previous
+filter), and since they are, they are permitted, and Postfix stops looking
+for more matches because it has found one that permits the user to perform
+their requested action.
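+
+To verify that the relay really is closed, a manual SMTP session from an
+outside box (hostname and addresses hypothetical) should now end in a
+rejection:
+
+----
+$ telnet mail.example.com 25
+220 mail.example.com ESMTP Postfix
+HELO test.example.net
+250 mail.example.com
+MAIL FROM:<spammer@example.net>
+250 2.1.0 Ok
+RCPT TO:<victim@example.org>
+554 5.7.1 <victim@example.org>: Relay access denied
+----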
+
+As chef Elzar says, "Bam!"
+
+
+Category:Linux
+Category:Postfix
+
+
+// vim: set syntax=asciidoc:
diff --git a/src/Server_Administration:Firewalls.ascii b/src/Server_Administration:Firewalls.ascii
new file mode 100644
index 0000000..9c7450f
--- /dev/null
+++ b/src/Server_Administration:Firewalls.ascii
@@ -0,0 +1,41 @@
+Server Administration:Firewalls
+===============================
+:author: Aaron Ball
+:email: nullspoon@iohq.net
+
+
+== {doctitle}
+
+Hello again all, The majority of the servers that I manage have to communicate
+with many other servers I manage for various reasons. Inevitably, I have many
+firewall blocking issues. Despite my age, I still have trouble remembering all
+of those commonly used ports and what they are for. That being said, this post
+will list all of the Windows default firewall ports used for the various
+software sources. Yes these are all readily available at other sites. This is
+simply a central collection.
+
+I will update this post when I have more blocks come up.
+
+Post comments if you would like to have a particular port added to the list.
+
+[cols=",",width="50%"]
+|===================================================
+|FTP |21
+|HTTP |80
+|HTTPS |443
+|POP3 |110
+|SMTP |25
+|SQL Server Management Studio (remote connect) |1433
+|Terminal Services |3389
+|VMWare Server Administration |8222
+|VMWare Server Administration (Secure) |8333
+|===================================================
+
+Let me know in the comments section if there are any ports you would like added
+to this list.
+
+
+Category:Networking
+
+
+// vim: set syntax=asciidoc:
diff --git a/src/Sidebar.ascii b/src/Sidebar.ascii
new file mode 100644
index 0000000..f05a6be
--- /dev/null
+++ b/src/Sidebar.ascii
@@ -0,0 +1,5 @@
+- Navigation
+ - link:index.html[Main]
+ - link:about.html[About]
+
+// vim: set syntax=asciidoc:
diff --git a/src/Team_Password_Management.ascii b/src/Team_Password_Management.ascii
new file mode 100644
index 0000000..b48fb36
--- /dev/null
+++ b/src/Team_Password_Management.ascii
@@ -0,0 +1,112 @@
+Team Password Management
+========================
+:author: Aaron Ball
+:email: nullspoon@iohq.net
+
+
+== {doctitle}
+
+A while back I started looking for alternate means to manage my passwords,
+specifically because I started playing more with pgp encryption. I thought it'd
+be neat to be able to use pgp to encrypt a password database and/or use git to
+version the passwords. It turns out that someone had the idea before I did: the
+developers of password-store.
+
+Password-store, or pass, is a [very impressive] command line bash script that
+uses git to version passwords and pgp keys to encrypt/decrypt each password.
+Most relevant to this post, though, it makes use of a feature that gpg
+provides: the --encrypt-to switch.
+
+
+== gpg --encrypt-to
+
+The --encrypt-to switch for the gpg command allows for encryption of the given
+stream to multiple recipients. For the purposes of password management, it
+allows for each user of the password database to add their pgp key to the
+_.gpg-id_ file. The effect is that each subsequent save of the given password
+re-encrypts it using every pgp key listed in the .gpg-id file.
+
+Effectively, each user of the password repo can have their own password (the
+password to their pgp private key), whilst not knowing the passwords other
+members are using. This means that if, for example, an employee leaves the
+company, the remaining repo members can just remove that person's key from
+the \.gpg-id file, and all further changes (regenerations) of the passwords
+will not be encrypted with the departed employee's key, thus revoking their
+access.
+
+
+== Setup
+
+Setup for this is fairly simple, if you're accustomed to using git and gpg/pgp.
+The commands for pass are very intuitive.
+
+To create a pass database (assuming you already have it installed), execute...
+
+----
+pass init user@gpg-id-to-be-used.com
+----
+
+To add other users' pgp keys, just add their ids to the .gpg-id file located
+at \~/.password-store/.gpg-id. Each password created after that will be
+encrypted to each user listed in that file.
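+
+For example, a three-member team's .gpg-id might contain (addresses
+hypothetical):
+
+----
+me@example.com
+alice@example.com
+bob@example.com
+----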
+
+Note: Remember that each key that you're adding to the .gpg-id file must at
+ least have marginal trust in gpg.
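+
+A minimal sketch of granting that trust (filename and address are
+hypothetical):
+
+----
+# Import a teammate's public key
+gpg --import alice.asc
+
+# At the gpg> prompt, type "trust", choose a level of at least 3
+# (marginal), then "quit"
+gpg --edit-key alice@example.com
+----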
+
+
+== Questions
+
+=== What about arbitrary users adding themselves to .gpg-id?
+
+The nice thing about gpg is that it will not allow usage of the --encrypt-to
+switch (amongst other switches) without a measure of trust given to the key
+in question. This means that if any user adds their key to the .gpg-id file
+without that trust, every subsequent password change will yield an error,
+indicating that the password file cannot be encrypted to the given untrusted
+key.
+
+Another perk to pass is that it versions all changes to the password "database"
+in git, so the user who added their key to the .gpg-id file will have left a
+log entry (assuming they didn't rewrite history to conceal their subterfuge),
+and thus they can be dealt with appropriately.
+
+
+=== What if I want to run more than one database?
+
+Add the following to your .bashrc file.
+
+----
+#
+# Function to override calls to pass binary. Allows for multiple password-store
+# backends. Based on the first argument given to "pass", selects a different
+# password store backend.
+#
+# Example Usage:
+# # Will edit default password-store foo
+# pass edit foo
+#
+# # Will edit alternate, team, password-store foo
+# pass team edit foo
+#
+function pass {
+  local alt='team'
+  if [[ ${1} == ${alt} ]]; then
+    export PASSWORD_STORE_DIR=~/.password-store.${alt}
+    # Shift off the first argument
+    shift
+  else
+    export PASSWORD_STORE_DIR=~/.password-store
+  fi
+
+  # Call the actual binary, quoting so arguments with spaces survive
+  /usr/bin/pass "${@}"
+}
+----
+
+That will override calls to the pass binary (usually /usr/bin/pass),
+intercepting the first argument. If the first argument is team, it will look in
+\~/.password-store.team for passwords. If the first argument is not team, then
+it looks in the default location, ~/.password-store.
+
+
+Category:Security
+Category:Encryption
+
+
+// vim: set syntax=asciidoc:
diff --git a/src/Updating_SSH_Keys_Across_an_Environment.ascii b/src/Updating_SSH_Keys_Across_an_Environment.ascii
new file mode 100644
index 0000000..becdea2
--- /dev/null
+++ b/src/Updating_SSH_Keys_Across_an_Environment.ascii
@@ -0,0 +1,347 @@
+Updating SSH Keys Across an Environment
+=======================================
+:author: Aaron Ball
+:email: nullspoon@iohq.net
+
+
+== {doctitle}
+
+Most Linux environments with any number of servers use keys to perform tasks,
+from simple manual administration to gathering manifests, backing up config
+files across an environment, and really any kind of automation. Why? Because
+passwords are terrible things (how's that for indignant). Seriously though,
+despite the risks passwords present when minimum constraints aren't
+appropriately set or enforced, at least passwords make authentication and
+semi-secure security accessible to people. Keys are preferable of course, but
+not within reach for the general public. Enough of my philosophical ramblings
+about security though. I have several servers that I run (including this one)
+that all use keys almost exclusively for logins. Like passwords, keys should
+be cycled frequently, and if you have things set up right, that should be
+completely painless. Here's the script I wrote to bulk change keys across my
+entire environment.
+
+----
+#!/usr/bin/env bash
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# @author Nullspoon <nullspoon@iohq.net>
+#
+
+manifest=''
+key=''
+action=''
+id=''
+user=''
+
+#
+# Backs up a specific ssh key pair to <date_modified>.<key_name>
+#
+# @param ssh_base string Path to where the ssh keys and configs are stored
+# @param key string Name of the key to backup
+#
+# @return string The filename of the key backup
+#
+function backupKeys {
+ local ssh_base=$1
+ local key=$2
+ moved=0;
+ date=""
+ priv_ls=$(ls -l --time-style='+%Y%m%d%H%M%S' ${ssh_base}/${key})
+ date=$(echo "${priv_ls}" | tr -s ' ' | cut -d ' ' -f 6);
+ # Rename the old key
+ if [[ -e "${ssh_base}/${key}" ]]; then
+ mv ${ssh_base}/${key} ${ssh_base}/${date}.${key}
+ moved=1;
+ fi
+ # Rename the old public key
+ if [[ -e "${ssh_base}/${key}.pub" ]]; then
+ mv ${ssh_base}/${key}.pub ${ssh_base}/${date}.${key}.pub
+ moved=1;
+ fi
+ if [[ ${moved} == 0 ]]; then
+ echo ''
+ else
+ chmod 700 ${ssh_base}/*
+ echo "${ssh_base}/${date}.${key}"
+ fi
+}
+
+#
+# Pushes specific public key to remote user's authorized_keys
+#
+# @param conn string The user@host connection string for the remote system
+# @param old_key string The key to use for authentication
+# @param new_key string The key, public or private, to be pushed
+#
+function pushKey {
+ local conn=$1
+ local old_key=$2
+ local new_key=$3
+ if [[ ${#new_key} -lt '4' ]]; then
+ echo "Key to be pushed is not a public key."
+ exit
+ fi
+
+ ispub=$(keyIsPublic ${new_key})
+ if [[ ${ispub} == 0 ]]; then
+ # Append .pub because a public key wasn't specified
+ new_key="${new_key}.pub"
+ fi
+
+ local cmd="if [[ ! -d ~/.ssh/ ]]; then mkdir ~/.ssh/; fi"
+ cmd="${cmd} && echo '$(cat ${new_key})' >> ~/.ssh/authorized_keys"
+
+  # Skip the -i flag when no authentication key was given
+  local id_file="-i ${old_key}"
+  if [[ ${old_key} == '' ]]; then
+    id_file=''
+  fi
+  ssh -q ${id_file} ${conn} "${cmd}"
+}
+
+#
+# Removes the specified public key from a remote user's authorized_keys file
+#
+# @param conn string The user@host connection string for the remote system
+# @param key string The key to use for authentication which is to be removed
+#
+function removeRemoteKey {
+ local conn=$1
+ local key=$2
+ pub_key=''
+ priv_key=''
+ ispub=$(keyIsPublic ${key})
+ if [[ ${ispub} == 0 ]]; then
+ priv_key="${key}"
+ pub_key="${key}.pub"
+ else
+ priv_key="${key:0:-4}"
+ pub_key="${key}"
+ fi
+ contents=$(cat "${pub_key}")
+ local cmd="if [[ ! -d ~/.ssh/ ]]; then mkdir ~/.ssh/; fi"
+ cmd="${cmd} && cat ~/.ssh/authorized_keys | grep -v '${contents}' "
+ cmd="${cmd} > ~/.ssh/auth_keys"
+ cmd="${cmd} && mv ~/.ssh/auth_keys ~/.ssh/authorized_keys"
+ ssh -q -i ${priv_key} ${conn} "${cmd}"
+}
+
+#
+# Determines whether the specified key is a public key, judging by its .pub extension.
+#
+# @param key string Path to the key to check
+#
+# @return int Whether or not the key is public
+#
+function keyIsPublic {
+ key=$1
+  if [[ ${#key} -lt '4' ]]; then
+    echo 0
+    return
+  fi
+  # Check the extension
+  ext=${key:$((${#key}-4)):${#key}}
+  if [[ ${ext} == '.pub' ]]; then
+    echo 1
+    return
+  fi
+ echo 0
+}
+
+#
+# Generates a new ssh key pair with no passphrase
+#
+# @param filepath string Path to where the new ssh key will be written
+# @param bits int Number of bits in the new key (eg: 2048, 4096, 8192, etc.)
+#
+function genKey {
+ local filepath=$1
+ local bits=$2
+ ssh-keygen -b ${bits} -f "${filepath}" -N ''
+}
+
+#
+# Prints the help text
+#
+function getHelp {
+ echo
+ echo -n "Manages ssh keys en masse. Designed to perform pushes, "
+ echo " removals, and creations of ssh keys on lists of servers."
+ echo
+ echo "Usage: keymanage.sh action --manifest systems.list"
+ echo -n " -m, --manifest Text file containing a list of systems, "
+ echo "delimited by new lines."
+ echo -n " [-k, --key] Path to a key to perform an action "
+ echo "(push or remove) with."
+ echo -n " [-i, --id] Key to use for automated logins. Not "
+ echo "used when performing an update."
+ echo -n " [-u, --user] Username on remote systems to work on "
+ echo "(defaults to root)."
+ echo
+}
+
+function parseArgs {
+ argv=(${@})
+ # Parse the arguments
+ for(( i=0; i<${#argv[*]}; i++ )); do
+ if [[ ${argv[$i]} == '-h' || ${argv[$i]} == '--help' ]]; then
+ getHelp
+ exit
+ elif [[ ${argv[$i]} == '-m' || ${argv[$i]} == '--manifest' ]]; then
+ manifest=${argv[$i+1]}
+ i=$(( ${i} + 1 ))
+ elif [[ ${argv[$i]} == '-k' || ${argv[$i]} == '--key' ]]; then
+ key=${argv[$i+1]}
+ i=$(( ${i} + 1 ))
+ elif [[ ${argv[$i]} == '-i' || ${argv[$i]} == '--id' ]]; then
+ id=${argv[$i+1]}
+ i=$(( ${i} + 1 ))
+ elif [[ ${argv[$i]} == '-u' || ${argv[$i]} == '--user' ]]; then
+ user=${argv[$i+1]}
+ i=$(( ${i} + 1 ))
+ else
+ action=${argv[$i]}
+ fi
+ done
+
+ # Enforce some business rules
+ echo
+ exit=0;
+ if [[ ${action} == '' ]]; then
+ echo "Please specify an action.";
+ echo " Available actions: push, remove, update."
+ echo
+ exit=1;
+ fi
+ if [[ ${manifest} == '' ]]; then
+ echo "Please specify a manifest file."
+ echo " Example: keymanage.sh action [-m|--manifest] ./systems.txt"
+ echo
+ exit=1;
+ fi
+ if [[ ${exit} == 1 ]]; then
+ exit
+ fi
+}
+
+#
+# Determines the path to the parent directory containing a file.
+#
+# @param filepath string Path to the file to get the parent directory for
+#
+# @return string Path to the file's parent directory
+#
+function getFilePath {
+ filepath=$1
+ filename=$(basename ${filepath})
+ echo ${filepath} | sed "s/\(.*\)${filename}/\1/"
+}
+
+#
+# Push main function. One param because the rest are global
+#
+function keyPush {
+ argv=( ${@} )
+ if [[ ${id} == '' ]]; then
+ echo "No identity file specified (-i). This will likely be painful."
+ fi
+ for (( i=0; i<${#argv[*]}; i++ )); do
+ dest=${argv[$i]}
+ if [[ ${id} == '' ]]; then
+ pushKey "${dest}" '' ${key}
+ else
+ pushKey "${dest}" ${id} ${key}
+ fi
+ echo "Key ${key} added for ${dest}."
+ done
+}
+
+#
+# Update main function. One param because the rest are global
+#
+function keyUpdate {
+ argv=( ${@} )
+ ssh_base=$(getFilePath ${key})
+ filename=$(basename ${key})
+ # Backup our old key
+ backup_key="$(backupKeys ${ssh_base} ${filename})"
+
+ # Let's get to work on that new key
+ genKey "${key}" 4096
+
+ for (( i=0; i<${#argv[*]}; i++ )); do
+ dest=${argv[$i]}
+ if [[ ${backup_key} == '' ]]; then
+ echo "No current key exists."
+ echo "Skipping backup and removal from remote."
+ # Push the new key
+ pushKey "${dest}" '' ${key}
+ else
+ # Push the new key
+ pushKey "${dest}" ${backup_key} ${key}
+ # Clean up the old key from our remote
+ removeRemoteKey "${dest}" "${backup_key}"
+ fi
+ echo "Key ${key} updated for ${dest}."
+ done
+}
+
+#
+# Remove main function. One param because the rest are global
+#
+function keyRemove {
+ argv=( ${@} )
+ for (( i=0; i<${#argv[*]}; i++ )); do
+ dest=${argv[$i]}
+ removeRemoteKey "${dest}" "${key}"
+ echo "Key ${key} removed from ${dest}."
+ done
+}
+
+#
+# The main function
+#
+function main {
+ # Parse our script args
+ # Believe me, this is a lot better than the alternatives
+ parseArgs ${@}
+
+ destinations=( $(cat ${manifest}) )
+ # Key required
+ if [[ ${key} == '' ]]; then
+ echo -n "Please specify a key (-k) to ${action}."
+ echo
+ exit
+ fi
+
+ # Let's start doing stuff
+ if [[ ${action} == 'push' ]]; then
+ keyPush ${destinations[*]}
+ elif [[ ${action} == 'update' ]]; then
+ keyUpdate ${destinations[*]}
+ elif [[ ${action} == 'remove' ]]; then
+ keyRemove ${destinations[*]}
+ fi
+}
+
+main ${@}
+----
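+
+As a usage sketch (the manifest contents and key path are hypothetical), a
+full key rotation across an environment looks like this:
+
+----
+# systems.list contains one destination per line, e.g.:
+#   root@web01.example.com
+#   root@db01.example.com
+
+# Back up the current key, generate a new 4096 bit key in its place,
+# push the new public key out, and remove the old one from each host
+./keymanage.sh update -m systems.list -k ~/.ssh/id_rsa
+----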
+
+
+Category:Linux
+
+
+// vim: set syntax=asciidoc:
