author    Aaron Ball <nullspoon@iohq.net>  2015-07-04 14:14:41 -0600
committer Aaron Ball <nullspoon@iohq.net>  2015-07-17 08:58:46 -0600
commit    1885394214392349a92eaa959e5f6acdffcd2ca2 (patch)
tree      77772c8eba7ba2b30805c81827eef36d47157974 /src
parent    555db1fb0a22d9e0af9944504feb0ba5d759e926 (diff)
download  oper.io-1885394214392349a92eaa959e5f6acdffcd2ca2.tar.gz
          oper.io-1885394214392349a92eaa959e5f6acdffcd2ca2.tar.xz
Restructured all posts
Diffstat (limited to 'src')
-rw-r--r--  src/A_Usual_Opening.ascii | 57
-rw-r--r--  src/About.ascii | 92
-rw-r--r--  src/AdBlock_Only_kind_of_Blocks_Ads.ascii | 30
-rw-r--r--  src/Android:Configuring_Hotmail_Exchange.ascii | 88
-rw-r--r--  src/Android:My_Phone_Configuration.ascii | 147
-rw-r--r--  src/Android_Client_and_Sync_with_ownCloud_on_NGINX.ascii | 61
-rw-r--r--  src/Android_Screen_Density.ascii | 90
-rw-r--r--  src/Aol_Email_Hacked.ascii | 237
-rw-r--r--  src/Apache:Listening_Port.ascii | 60
-rw-r--r--  src/Attached_Devices_and_VPS_(OpenVZ_and_Virtuozzo).ascii | 21
-rw-r--r--  src/Backing_up_a_Server_Remotely_Using_Minimal_Bandwidth.ascii | 66
-rw-r--r--  src/Bash:Lesser_Known_Bits.ascii | 139
-rw-r--r--  src/Benchmarks:Toshiba_Canvio_Slim.ascii | 60
-rw-r--r--  src/Benchmarks:WD_Elements.ascii | 115
-rw-r--r--  src/Blog_Resurrection.ascii | 48
-rw-r--r--  src/Btrfs:Balancing.ascii | 87
-rw-r--r--  src/Btrfs:RAID_5_Rsync_Freeze.ascii | 91
-rw-r--r--  src/Btrfs:RAID_Setup.ascii | 165
-rw-r--r--  src/Building_an_Ejabberd_Server_with_MySql.ascii | 135
-rw-r--r--  src/Case_Insensitive_Matching_in_C++.ascii | 192
-rw-r--r--  src/Cell_Provider_Comparison.ascii | 44
-rw-r--r--  src/Changing_the_Hostname_on_a_Linux_Box.ascii | 58
-rw-r--r--  src/Church_Media_Computer_Setup.ascii | 108
-rw-r--r--  src/Command_Line_Auto-Complete.ascii | 52
-rw-r--r--  src/Comparing_Remote_Files_Without_Breaking_a_Sweat.ascii | 57
-rw-r--r--  src/Compiling_KeePassX_2_from_Source_with_Qt_4.8.0.ascii | 78
-rw-r--r--  src/Compiling_MariaDB_:_cannot_find_ncurses:_File_format_not_recognized.ascii | 38
-rw-r--r--  src/Compiling_nginx_for_Solaris_10_-_Configure:_test:_argument_expected.ascii | 54
-rw-r--r--  src/Configuring_Status.Net_for_NGINX_in_a_Subdirectory.ascii | 63
-rw-r--r--  src/Converting_Disks_in_Hyper-V.ascii | 65
-rw-r--r--  src/Converting_Hyper-V_VHDs.ascii | 53
-rw-r--r--  src/Cool,_Fun,_and_Mostly_Useless_Things_to_do_with_Linux.ascii | 139
-rw-r--r--  src/Cool_Vim_Trickery.ascii | 115
-rw-r--r--  src/Creating_Search_Engine_Optimized_Drupal_URLS.ascii | 55
-rw-r--r--  src/Creating_Text_Outlines_in_CSS.ascii | 38
-rw-r--r--  src/Cyanogenmod_7_on_the_Evo.ascii | 81
-rw-r--r--  src/DD-WRT:Change_Root_SSH_Password.ascii | 35
-rw-r--r--  src/DNS_Backup_Script.ascii | 105
-rw-r--r--  src/Default_Solaris_man_Pager.ascii | 39
-rw-r--r--  src/Dell_V305_Printer_on_Linux.ascii | 190
-rw-r--r--  src/Digraphs.ascii | 114
-rw-r--r--  src/Divs_That_Move_When_Users_Scroll.ascii | 82
-rw-r--r--  src/Don't_Censor_Me_Bro!.ascii | 124
-rw-r--r--  src/Drupal,_Mod_rewrite,_Subdirectories,_and_Nginx.ascii | 106
-rw-r--r--  src/Duplicating_a_USB_Stick_with_dd.ascii | 79
-rw-r--r--  src/EXE_Disassociation.ascii | 53
-rw-r--r--  src/Empathy_Accounts_Dialog_won't_Launch.ascii | 36
-rw-r--r--  src/Enabling_Colors_in_Ksh88.ascii | 38
-rw-r--r--  src/Encrypting_Home_Directories_with_EncFS.ascii | 84
-rw-r--r--  src/Exim_Spam_Filtering_with_Bogofilter.ascii | 289
-rw-r--r--  src/Expanding_Divs_Containing_Floated_Elements.ascii | 29
-rw-r--r--  src/Finding_Prime_Factors.ascii | 77
-rw-r--r--  src/Finding_the_Absolute_Path_of_a_Bash_Script.ascii | 57
-rw-r--r--  src/Fixing_Android_Mac_Address_Conflicts.ascii | 102
-rw-r--r--  src/Git:Branch_Author_List.ascii | 62
-rw-r--r--  src/Git:Care_Free_Committing.ascii | 97
-rw-r--r--  src/Git:Changing_Project_Licensing.ascii | 60
-rw-r--r--  src/Git:Clone_All_Remote_Repos.ascii | 109
-rw-r--r--  src/Git_Basics.ascii | 220
-rw-r--r--  src/Git_as_a_Backup_Solution.ascii | 102
-rw-r--r--  src/Google_Apps_Users_:_Cannot_Use_Self-hosted_XMPP.ascii | 70
-rw-r--r--  src/How_to_Uninterest_Me_in_Your_Job_Opening.ascii | 87
-rw-r--r--  src/Hyper-V_and_Vista.ascii | 48
-rw-r--r--  src/IOHQ_Status.ascii | 14
-rw-r--r--  src/Indenting_in_VI.ascii | 38
-rw-r--r--  src/Install_Java_6_on_Debian_Lenny_5.0.ascii | 64
-rw-r--r--  src/Installation_of_Aptana_Studio_into_Eclipse.ascii | 36
-rw-r--r--  src/Installing_Gimp_2.7_via_a_PPA.ascii | 50
-rw-r--r--  src/Installing_KDE_4.6_in_Debian.ascii | 54
-rw-r--r--  src/Installing_Team_Foundation_Server_2008.ascii | 183
-rw-r--r--  src/Installing_Team_Foundation_Server_2010_Beta_1.ascii | 200
-rw-r--r--  src/Installing_Visual_Studio_2008_Service_Pack_1.ascii | 43
-rw-r--r--  src/Javadoc-style_Perl_Documentation_Generator.ascii | 148
-rw-r--r--  src/Kill_All_Connections_to_SQL_Database.ascii | 38
-rw-r--r--  src/Kubuntu_and_Bluetooth_Audio.ascii | 65
-rw-r--r--  src/Let's_get_started..._again.ascii | 35
-rw-r--r--  src/Linux:At_the_Office.ascii | 228
-rw-r--r--  src/Linux:Checking_CPU_Core_Usage.ascii | 55
-rw-r--r--  src/Linux:Comparing_Remote_with_Local.ascii | 118
-rw-r--r--  src/Linux:Desktop_Sharing.ascii | 73
-rw-r--r--  src/Linux:Formatting_a_Hard_Drive.ascii | 108
-rw-r--r--  src/Linux:Luks_Password_Changing.ascii | 43
-rw-r--r--  src/Linux:RAID_Setup.ascii | 253
-rw-r--r--  src/Linux:Secure_Authentication.ascii | 264
-rw-r--r--  src/Linux:Symantec_VIP_Access.ascii | 32
-rw-r--r--  src/Linux:System_Encryption.ascii | 155
-rw-r--r--  src/Linux:Using_Bash_to_Generate_a_Wordlist.ascii | 84
-rw-r--r--  src/Linux:Vpnc_Restart_Script.ascii | 47
-rw-r--r--  src/Linux:dm-crypt_Encrypted_Home_Directories.ascii | 213
-rw-r--r--  src/Linux_Storage_Devices,_Partitions,_and_Mount_Points_Explained.ascii | 131
-rw-r--r--  src/Lucid_Lynx_Release_Date.ascii | 32
-rw-r--r--  src/MPlayer:Recursively_Play_All_Files.ascii | 75
-rw-r--r--  src/Managing_Linux_with_Linux.ascii | 56
-rw-r--r--  src/MediaWiki_vs_SharePoint.ascii | 100
-rw-r--r--  src/Migrating_SQL_Data.ascii | 110
-rw-r--r--  src/Migrating_from_Drupal_7_to_Habari_.8.ascii | 91
-rw-r--r--  src/Mounting_Drives_in_Linux_Without_Root.ascii | 50
-rw-r--r--  src/Mutt:Email_Notifications.ascii | 72
-rw-r--r--  src/Mutt:Sorting_Mail_Like_a_Boss.ascii | 61
-rw-r--r--  src/Mutt:Useful_Muttrc_Color_Regular_Expressions.ascii | 40
-rw-r--r--  src/MySql:Find_all_Required_Columns.ascii | 43
-rw-r--r--  src/My_.bashrc.ascii | 40
-rw-r--r--  src/My_Favorite_Blogs.ascii | 22
-rw-r--r--  src/My_Favorite_Open_Source_Projects.ascii | 104
-rw-r--r--  src/Net_Neutrality.ascii | 138
-rw-r--r--  src/Non-Root_User_Connect_to_Wifi_in_Arch_Linux_with_SLiM.ascii | 44
-rw-r--r--  src/Note-taking_with_Vim.ascii | 115
-rw-r--r--  src/Note_to_self:Connecting_ASP.Net_to_SQL.ascii | 18
-rw-r--r--  src/Open_Source_Living:Browsers.ascii | 41
-rw-r--r--  src/Opening_CHM_Files_in_Vista.ascii | 43
-rw-r--r--  src/OwnCloud_Documents_on_Arch_Linux.ascii | 38
-rw-r--r--  src/PHP-5.3:Class_Exception_Not_Found.ascii | 31
-rw-r--r--  src/Perfect_Server_Debian_Installation_-_Pureftpd_Won't_Start.ascii | 93
-rw-r--r--  src/Performing_a_MySql_Backup_Via_Command_Line.ascii | 42
-rw-r--r--  src/Postback_Freezes_Animated_Gifs.ascii | 60
-rw-r--r--  src/ProPresenter:Action_Hotkeys_Not_Working.ascii | 36
-rw-r--r--  src/ProPresenter:Automatically_Advancing_Slide_Loops.ascii | 56
-rw-r--r--  src/ProPresenter:Edit_Mode.ascii | 49
-rw-r--r--  src/Puppet:Out_of_Range_for_Type_Integer.ascii | 116
-rw-r--r--  src/Redirecting_a_WordPress_Site.ascii | 48
-rw-r--r--  src/Remote_Mounting_File_Systems_Through_SSH.ascii | 69
-rw-r--r--  src/Replacing_the_Glass_on_a_Samsung_Galaxy_S_iii.ascii | 133
-rw-r--r--  src/Running_Load_Tests_with_a_Remote_VSTS_Controller_and_Associated_Agents.ascii | 51
-rw-r--r--  src/Running_Web_Services_on_Non-Standard_Ports.ascii | 66
-rw-r--r--  src/SQL_2008_Reinstall_Errors.ascii | 91
-rw-r--r--  src/SQL_Server_2008_Memory_Management.ascii | 72
-rw-r--r--  src/SSH_Tunnel_Forwarding.ascii | 73
-rw-r--r--  src/SSH_VPN.ascii | 89
-rw-r--r--  src/Samsung_Epic_4g_Syndicate_Rom_:_Freeze_on_Boot.ascii | 40
-rw-r--r--  src/Scheduling_Jobs_in_Linux.ascii | 73
-rw-r--r--  src/Screenshots_from_Command_Line.ascii | 40
-rw-r--r--  src/Scripting_Wma_to_Ogg_Conversion_in_Linux.ascii | 57
-rw-r--r--  src/Searching_for_a_Command_in_Linux.ascii | 97
-rw-r--r--  src/Securing_a_Postfix_Smtp_Server.ascii | 226
-rw-r--r--  src/Server_Administration:Firewalls.ascii | 41
-rw-r--r--  src/Sidebar.ascii | 5
-rw-r--r--  src/Startup_Sounds_with_SLiM.ascii | 74
-rw-r--r--  src/Streaming_Audio_Over_SSH.ascii | 71
-rw-r--r--  src/Synchronizing_Playlists_with_a_Sansa_Fuze_and_Media_Monkey.ascii | 62
-rw-r--r--  src/Sysprepping_Server_2008.ascii | 76
-rw-r--r--  src/TFS_2008:Permissions_for_Creating_Team_Projects.ascii | 96
-rw-r--r--  src/Team_Foundation_Server_2010_Beta_1_Configuration.ascii | 78
-rw-r--r--  src/Team_Password_Management.ascii | 112
-rw-r--r--  src/Theming_Gnome-Shell_Intro.ascii | 48
-rw-r--r--  src/Transferring_Files_from_Windows_to_Linux.ascii | 40
-rw-r--r--  src/Ubuntu_-_Installing_Sun_Java.ascii | 26
-rw-r--r--  src/Ubuntu_Bridging_Network_Interfaces.ascii | 126
-rw-r--r--  src/Ubuntu_Reinstall.ascii | 31
-rw-r--r--  src/Updating_SSH_Keys_Across_an_Environment.ascii | 347
-rw-r--r--  src/Upgrading_TFS_2008_Workgroup_to_TFS_2008_Standard.ascii | 54
-rw-r--r--  src/Using_SpiderOak_with_Qt_4.7.2.ascii | 57
-rw-r--r--  src/Vim:Frequently_Used_Bits_and_Doodads.ascii | 204
-rw-r--r--  src/Visual_Studio_2010_Debugging_in_Remote_Locations.ascii | 35
-rw-r--r--  src/When_Innovation_is_Appropriate.ascii | 119
-rw-r--r--  src/Whitelist_MediaWiki_Namespaces_with_$wgWhitelistRead.ascii | 189
-rw-r--r--  src/Writing_an_Array_to_Sql_Conversion_Function.ascii | 35
-rw-r--r--  src/XMPP_Error:_404:_Remote_Server_Not_Found.ascii | 66
-rw-r--r--  src/Xfce_4.10_Pre_2_Review.ascii | 66
-rw-r--r--  src/Xkcd:1110.ascii | 67
-rwxr-xr-x  src/files/00-desktop.jpg | bin 108600 -> 0 bytes
-rwxr-xr-x  src/files/01-xfce-settings.jpg | bin 116907 -> 0 bytes
-rwxr-xr-x  src/files/01TeamExplorerTeamProjects.PNG | bin 69383 -> 0 bytes
-rwxr-xr-x  src/files/01_ClickSettings_-_X.jpg | bin 20669 -> 0 bytes
-rwxr-xr-x  src/files/01_Open_Test.jpg | bin 34427 -> 0 bytes
-rwxr-xr-x  src/files/01_SQL_Migration_ScriptDatabaseAs.png | bin 84513 -> 0 bytes
-rwxr-xr-x  src/files/01_Welcome.jpg | bin 16813 -> 0 bytes
-rwxr-xr-x  src/files/02-xfce-user-actions.jpg | bin 9663 -> 0 bytes
-rwxr-xr-x  src/files/02GroupMemberships.PNG | bin 16589 -> 0 bytes
-rwxr-xr-x  src/files/02_Edit_Test_Run_Configurations.jpg | bin 64050 -> 0 bytes
-rwxr-xr-x  src/files/02_HardDiskMainSettings_-_X.jpg | bin 26539 -> 0 bytes
-rwxr-xr-x  src/files/02_SQL_Select_Import_Data.png | bin 118063 -> 0 bytes
-rwxr-xr-x  src/files/03-xfce-window-resize-hot-edge.jpg | bin 61044 -> 0 bytes
-rwxr-xr-x  src/files/03SQLServerReportingServices.PNG | bin 31831 -> 0 bytes
-rwxr-xr-x  src/files/03_Enter_Service_Account.jpg | bin 18707 -> 0 bytes
-rwxr-xr-x  src/files/03_SQL_Import_Choose_DataSource.png | bin 47225 -> 0 bytes
-rwxr-xr-x  src/files/03_Select_Controller.jpg | bin 45839 -> 0 bytes
-rwxr-xr-x  src/files/04CentralAdministration.PNG | bin 36664 -> 0 bytes
-rwxr-xr-x  src/files/04_Answer_Prompt.jpg | bin 14622 -> 0 bytes
-rwxr-xr-x  src/files/04_HardDiskPreConvert_-_X.jpg | bin 15014 -> 0 bytes
-rwxr-xr-x  src/files/04_Rediness_Checks_Successful.jpg | bin 19446 -> 0 bytes
-rwxr-xr-x  src/files/05_Applying_Configuration_Settings.jpg | bin 14421 -> 0 bytes
-rwxr-xr-x  src/files/06_Success.jpg | bin 12923 -> 0 bytes
-rwxr-xr-x  src/files/07_HardDiskConverting_-_X.jpg | bin 16601 -> 0 bytes
-rwxr-xr-x  src/files/A-hotmail01.jpg | bin 50135 -> 0 bytes
-rwxr-xr-x  src/files/A-hotmail02.jpg | bin 51171 -> 0 bytes
-rwxr-xr-x  src/files/Cc-sa_88x31.png | bin 5083 -> 0 bytes
-rw-r--r--  src/files/Cell_comparison.ods | bin 41698 -> 0 bytes
-rw-r--r--  src/files/Cell_comparison.xlsx | bin 11426 -> 0 bytes
-rwxr-xr-x  src/files/IIS_01_Add_Role.jpg | bin 15988 -> 0 bytes
-rwxr-xr-x  src/files/IIS_02_Role_Services.jpg | bin 17238 -> 0 bytes
-rwxr-xr-x  src/files/Img_2335_gsiii-no-glass-sm.jpg | bin 430302 -> 0 bytes
-rwxr-xr-x  src/files/Img_2337_gsiii-no-glass-dirty-sm.jpg | bin 263696 -> 0 bytes
-rwxr-xr-x  src/files/Img_2338_gsiii-glass-pile-sm.jpg | bin 264252 -> 0 bytes
-rwxr-xr-x  src/files/Img_2343_gsiii-no-glass-clean-sm.jpg | bin 180286 -> 0 bytes
-rwxr-xr-x  src/files/Img_2344_gsiii-new-glass-sm.jpg | bin 209006 -> 0 bytes
-rwxr-xr-x  src/files/Img_2348_gsiii-new-glass-and-case-sm.jpg | bin 257228 -> 0 bytes
-rwxr-xr-x  src/files/MgmtStudio1.jpg | bin 28450 -> 0 bytes
-rwxr-xr-x  src/files/MgmtStudio2.jpg | bin 17555 -> 0 bytes
-rwxr-xr-x  src/files/ProPresenter-Multiselect_Move.png | bin 140133 -> 0 bytes
-rwxr-xr-x  src/files/ProPresenter-Slide_lock-locked.png | bin 47229 -> 0 bytes
-rwxr-xr-x  src/files/ProPresenter-Slide_lock-unlocked.png | bin 19277 -> 0 bytes
-rwxr-xr-x  src/files/ProPresenter_Slide_Loops00.png | bin 52515 -> 0 bytes
-rwxr-xr-x  src/files/ProPresenter_Slide_Loops01.png | bin 525408 -> 0 bytes
-rwxr-xr-x  src/files/ProPresenter_Slide_Loops02.png | bin 465210 -> 0 bytes
-rwxr-xr-x  src/files/ProPresenter_Slide_Loops03.png | bin 47356 -> 0 bytes
-rwxr-xr-x  src/files/ProPresenter_Slide_Loops04.png | bin 70186 -> 0 bytes
-rwxr-xr-x  src/files/SQL_03_Instance_Configuration.jpg | bin 19684 -> 0 bytes
-rwxr-xr-x  src/files/Sansa-Fuze-MM-Playlist-Options.jpg | bin 213955 -> 0 bytes
-rwxr-xr-x  src/files/TFS_02_Features_to_Install.jpg | bin 21156 -> 0 bytes
-rwxr-xr-x  src/files/TFS_04_MidInstall_Restart.jpg | bin 20035 -> 0 bytes
-rwxr-xr-x  src/files/Toshiba_Canvio_Slim.png | bin 164235 -> 0 bytes
-rwxr-xr-x  src/files/WD_Elements.jpg | bin 19877 -> 0 bytes
-rwxr-xr-x  src/files/Winscp01.jpg | bin 41241 -> 0 bytes
-rwxr-xr-x  src/files/Winscp02.jpg | bin 116436 -> 0 bytes
-rwxr-xr-x  src/files/gimp271-sm.jpg | bin 34779 -> 0 bytes
-rwxr-xr-x  src/files/iohq-logo.png | bin 5954 -> 0 bytes
-rwxr-xr-x  src/files/pathauto-alias-strings0.jpg | bin 28965 -> 0 bytes
-rwxr-xr-x  src/files/terminal001.png | bin 49173 -> 0 bytes
-rwxr-xr-x  src/files/terminal002b.png | bin 30565 -> 0 bytes
-rwxr-xr-x  src/files/terminal003.png | bin 66494 -> 0 bytes
-rwxr-xr-x  src/files/terminal004.png | bin 78507 -> 0 bytes
-rw-r--r--  src/files/wpid-screenshot_29.jpg | bin 119668 -> 0 bytes
-rw-r--r--  src/files/wpid-screenshot_31.jpg | bin 39887 -> 0 bytes
-rw-r--r--  src/files/wpid-screenshot_32.jpg | bin 132816 -> 0 bytes
-rw-r--r--  src/files/wpid-screenshot_33.jpg | bin 44242 -> 0 bytes
-rw-r--r--  src/files/wpid-screenshot_36.jpg | bin 107039 -> 0 bytes
-rw-r--r--  src/res/footer.html | 9
-rw-r--r--  src/res/header.html | 12
-rw-r--r--  src/res/style.css | 204
-rw-r--r--  src/test.ascii | 8
230 files changed, 0 insertions, 13744 deletions
diff --git a/src/A_Usual_Opening.ascii b/src/A_Usual_Opening.ascii
deleted file mode 100644
index 9585bcd..0000000
--- a/src/A_Usual_Opening.ascii
+++ /dev/null
@@ -1,57 +0,0 @@
-A Usual Opening
-===============
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-== {doctitle}
-
-Hello All,
-
-For a typical opening post to a new blog, I shall describe in a typical,
-boring, and mundane way my purposes and intentions for this blog.
-
-If any of you have been using Linux for a duration shorter than that of your
-own life (or post-womb), you have undoubtedly realized that documentation on
-most Linux products is not readily available (nor is it even available in most
-cases). You also understand that when being inquisitive, the "RTFM" response
-usually leaves in one's own heart an empty hole roughly the size and shape of a
-loved one.
-
-As frustrating and repellent as these hindrances can be, Linux remains
-a distant and strangely enticing operating system. The notion of free software
-for all just seems too amazing to drop $2,500 on an apple (yes, I am absolutely
-biased). Despite the shortcomings that come with software less-tested than
-software with a revenue stream (in most cases at least), most people are
-willing to forgive a little more in the absence of a price tag. After all, a
-fully operational and functional computer that costs only what was paid for the
-hardware is hardly an offer to be scoffed at.
-
-[[who-am-i]]
-== Who am I?
-
-_Professionally,_ I am a systems admin (Windows and Linux) as well as a
-developer (PHP, Javascript, C# and ASP.Net, Joomla, Drupal, and SharePoint).
-
-__Personally__, I am a spoiled Windows user who near the beginning of 2009
-decided to give Linux a chance at being my primary operating system. I have
-since then settled on Ubuntu Linux and have yet to look back for anything other
-than good documentation.
-
-I understand that the mentality of much of the Linux community is that if you
-don't know how to use it now, you're not smart enough to figure it out and
-therefore should not use it at all. Ubuntu/Canonical seeks to have a different
-view on these things. They understand that if Linux is ever to have a market
-share close to that of even Apple, it must be user friendly and have some way
-for the end-user to get tech support somewhere other than online forums
-(outside of the Ubuntu forums of course).
-
-_Through this blog I hope to express the troubles I have run into in my
-transition from Windows to Linux as well as the solutions/substitutes I have
-found while researching said troubles._ Should anyone have any questions,
-please contact me via the comments section or via my e-mail. I will happily
-assist if I can (remember, I am still a sub-par Linux user).
-
-Thanks for reading everyone.
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/About.ascii b/src/About.ascii
deleted file mode 100644
index 18ce3f0..0000000
--- a/src/About.ascii
+++ /dev/null
@@ -1,92 +0,0 @@
-About
-=====
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-link:https://iohq.net/[Iohq.net] is my attempt at contributing back to the
-world's largest knowledgebase in history: the internet. Here I document my
-various experiences with technology and anything else I care to blog about.
-
-link:https://github.com/nullspoon[GitHub page]
-
-
-== Who Am I
-
-How dramatic does that sound? My name is Aaron Ball, and as per my Twitter
-page, I am a nix engineer, Android tester, open source fanatic, and regional
-champion of rock paper scissors lizzard spock (but only with my left hand). I
-thank God routinely for all the engineers He has put on this Earth to discover
-and make all the great things that have been made. The world is a complex and
-interesting place and there's nothing like poking at something to figure out
-how it works.
-
-
-== Contact
-
-You can send me an email at my username (nullspoon) at iohq.net (isn't
-obfuscation great?).
-
-If you are particularly concerned with security or just want to add a fellow
-PGP user to your list of security-minded friends, my public key is...
-
-Brace yourselves, it's the public key for a 4096-bit private key (hehe)...
-
-----
------BEGIN PGP PUBLIC KEY BLOCK-----
-Version: GnuPG v2.0.22 (GNU/Linux)
-
-mQINBFKjiVgBEADJ9jRiw9rT6r61eo432GRPCUAacYgCYrT8W8d8SY1DTUy16qyZ
-5mOqlVA1DN12n/pnPB7mgeD4csYstzl4k9dcG5206XC239JJbFB3ezB1P8VRI8VO
-k2iavV1ysYA1e/b4JMvzhQp/i9/JjeoJycLCDJz2ENl31hAsATCOQ+NjQ3Lk2c4R
-qNUJuxoapUn7NwxeY3zWx2nMlTcBVuSuzactnc62zMrB0fNfC13P59e6xiA0KZ2G
-Pbqi1L3Hk38biFLQvEXjAmx77FVpkiWfppuduavBrCXdBLukYeMv9PHR2cxriNWc
-QbZ2Df5Y2z0PrVkSOWSXOF81uNKP/9cJHoRvVOlRT4mejawaOQnrebTjy6xhGT37
-6Ve2eOJOgrTza16gHcysrePWemC0XQG25G1ZlaxYJarIGX7KrHh9pFCz51C29Eh9
-Bm6YS7S6+QmtIVpoMjfEpWZhwjWjYloPNzLiN3x37XUP231M4bJElXeEhOGZjMoG
-ltfPIheY+mWi7UozHdz5o6Kzubzc0xCkGANxIIvYs3Btj9e4n9lChNOS9eAspEoA
-iphYr09afyd/+y3qXotjGow1vmy7FdemZk0Z5MwuIj68W24TZQHPpdJBnFS9gSa4
-0YggSZEdLStMczujqeBXSL+Rznkyp+2XHaG4sn46BSsfVnlNiMgaBzyV8QARAQAB
-tDZOdWxsc3Bvb24gKFByaW1hcnkgbnVsbHNwb29uIGtleSkgPG51bGxzcG9vbkBp
-b2hxLm5ldD6JAjkEEwECACMFAlKjiVgCGwMHCwkIBwMCAQYVCAIJCgsEFgIDAQIe
-AQIXgAAKCRCHxh4JWkQTntI7D/9iPju0cG38YEMIzKJs9MXHvU9dzh8AdycxsRXy
-pJcQ12nb5RAAWrX8sM31US8V2BpuP/WI3HguzUYVomSPAWf3cjBjpioTA6oG24nW
-jfmoIzTmskrOvS5H4ulM9mSCe1n5UqoMFe/rKwN5wKpQDWKvEns9LEVmsRJ9i8dK
-o5lcIeZF963STKEUllAnPcNhLiaoGIjRScIb+CpUlerxSGFOivN2ksmnsOkTDmCf
-mbXsskqFVZBtO0vuCXsXaM/hcqnK/2PT+jxcaSAoAjPBrx07XQpJbs+Xq78OnVzt
-Xl9tLRsyG2QYxa+rPsZHqxvKyl8CP8oLPWuwTmny0+wXCA1jMhoGlPrJmAnXQiH+
-huyPdhZJZutZQ+aiEHT6Uvd3QKJsMQfZFBdJlyFWuA7k73Rg1zaBGIvPOKCIL71Y
-JqKd7VKAQtk97Tc6KcPE31FzDeFtE65LIj7Q9vW4qi2PI5QIs022aQdezLTFovsI
-y5Tvziq0war34YrmLmVb0R6IF5uNg9WKP7GL5njk2+Fpf9J8uYA49pqtR85Jymhn
-5GlUoi7opiCo/4PBdIBcYUF7DCaUQl5z8wHXGEuHssSjjOlT9EHMGVmVVGxC/yGJ
-TAiLEAFH0+cMlN1pzxZPcK/zDRzwkdwsdvdvNgqZorXH/I7zDc0ROg4vp/Q0Iwkz
-U4Gdf7kCDQRSo4lYARAAyl6iY6VOR/Amwmeu5j5gmZZ4T+ic0oyQ9ZyCzvGoL3KU
-IDrkIzzemyOnE92xfq9NgdNa+ITNHHTXXsp9Ayey8/h1W/twkJprAr9tP73tWraG
-swqnm+C3hs7k9ntkcXkHSonewHoK9Ro4Pki/MfW6YwwssNlmpPPz08Tnn/R8x+RK
-ApNmBjx7yRwmiyQlxXAaK/LSlVM62DwLEt24n+gcN7ukp+nGx63HaxpXGMmkDXIQ
-9AzidTnoKO2KpvYkkBkd/cF0XgXKJPqU1KV/gbb4uQ21Upiht+Stuqp0Zawq9F2u
-GUEFzviMqlT5dhh0T48YzJyCdeKxpkd5XLyOKnzCW2oXlvY5lBieIHjRBil7NkMy
-ezkgsy+S1+eQDtAdAVgQi+MeeXpI5k+o1nF0rl1ivnhJPYvQ8/4oOOwuK6FwWua4
-Sd057X53Bgp7xvZKxOlEYhskgyz9W0uocgX8DhhB49rfw9c3PgqagUrDuDGQW12l
-HkxYuLMtcc7N2jpI11VXfsGCnTxmFNWXSQjzbKh50egPp4d1C6osvwWMNDWCu3ry
-GMVYj5hdDqwLQtcGD1+9uw9IYEDO+pXvdRMrPvEdPfFDvsIWXKKMM4CH4fKuMZpL
-y4esJFy15ARLcrDhD2ceN5xVaPYuz374tAWngcn44GFt9B5H1ayxRRgV/ydV3S0A
-EQEAAYkCHwQYAQIACQUCUqOJWAIbDAAKCRCHxh4JWkQTnrPIEACP0ravu0uIVyg0
-21T9k+khF7nWBRgc3e8bX1uKOqC800RePXyu96wl7DA5agvf3271NtXFFfALwkzg
-NZ2d5+KNKzoz9zrz7txmEHcgNHrWeXY220YmhgEyDD/rDS6sjGn9O/Obb+f8mEoY
-XhWrSQkGWIgtY3Qb+wZnA6gA7VzmVgHxiKcM4XH6QhJol9mgCWZs7zxcHVz0mMNf
-fffyRuf4/JkyZ6WohsMPXL0vsSX9j49n3f7N/G1TBICTQ6qDvMeRMhaJpkliVsHR
-kVy/Oo+LWQ7wEy1OJB9Ey/KUIAKP481xcCIEquV7LHFzRuNf/hPE6A9iKGgAAN1z
-FAdCwe+8BDvybW8+xt+WdHULNnPcaEIIEJAeoRWg5yomJ5ObAkrcz/F+1VxUPTle
-t5X+P7KWk0pai2GBCKhyACHN7WKqxM8BE8qg+d5Xpg4RVkerFtEIKB1PwHcsFGXX
-9mFxHblNYJ/xxxX5MK1qKIKJoyFFhNR7sw8+SKg9gCrv8nwOz15gC+4cV0LHm7mg
-1CgwS4qiAmomWLogal7O+iV960usfSFEE4BSPq7JMIDn9ICfOYjeIAQ8wCw7ZjcN
-ykOycANTpIp7O7gnSNm2V9i1JTK4M9hX9DWtse7lA1YZiYqfRHWdKE6UCapiVKl+
-Ldfv23c2sRAxPA27rFmsgcOGz9iV8w==
-=OITG
------END PGP PUBLIC KEY BLOCK-----
-----
-
-// vim: set syntax=asciidoc:
diff --git a/src/AdBlock_Only_kind_of_Blocks_Ads.ascii b/src/AdBlock_Only_kind_of_Blocks_Ads.ascii
deleted file mode 100644
index ddaf4e1..0000000
--- a/src/AdBlock_Only_kind_of_Blocks_Ads.ascii
+++ /dev/null
@@ -1,30 +0,0 @@
-AdBlock Only kind of Blocks Ads
-===============================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-== {doctitle}
-
-Today I was toying around with netstat to see what incoming connections I had
-to my Linux box and noticed that for the pages you're on, your browser makes a
-connection to each of the ad providers linked to on the given page. What's
-best (or worst) about this is the connection isn't broken until you close your
-browser (at least that I noticed).
-
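-For anyone wanting to reproduce this, something along these lines should show
-it (a sketch; the exact flags I used are a guess at this point):
-
-----
-# List established TCP connections and the process that owns each
-netstat -tpn | grep ESTABLISHED
-----
-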
-I mentioned this to my girlfriend who is a big fan of adblock and she asked (of
-course) if that happened when adblock was running. So, off I went to install
-adblock to test and sure enough, it still made the connections to the ad
-providers' servers. The ads are obviously hidden; it just still grabs the
-resources for the ads but obscures their HTML. That means you're still being
-tracked by the ad providers, you just don't see it.
-
-This isn't necessarily a bad thing. I mean, before adblock they were still
-getting your information. Now it's the same, you're just not seeing animated
-gifs and full screen flash ads all over. I'm not knocking adblock at all (in
-fact, please support them in their worthy cause). I just thought I'd mention
-this for anyone wondering.
-
-Category:Adblock
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Android:Configuring_Hotmail_Exchange.ascii b/src/Android:Configuring_Hotmail_Exchange.ascii
deleted file mode 100644
index 79170ba..0000000
--- a/src/Android:Configuring_Hotmail_Exchange.ascii
+++ /dev/null
@@ -1,88 +0,0 @@
-Android:Configuring Hotmail Exchange
-====================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-*EDIT:* I have been testing this for a day now and up to this point, email works great. I
-got a contact to sync from my phone to my online address book as well. It did
-however take almost a day for it to appear online.  Also, a calendar entry made
-on my phone synced in under a minute to the online calendar. The other way
-around, a calendar entry made online synced to my phone in about two minutes.
-
-Yesterday (August 30) Microsoft released a new functionality for Windows Live
-Hotmail that many have been waiting for for some time: Exchange capabilities.
-
-*If you want to skip all the background stuff, you may now head down to the
-bolded section titled "Now let's get started...".*
-
-This year Microsoft has released a lot of new functionality for Hotmail that
-has really changed its quality. With GMail being arguably the best web-based
-email service, Hotmail has a lot of catching up to do.  Thankfully, the first
-thing Hotmail started with was allowing pop3 access for free. The next step to
-compete with GMail was obviously free IMAP, which GMail released two years ago
-I believe. Instead though, Microsoft gives us exchange for Hotmail. How cool is
-that?!
-
-What's significant about exchange versus IMAP you ask? Well, exchange allows
-for the synchronization of more than just mail, unlike IMAP. With exchange you
-can sync your mail, your calendar, and your contacts. Not only does it
-synchronize your mail though, it utilizes something called "push mail". This
-means your phone doesn't check for updates every five, ten, or thirty minutes;
-rather, the mail is "pushed" to your email client allowing for instantaneous
-delivery. Cool, yeah?
-
-One thing before doing this though.
-
-.Obligatory Disclaimer
-~~~~
-Microsoft has not officially said that this works with Android. Many people
-have reported that it mostly works though. According to Microsoft, "Hotmail
-team will add Android to list of supported devices in the coming months after
-testing is completed."
-~~~~
-
-
-[[now-lets-get-started]]
-== Now, Let's Get Started
-
-First, launch your mail application called...you guessed it!..."Email". From
-there, chances are your Accounts screen will come up. If this is the case, hit
-the menu button on your phone and select "Add Account".
-
-____
-image:files/A-hotmail01.jpg[height=400]
-
-From there, type in your email address and password. In my case, this was
-"username@live.com", though "username@hotmail.com" should have no problem as
-well. Now, select "Manual Setup".
-____
-
-At this point the application will ask you what kind of mail connection this
-will be. You should see POP, IMAP, and Exchange. Select "Exchange".
-
-On the window you're taken to, most of the lines should be filled in for you.
-We do have to make a few changes though.
-
-____
-image:files/A-hotmail02.jpg[height=400]
-
-First off, your "DomainUsername" will be slightly incorrect. What you probably
-see is "username".
-____
-
-
-And that's it!
-
-
-Category:Android
-Category:Email
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Android:My_Phone_Configuration.ascii b/src/Android:My_Phone_Configuration.ascii
deleted file mode 100644
index 71518ae..0000000
--- a/src/Android:My_Phone_Configuration.ascii
+++ /dev/null
@@ -1,147 +0,0 @@
-Android:My Phone Configuration
-==============================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-== {doctitle}
-
-I rebuilt my phone this weekend so I could test out http://aospa.co/[AOSPA]
-(Paranoid Android) for my phone, since unofficial support was just released a
-few days ago.
-
-During my rebuild, it occurred to me that I haven't seen much documentation on
-people's processes and software sets, especially for the folks who want to run
-a phone on as much open source software as possible. I have found
-https://blog.torproject.org/blog/mission-impossible-hardening-android-security-and-privacy[one
-post] written by the nice folks over at the Tor project, which discusses how to
-harden an Android device and provides a similar set of information I am about
-to provide, but it's slightly out of date. That said, here's how I run my
-phone.
-
-
-[[disabled-applications]]
-== Disabled Applications
-
-The first thing I do when booting my phone for the first time is
-disable several applications that come preinstalled on most roms or come
-as a part of Google Apps.
-
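-As an aside, the same can likely be done from a root shell instead of the
-Settings app. A hypothetical sketch using the pm tool (package name assumed):
-
-----
-# Disable the stock Browser package (requires root)
-pm disable com.android.browser
-----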
-
-[[android-applications]]
-== Android Applications
-
-* **Browser**: I disable this one because with Google Apps installed and
-an account set up, it forces you to log in to all of Google's services. I use
-https://github.com/anthonycr/Lightning-Browser[Lightning Browser] instead (it's
-available on the f-droid market).
-
-* **Email**: Disabled because I use https://github.com/k9mail/k-9/[k-9
-mail] instead, due to its support for account backups, source code being
-readily available, not being developed mainly by Google, etc. K-9 is also
-available on the f-droid market.
-
-* **Exchange Services**: This one I disable because I don't have any
-exchange accounts on my phone. No sense in having it enabled if you aren't
-using it.
-
-* **One Time Init**: This is executed one time, on first boot, or so its
-name indicates. If it's running more than that, I don't want it running, so it
-is disabled.
-
-* **Sound Recorder**: I disable this one mostly because I don't use it,
-and disabling it removes its icon from my application drawer, thus saving
-space.
-
-
-[[google-apps-services]]
-== Google Apps Services
-
-* **Google Backup Transport**: I don't back up my phone to Google's
-services.
-
-* **Google Calendar Sync**: I don't sync my calendar through Google
-anymore.
-
-* **Google Contacts Sync**: I don't sync my contacts through Google
-anymore.
-
-* **Google One Time Init**: It's a one time init. No sense in leaving it
-enabled once it has run once.
-
-* **Market Feedback Agent**: I don't give market feedback on my phone.
-
-
-[[installed-applications]]
-== Installed Applications
-
-These are the applications I have installed on my phone. The majority of them
-are open source and can be found on the https://f-droid.org/[f-droid market].
-
-* **And Bible**: Open source Christian Bible for android.
-
-* **Barcode Scanner**: Useful for scanning all kinds of barcodes. Open
-source and available on f-droid.
-
-* **Conversations**: This is my chat client. It supports end-to-end
-encryption, and has a very friendly interface. Open source and available on
-f-droid.
-
-* **DAVdroid**: I currently host all of my contacts and calendars on my
-own hosted ownCloud instance. This provides support for caldav and carddav
-syncing, which allows me to no longer keep my contacts or calendars on Google's
-services.
-
-* **Duolingo**: One of my favorite language-learning tools. Closed
-source though (I wish they'd change that, but oh well).
-
-* **f-droid**: Exclusively open source Android market. I have to
-download all these applications somehow after all.
-
-* **Flym**: Open source rss stream reader.
-
-* **K-9 Mail**: Open source fork of the stock Android email client.
-Supports backup of all accounts so they can later be re-imported (useful for us
-flash junkies).
-
-* **Kore**: Open source Kodi (or xbmc) remote control client. Available
-on f-droid.
-
-* **Lightning**: Open source and lightweight browser. Very smooth and
-fast. Available on f-droid market.
-
-* **oandbackup**: Application backup software. I don't flash a new rom
-without first using this to back up each individual application. Available on
-the f-droid market.
-
-* **Open Camera**: With the introduction of the Lollipop camera, it has
-gotten much "dumber". I like all the advanced settings, so I have this
-installed.
-
-* **OpenKeychain**: Imports pgp keys. Integrates into Password Store and
-K-9 mail for encrypting/decrypting passwords, and encrypting/decrypting and
-signing emails, respectively.
-
-* **Orbot**: Open source Tor client. Available on the f-droid market.
-
-* **OsmAnd~**: Open source map application. Fair replacement for Google
-Maps. Available on f-droid market.
-
-* **Password Store**: Password manager. Uses pgp to encrypt/decrypt
-password entries. Also has clients for Linux, Windows, and OS X. Available on
-f-droid market.
-
-* **Syncthing**: How I backup my phone, off-phone. Open source
-peer-to-peer synchronization client. I have mine set up to sync
-/storage/sdcard0 and /storage/sdcard1, which gets all the necessary data from
-my phone, onto my laptop. Available on f-droid market.
-
-* **Google Voice**: This is the one last Google application I haven't
-been able to replace yet, open source or no, free or no. It seems the majority
-of competing services in this arena are all tailored to business voip
-customers. I just want one phone number with text messaging support, and thus
-can't justify $40 or more per month for this kind of service. I'm still on the
-hunt though and will update this post if I ever manage to replace this
-application.
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Android_Client_and_Sync_with_ownCloud_on_NGINX.ascii b/src/Android_Client_and_Sync_with_ownCloud_on_NGINX.ascii
deleted file mode 100644
index 359de3c..0000000
--- a/src/Android_Client_and_Sync_with_ownCloud_on_NGINX.ascii
+++ /dev/null
@@ -1,61 +0,0 @@
-Android Client and Sync with OwnCloud on NGINX
-==============================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-I have been looking for a good way to move completely from Google for quite
-some time. Many of the things I am dependent upon them for are pretty vital for
-day to day operations. One of these is of course contact and calendar syncing.
-Currently, my solution to that problem is http://www.egroupware.org[EGroupware]
-for my server using syncml through http://funambol.com[Funambol] to synchronize
-my contacts from my web server to my Android phone. This solution is bulky,
-taking up about 80 MB on my server for the PHP code. Though this works, it is
-hardly ideal. That's why I was so excited to try out
-http://owncloud.org[ownCloud]. Their Android client is still definitely a work
-in progress, but at least it's something (not to mention that they use
-standards-based services, so several other sync apps for Android can work with
-ownCloud).
-
-Now, I run http://nginx.org[NGINX] on my web server which does things a little
-differently than Apache, especially in regard to .htaccess files. Despite that
-though, out of the box (or tarball) ownCloud seems to work perfectly. However,
-when you try to sync up your Android phone via
-http://owncloud.org/support/android/[their dandy client], you get this obscure
-error
-
-----
-Wrong path given
-----
-
-Additionally, when you check your server access logs, you'll see
-
-----
-Requested uri (/remote.php/webdav.php) is out of base uri (/remote.php/webdav/)
-----
-
-This is most likely because you need two location directives in your NGINX conf
-file (or vhost file if you're doing things that way). To fix this, just put the
-following two location blocks in said config file (this assumes your ownCloud
-instance is running at /owncloud):
-
-----
-location /owncloud {
- index index.php;
- try_files $uri $uri/ @owncloud;
-}
-location @owncloud {
- rewrite ^/owncloud/(.*)$ /owncloud/index.php?p=$1 last;
-}
-----
-
-And that should do it for you!
-
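-If you'd rather verify the fix without reaching for your phone, a quick check
-with curl should also work (URL and credentials here are placeholders):
-
-----
-# A WebDAV PROPFIND against the endpoint the Android client uses
-curl -u user:password -X PROPFIND https://example.com/owncloud/remote.php/webdav/
-----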
-
-Category:Nginx
-Category:Android
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Android_Screen_Density.ascii b/src/Android_Screen_Density.ascii
deleted file mode 100644
index 2d18cc1..0000000
--- a/src/Android_Screen_Density.ascii
+++ /dev/null
@@ -1,90 +0,0 @@
-Android Screen Density
-======================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-Recently a Droid X owner I know showed me software that can change the screen
-density on Android phones (I can't remember if it was a part of his rom or
-not). I thought it was cool, so I set out to find a way to manually change
-screen density without installing any additional software since I try to run as
-minimalistic an install on my phone as possible (my fellow Evo users out there,
-you know why).
-
-Just before we start things off here, I'd like to put a disclaimer on this one.
-You likely won't brick your phone (you'd have to try really hard or have really
-bad luck), but you can mess it up pretty bad as we will be editing a system
-configuration file. If you cause some problems, please feel free to ask
-questions about it and I will try my best to help, but I offer no warranty or
-guarantee on this.
-
-With that out of the way, let's get started!
-
-As many things do in Android, this requires root as we will have to remount the
-/system partition.
-
-First things first, crack open your terminal emulator. If you don't have this,
-you can find it on the market; however, most roms include this application by
-default.
-
-Once in terminal emulator, run the following command:
-
-----
-su
-----
-
-This logs your terminal session in as root (or **S**uper **U**ser) so we can
-perform the various operations needed to make the change. Obviously, your
-superuser software will kick in here. Just select Allow.
-
-Now that we are logged in as root, run the following command.
-
-----
-mount -o remount,rw /system
-----
-
-This will remount the /system partition with read/write permissions. Without
-running this command, we can't save the config file we will be editing in a
-few. The default android has for this on boot is read only permissions, as this
-partition contains some pretty critical stuff (it isn't called system for
-nothing). This is a good security measure to keep programs from changing all
-kinds of stuff on your phone. No worries however, we will only have write
-permissions set up for a few minutes.
-
-Now, open up the build properties file located at /system/build.prop. I
-am assuming here that you know how to use VI. If you don't, I am in the
-process of writing up a post on using VI in Android terminal emulator.
-If you know how to use it on a desktop, VI on Android is very similar
-and you should be able to proceed; I detail later how to hit the
-escape key when you don't have one on your phone.
-
-----
-vi /system/build.prop
-----
-
-Scroll down until you see **ro.sf.lcd_density = 160**. If you change this
-number and reboot your phone, your screen density will change on startup. I
-typically use 120.
-
-Finally, save the file and reboot. For you EVO users who don't have a qwerty
-keyboard with an escape key (who has one of those anyways), press **volume up +
-e**. I believe volume up is the terminal emulator equivalent of ctrl.
-
-Reboot your phone for the changes to take effect.
-
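-If you'd rather skip vi entirely, the same edit can likely be scripted. A
-sketch, assuming your rom ships sed (many include it via busybox) and the
-density is currently 160:
-
-----
-mount -o remount,rw /system
-# Swap the density value in place
-sed -i 's/ro.sf.lcd_density = 160/ro.sf.lcd_density = 120/' /system/build.prop
-mount -o remount,ro /system
-----
-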
-*WARNING (Wik)* : Don't set this value too small or you won't be able to
-use your phone because everything will be tiny. You have been warned.
-
-*WARNING WARNING (Also Wik)* : When I discovered the build.prop file, I was
-most excited and started poking around. I noticed a lot in there that could
-cause problems for your phone. Be careful when changing the values in this
-file. It can be fun, but you might end up with an unusable phone until you
-reflash it.
-
-
-Category:Android
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Aol_Email_Hacked.ascii b/src/Aol_Email_Hacked.ascii
deleted file mode 100644
index a5121a3..0000000
--- a/src/Aol_Email_Hacked.ascii
+++ /dev/null
@@ -1,237 +0,0 @@
-Aol Email Hacked
-==================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-== {doctitle}
-
-About four days ago, my dad's email account began spamming me. I initially
-thought the email looked fishy, but it had a few things about it that made it
-seem relatively legitemate. The first reason being that my dad frequently sends
-me news articles (the email had a link to a "news" article, albeit a suspicious
-one). The second was that the people included on the email were all people he
-knows. And thus, I clicked the link on my phone and it promptly took me to a
-website that downloaded the file "security.update.apk" to my phone. I said to
-myself, "Self, that looks like something bad. Better not install that."
-
-And so I didn't. After seeing the malicious file download though, I went back
-to my "dad's" email and had a good look at the headers and there it was:
-several non-Aol mail servers in line, ending with my server which didn't mark
-it as spam for a very key reason.
-
-
-== The Problem
-
-Most people don't know that the to, cc, bcc, subject, and body are not the only
-fields you can change in an email. Many who run their own mail servers for the
-first time have an epiphany that they can change any field on an email,
-including the *from* field. So what's to keep us from framing Roger Rabbit?
-It's very easy to send an email from someone else without actually being logged
-in to their account. The server conversation for that scenario would go roughly
-like this...
-
-[quote]
-____
-**super_sketchymail.info**: Pssst. Hey you over there...I have a letter for
-you. _*skulks into the shadows*_
-
-**gmail.com**: Okay? Lemme see... Oh look. It's a letter to
-frank15689@gmail.com and it's from james19875@aol.com. Okay! I'll go deliver
-this to him.
-
-**super_sketchymail.info**: _*continues handing out false letters*_
-____
-
-There might be a subtle something you missed in that conversation just now. The
-email is coming from __super_sketchymail.info__, but the letter itself says
-it's from aol.com. The point here is that Gmail missed that it was a fraudulent
-email and now Frank has it in his inbox.
-
-
-== The Solution: SPF
-
-There are many methods to detect and respond to fraudulent emails. One of them
-(the topic of this post) is this great thing invented by the elders of the
-internet called SPF, or **s**ender **p**olicy **f**ramework. In a scenario
-where SPF was implemented, the mail server conversation would go roughly like
-this...
-
-[quote]
-____
-**super_sketchymail.info**: Pssst. Hey you over there...I have a letter for
-you. _*skulks into the shadows*_
-
-**gmail.com**: Okay? Lemme see... Oh look. It's a letter to
-frank15689@gmail.com and it's from james19875@aol.com. Lemme check with aol.com
-first to make sure they say _super_sketchymail.info_ can send email on their
-behalf
-
-**gmail.com**: Hey **aol.com**, can *super_sketchymail.info* send email on your
-behalf?
-
-**AOL.com**: No they cannot!
-
-**gmail.com**: Nope! They say you can't. Sorry pal, I'm not going to
-deliver this.
-____
-
-Effectively what SPF provides is a way for a mail server to verify that the
-server delivering the mail is approved to do so for the given email address
-(the _from_ field). In the previous conversation, super_sketchymail.info was
-trying to deliver mail on behalf of Aol. Gmail then checked with Aol (their
-SPF records) and saw that their list of approved mail servers did not include
-super_sketchymail.info, and thus the email would not be delivered.
-
-Isn't that a great little bit of functionality?
-
-
-[[where-aol-went-wrong]]
-== Where AOL Went Wrong
-
-[[the-technical-version]]
-=== The Technical Version
-
-The functionality I just described is really great...if you have it in
-place. Aol _does_ have it in place, just not correctly. A quick lookup
-of their DNS and we'll see why.
-
-**Note** that this DNS lookup is as of 2014.04.21.
-
-----
-$ dig -t txt aol.com
-
-; <<>> DiG 9.9.2-P2 <<>> -t txt aol.com
-;; global options: +cmd
-;; Got answer:
-;; ->>HEADER<<- opcode: QUERY, status: NOERROR, id: 32129
-;; flags: qr rd ra; QUERY: 1, ANSWER: 2, AUTHORITY: 0, ADDITIONAL: 1
-
-;; OPT PSEUDOSECTION:
-; EDNS: version: 0, flags:; udp: 4000
-;; QUESTION SECTION:
-;aol.com. IN TXT
-
-;; ANSWER SECTION:
-aol.com. 3600 IN TXT "v=spf1 ptr:mx.aol.com ?all"
-aol.com. 3600 IN TXT "spf2.0/pra ptr:mx.aol.com ?all"
-
-;; Query time: 62 msec
-;; SERVER: 172.20.0.40#53(172.20.0.40)
-;; WHEN: Wed Apr 23 08:39:02 2014
-;; MSG SIZE rcvd: 228
-----
-
-
-The key bits in there are the two lines that have "spf1" and "spf2.0" and
-end with "?all". Those two DNS entries say a bit more than we'll discuss
-here, so the most important bit in there for the purposes of this post
-is the **?all**. What that says is that any host that doesn't match any
-of the previous policies in any way is marked as neutral. When a server
-checks Aol's DNS entries to confirm if a server is approved to send
-emails, instead of saying an emphatic __no__, it says "__Yeah, sure.
-Whatever__". I think that flag could be better described as the
-ambivalent flag.
-
-The bit that ends an spf record (the _all_ bit) can have one of four
-qualifiers: +, ?, \~, and -. Most SPF records (arguably all) should end
-with _-all_ because that disowns all mail servers that don't match the
-previous policies. Aol uses the __?all__, which is neutral (as
-mentioned).
-
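-For illustration, a strict record for a hypothetical domain (example.com and
-its vendor are made up here) would look something like this:
-
-----
-example.com.  3600  IN  TXT  "v=spf1 mx include:spf.vendor.example -all"
-----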
-
-[[the-less-technical-version]]
-=== The Less Technical Version
-
-Basically, the way AOL has their DNS SPF records configured, they almost
-approve anyone to send mail as Aol. I say _almost approve_ because they take
-only a neutral standpoint on any server that tries to send mail as them. This
-is a huge problem because anyone who runs a mail server can spoof headers, send
-mail as Aol users, and services like Gmail can't verify that it's not from Aol,
-because Aol says that it's okay for any server to send mail as them.
-
-The quick solution here for Aol is to flip that *?all* to a **-all**. My guess
-is that Aol has some vendors sending mail as them and they haven't taken the
-time to put their vendors' servers in DNS (easily fixable with the INCLUDE
-mechanism). Other than that though, there's really no reason to have the ?all
-in place that I can think of (besides just not knowing how spf works).
-
-
-[[one-final-issue]]
-== One Final Issue
-
-Despite Aol's DNS mis-configuration, there is one final issue that I can't
-really speak much to. It goes back to the emails I've been receiving from my
-"dad's" email account. Each of those is written to people from his contact
-list, which indicates that someone was able to get in to Aol (or their user
-data got out) and acquire users' contact lists. If they got their contact lists
-though, who knows what else they were able to get.
-
-How big was this breach? I can't say. Aol
-http://techcrunch.com/2014/04/21/aol-mail-hacked-with-spoofed-accounts-sending-spam/[confirmed
-the breach] just two days ago. Hopefully Aol doesn't play this out poorly and
-try to keep everyone in the dark. I'll post back here as I learn more.
-
-
-[[update-2014.05.11]]
-== Update: 2014.05.11
-
-It's actually been a while since the issue was "resolved"; I just haven't had a
-chance yet to post back on it. Now though, it's snowing outside (in spring), I
-have a hot mug of coffee, and my cat is sleeping on the recliner instead of my
-keyboard. Let's get started. First, let's have a look at AOL's DNS to see how
-they've done fixing it up.
-
-
-----
-$ dig -t txt aol.com
-
-...
-;; ANSWER SECTION:
-aol.com. 1942 IN TXT "v=spf1 ptr:mx.aol.com include:spf.constantcontact.com include:aspmx.sailthru.com include:zendesk.com ~all"
-aol.com. 1942 IN TXT "spf2.0/pra ptr:mx.aol.com include:spf.constantcontact.com include:aspmx.sailthru.com include:zendesk.com ~all"
-...
-----
-
-
-It looks like they've certainly updated their DNS thoroughly. In application,
-their fix _should_ prevent folks from being able to spoof legitimate AOL
-accounts, but that's actually only because their vendors have their DNS
-configured properly. To be extra clear, the reason the problem is fixed is not
-because AOL has actually implemented a solid fix. As mentioned earlier in
-link:#The_Technical_Version[the technical version section], there are four
-qualifiers for the trailing _all_ bit; AOL chose to use the **~**, a soft fail.
-This will still not disown non-AOL servers sending mail as AOL. It will only
-"raise suspicion" for those emails. However, thanks to their vendors knowing
-what they're doing (aspmx.sailthru.com at least), their spf records
-actually end with a __-all__, or a hard fail.
-
-To give a simple overview of how AOL's DNS works now, they basically include
-all of their vendors' spf records in their own spf record. That means that if
-any of their vendors break their own DNS to allow anyone to spoof the vendor,
-the "spoofers" can also spoof AOL users because AOL's DNS is including the
-vendor's bad DNS configuration. In this case though, one of AOL's vendors
-(aspmx.sailthru.com) ends with a __-all__, causing AOL's DNS configuration to
-be secure because one of their vendors made an alright decision in their
-configuration. Dear AOL...
-
-One final thing to note regarding the remainder of the breach.
-http://www.pcworld.com/article/2148523/aol-traces-mystery-spam-to-security-breach.html[AOL
-has confirmed] that there was indeed a security breach wherein the attackers
-gained access to user's complete address books (email address, names, physical
-mailing addresses, etc) as well as encrypted security questions/answers and
-encrypted passwords (gosh I hope they mean hashed instead of encrypted
-passwords). I hope that AOL comes out with a detailed report as to how the
-attackers gained access to their systems. Given their mishap with DNS (benefit
-of the doubt), I hope the hack on their servers wasn't nearly as obvious. Also
-I'd like to know for my own edification. Due to this leak, I have begun
-receiving an increased amount of non-AOL spam as if my email address was
-released to more spammers. Thanks AOL. I guess though that it was bound to
-happen sometime, by someone. Why not AOL? At least I got to learn
-link:Exim_Spam_Filtering_with_Bogofilter[how to set up a spam filter for Exim].
-
-Category:Aol
-Category:Security
-Category:DNS
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Apache:Listening_Port.ascii b/src/Apache:Listening_Port.ascii
deleted file mode 100644
index e7281f1..0000000
--- a/src/Apache:Listening_Port.ascii
+++ /dev/null
@@ -1,60 +0,0 @@
-Apache:Listening Port
-========================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-I recently had a web server behind a load balancer that needed to listen on a
-different port because the load balancer was translating port 80 traffic for
-security. Thankfully, changing the ports that HTTPd listens on is relatively
-simple.
-
-Crack open your favorite command line editor. For the purposes of this example,
-I'll be using vim.
-
-----
-vim /etc/httpd/conf/httpd.conf
-----
-
-This is a relatively large file, so you'll have to scroll for some time
-to find what you're looking for (or use search). Scroll
-down until you find a section that looks like this:
-
-----
-#
-# Listen: Allows you to bind Apache to specific IP addresses and/or
-# ports, in addition to the default. See also the <VirtualHost>
-# directive.
-#
-# Change this to Listen on specific IP addresses as shown below to
-# prevent Apache from glomming onto all bound IP addresses (0.0.0.0)
-#
-# Listen 12.34.56.78:80
-#
-Listen 80
-----
-
-Add a "Listen <port>" line in this section for each port you need. If for
-instance you need httpd to listen for an ssl (https) encrypted
-connection, you can add
-
-----
-Listen 443
-----
-
-Once you've made the changes you want, save and close the file and run the
-command
-
-----
-/etc/init.d/httpd restart
-----
-
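-To confirm the new ports took, something like the following should do it (the
-grep pattern assumes a typical process name):
-
-----
-# Check the config syntax, then list the ports httpd is bound to
-apachectl configtest
-netstat -tlnp | grep httpd
-----
-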
-That's all there is to it! Now for lunch.
-
-
-Category:Apache
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Attached_Devices_and_VPS_(OpenVZ_and_Virtuozzo).ascii b/src/Attached_Devices_and_VPS_(OpenVZ_and_Virtuozzo).ascii
deleted file mode 100644
index 93c668d..0000000
--- a/src/Attached_Devices_and_VPS_(OpenVZ_and_Virtuozzo).ascii
+++ /dev/null
@@ -1,21 +0,0 @@
-Attached Devices and VPS (OpenVZ and Virtuozzo)
-===============================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-*Note to self:* When hosting on a VPS using OpenVZ or Virtuozzo, to list
-attached devices and their mount points, "fdisk -l" doesn't work ("cannot open
-/proc/partitions") and "/proc/partitions" doesn't exist. To list all mount
-points, run *df -h*, which lists all mounted drives and their free space.
-
-/etc/fstab still works as it should.
-
-
-Category:Linux
-Category:Virtualization
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Backing_up_a_Server_Remotely_Using_Minimal_Bandwidth.ascii b/src/Backing_up_a_Server_Remotely_Using_Minimal_Bandwidth.ascii
deleted file mode 100644
index 1b1e944..0000000
--- a/src/Backing_up_a_Server_Remotely_Using_Minimal_Bandwidth.ascii
+++ /dev/null
@@ -1,66 +0,0 @@
-Backing up a Server Remotely Using Minimal Bandwidth
-====================================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-The server that runs this website (and a lot of others) also runs a lot of
-other services such as an IRC server, a Jabber server, NGINX (of course), and
-various other things. I like to take a lot of backups, especially since I'm not
-the best person in the area of security. With that, my old way of backing up my
-server was relatively painful. I had a script that tarred and compressed each
-service directory individually and moved it to a secure location on my web
-server for download. After download, the script would remove the backup, and
-continue to the next.
-
-The problem with this method is that it consumes a lot of bandwidth and time.
-By the time I have downloaded everything, I have used up several gigabytes of
-bandwidth. I don't mind so much about the bandwidth though. What's important
-is the time and interaction it takes.
-
-
-[[enter-the-light-bulb...]]
-== Enter the Light Bulb...
-
-I've been using rsync for some time now to mirror my laptop to my server
-at home. For some reason, it never occurred to me to use rsync with a
-private key to log in to my server and download the deltas to my local
-machine. If I want a single compressed tar file for a backup, all I have
-to do is backup my local server's copy of everything rather than doing
-it on my web server and downloading that. Ending this already too long
-blog post on this simple topic, here's the rsync command I'm using...
-
-----
-rsync -avP --delete --chmod=g+rx --rsh="ssh -p1234 -i ~/.ssh/id_rsa.pdeb.user" user@server.net:/dir1 /home/server/dir1
-----
-
-
-[[a-quick-explanation]]
-== A Quick Explanation
-
-* *rsync -avP* runs rsync in archive mode (-a), specifies verbose mode
-(-v), and sets rsync to display its progress on each individual file as it goes
-(-P).
-
-* With the *--delete* option, rsync will delete files on the destination if
-they were deleted on the source (this isn't the default).
-
-* *--chmod=g+rx* sets the group permissions on the destination to
-read and execute. This is handy if you want to access the backups from
-another account that doesn't have access on the server too. This switch is not
-necessary.
-
-* *--rsh="ssh -p1234 -i ~/.ssh/id_rsa.pdeb.user"* specifies a custom port to
-connect on (port 1234 in this case) and specifies a private key to use when
-attempting to log in.
-
-* *user@server.net:/dir1 /home/server/dir1* is the host (server) to
-connect to along with the user to try (user), the source directory (:/dir1) and
-the destination directory (/home/server/dir1).
-
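-Put together, a cron-able wrapper might look like the sketch below (paths and
-key name mirror the command above; the backup directory is hypothetical):
-
-----
-#!/bin/bash
-# Pull only the deltas from the web server...
-rsync -avP --delete --chmod=g+rx \
-  --rsh="ssh -p1234 -i ~/.ssh/id_rsa.pdeb.user" \
-  user@server.net:/dir1 /home/server/dir1
-# ...then tar and compress the local mirror instead of the remote copy
-tar -czf /home/server/backups/dir1-$(date +%F).tar.gz -C /home/server dir1
-----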
-
-Category:Linux
-Category:Backups
-Category:SSH
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Bash:Lesser_Known_Bits.ascii b/src/Bash:Lesser_Known_Bits.ascii
deleted file mode 100644
index 859da71..0000000
--- a/src/Bash:Lesser_Known_Bits.ascii
+++ /dev/null
@@ -1,139 +0,0 @@
-Bash:Lesser Known Bits
-======================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-== {doctitle}
-
-I won't lie, bash is my shell of choice (as if that's not obvious). Sure, the
-ability to handle arrow keys, a command history, colors, and escape sequences
-for terminal formatting are all great pieces, but most other shells can do
-those things as well. What really makes bash stand out? There's a pretty good
-list of things that are lesser known but are super useful, albeit not always
-often though. All of these are well documented in the bash man page, but that
-one is not exactly easy to find stuff in unless you know what you're looking
-for. Running it through the wc command, the bash man page apparently has 41,452
-words. All that aside though, this is a list of some lesser known things I use
-occasionally (about once a week-ish) from our friend bash.
-
-
-[[one-liner-loops]]
-== One-liner Loops
-
-This is one that is supported by most if not all of the other shells out there,
-but it is still super useful and I don't see it used often. A one-liner loop is
-effectively a very short (one line in fact) script used to perform a small
-number of operations (it gets confusing if you do too many) in bulk. A good
-example here is with a server environment of any size greater than I'd say two.
-I frequently need to check lots of servers for something, be it the existence
-of a file, the status of a file in comparison with a local copy (diffs), bulk
-modifying remote files using sed, etc.
-
-Recently though, I needed to verify the installed version of sudo
-specifically on a list of about 50 servers. I sent the list of servers
-to a text file, one server per line, and did the following and had my
-answer within about 30 seconds (it takes a few hundred milliseconds for
-ssh connections to establish on our atrociou...er...awesome network).
-
-----
-for i in $(cat ./servers.list); do echo $i; ssh user@$i 'sudo -V | grep "I/O plugin version"'; done
-----
-
-Presto! A big list of sudo versions across the entire environment.
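-
-As an aside, +$(cat ./servers.list)+ splits on any whitespace, so blank lines
-or names containing spaces can misbehave. Here's a sketch of the same loop
-reading the list line by line instead (ssh's -n flag keeps it from eating the
-rest of the list on stdin):
-
-----
-# Read servers.list one line at a time; -r keeps backslashes literal
-while read -r host; do
-  echo "$host"
-  ssh -n "user@$host" 'sudo -V | grep "I/O plugin version"'
-done < ./servers.list
-----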
-
-
-[[process-substitution]]
-== Process Substitution
-
-This one is really great. Some commands require one or more file paths
-to do what they need to do. A good example is diff. The diff command
-requires two file path parameters: file a and file b. What if you want
-to diff the outputs of two remote files though? Using process
-substitution, we can cat out a remote file using the typical command,
-+ssh user@server 'cat /etc/something'+, and have the output
-go to a local temp file for the life of the command calling it so we
-have something to work on. For example...
-
-----
-diff /etc/something <(ssh user@server 'cat /etc/something')
-----
-
-What we have here is a diff of the local /etc/something file and the remote
-/etc/something. The ssh connection string is encapsulated in a +<()+. This is
-the process substitution. This doesn't just work with remote files though. Say
-for instance you wanted to diff the contents of a directory on a local system
-and a remote system. Here's how you'd do that.
-
-----
-diff <(ls -1 /var/log/) <(ssh user@server 'ls -1 /var/log/')
-----
-
-Here we used process substitution to write the output of +ls -1 /var/log/+ to a
-temp file, then write the output of the same command run on another system over
-ssh to yet another temp file, then we use diff as usual to show us what is
-different. If you really wanted to get crazy, you could throw this into a bash
-one-liner loop and run the diff on multiple systems.
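-
-Nothing says one side has to be local, either. Both arguments can be process
-substitutions, so comparing a file between two remote hosts (hostnames here
-are made up) looks like this:
-
-----
-# Diff /etc/something as it exists on two different remote systems
-diff <(ssh user@server1 'cat /etc/something') <(ssh user@server2 'cat /etc/something')
-----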
-
-
-[[brace-expansion]]
-== Brace Expansion
-
-Brace expansion is really neat and I think super handy. This is the one I don't
-have a lot of use for though. It gets used about once every few scripts or
-about once or twice a month. Brace expansion is effectively on-the-fly array
-loops for commands. For a simple example, say you wanted to create three
-directories: dev, test, and prod. To create these without brace expansion,
-you'd have to run _mkdir_ three times. With brace expansion, you can do this
-
-----
-mkdir {dev,test,prod}
-----
-
-That's cool, but what's REALLY cool is that you can use this with nested
-directories. Say for instance we are creating a small (and poorly designed) dev
-environment. Inside of each we want the directories bin, etc, lib, var (we're
-just making 'em up now). Here's how you'd do that in one command
-
-----
-mkdir {dev,test,prod}/{bin,etc,lib,var}
-----
-
-That is the equivalent of
-
-----
-mkdir dev/bin
-mkdir dev/etc
-mkdir dev/lib
-mkdir dev/var
-mkdir test/bin
-mkdir test/etc
-mkdir test/lib
-mkdir test/var
-mkdir prod/bin
-mkdir prod/etc
-mkdir prod/lib
-mkdir prod/var
-----
-
-Another application for this is if you want to cat out a big list of
-specific files without catting out the entire directory (I did this one
-earlier this morning actually). Say you have 20 files called
-*list.<num>* (0-19) and you want to cat out numbers 1-9. Now, there are
-a lot of ways to do this of course, but this is how you can do it with
-brace expansion.
-
-----
-cat list.{1,2,3,4,5,6,7,8,9}
-----
-
-...or even shorter...
-
-----
-cat list.{1..9}
-----
-
-Those are the equivalent of
-
-----
-cat list.1 list.2 list.3 list.4 list.5 list.6 list.7 list.8 list.9
-----
-
-How's that for time saving?
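-
-One more trick worth knowing: in bash 4 and later, sequence expansions can
-zero-pad and take a step increment. A couple of quick sketches:
-
-----
-# Zero-padded: expands to list.01 list.02 ... list.09
-cat list.{01..09}
-
-# Step of 2: expands to list.1 list.3 list.5 list.7 list.9
-cat list.{1..9..2}
-----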
-
-
-Category:Bash
-Category:Shells
-Category:Linux
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Benchmarks:Toshiba_Canvio_Slim.ascii b/src/Benchmarks:Toshiba_Canvio_Slim.ascii
deleted file mode 100644
index 0e4f877..0000000
--- a/src/Benchmarks:Toshiba_Canvio_Slim.ascii
+++ /dev/null
@@ -1,60 +0,0 @@
-Benchmarks:Toshiba Canvio Slim
-==============================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-If you're considering purchasing an external hard drive,
-link:Category:Benchmarks[these benchmarks] should hopefully help clear up some
-questions you might have.
-In this post, I benchmark my http://www.toshiba.com/us/canvio-slim[Toshiba
-Canvio Slim] hard drive. I've been using this drive for about a year now and
-it has performed well for me. It has even survived a drop while it was plugged
-in (it even disconnected from the cable). Here are the IO metrics from the
-great https://www.gnu.org/[GNU] utility, dd.
-
-image:Toshiba_Canvio_Slim.png[height=300]
-
-* *Manufacturer*: http://www.toshiba.com/us/[Toshiba]
-* *Name*: http://www.toshiba.com/us/canvio-slim[Canvio Slim]
-* *Made in*: Philippines
-* *Size*: 500GB
-* *Interface*: USB 3.0/2.0
-* *Average Write Speed*: 99.0 MB/s
-* *Average Read Speed*: 93.5 MB/s
-
-[[benchmarks]]
-=== Benchmarks
-
-[[usb3-devzero-write]]
-==== USB3 /dev/zero Write
-
-Writing 16 gigabytes of zeros.
-
-----
-dd if=/dev/zero of=/dev/sdc bs=1M count=16384
-16384+0 records in
-16384+0 records out
-17179869184 bytes (17 GB) copied, 173.495 s, 99.0 MB/s
-----
-
-
-[[usb3-read-to-devnull]]
-==== USB3 Read to /dev/null
-
-Reading 16 gigabytes to /dev/null.
-
-----
-dd if=/dev/sdc of=/dev/null bs=1M count=16384
-16384+0 records in
-16384+0 records out
-17179869184 bytes (17 GB) copied, 183.838 s, 93.5 MB/s
-----
-
-Category:Hard_Drives
-Category:Benchmarks
-Category:Toshiba
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Benchmarks:WD_Elements.ascii b/src/Benchmarks:WD_Elements.ascii
deleted file mode 100644
index ac3b128..0000000
--- a/src/Benchmarks:WD_Elements.ascii
+++ /dev/null
@@ -1,115 +0,0 @@
-Benchmarks:WD Elements
-======================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-== Benchmarks:WD Elements
-
-For my work computer, I installed https://archlinux.org[Arch Linux] on an
-external USB hard drive. My idea behind that is that if I'm ever working from
-home and forget to bring my charger with me, I can just plug the drive into
-another computer and I'm back up and running. So far it's worked great. A few
-months back though, I dropped the drive while it was running. Surprisingly, it
-was okay (aside from going read-only until a reboot), though it was a bit
-slower afterwards. I would have assumed a head crash, but thus far I have
-noticed no data corruption.
-
-All that said, I want to get another drive that I can mirror with (software
-raid 1 anybody?), just in case something happens. I've been hunting around
-online for the last few days and have found it to be impressively difficult to
-find real specs on external USB hard drives. Sure, you can get that it's USB3
-and maybe even its rpm, but you're almost guaranteed not to find cache size or
-even what drive is inside the enclosure, metrics I consider to be very
-important. That's why I've decided to post the IO metrics for this drive.
-Hopefully someone will find these metrics useful.
-
-image:files/WD_Elements.jpg[height=300]
-
-* *Manufacturer*: http://www.wdc.com/en/[Western Digital]
-* *Name*: Elements
-* *Made in*: Malaysia
-* *Size*: 1TB
-* *Interface*: USB 3.0/2.0
-* *Average Write Speed*: 104 MB/s
-* *Average Read Speed*: 107 MB/s
-
-
-[[benchmarks]]
-=== Benchmarks
-
-[[usb3-devzero-write]]
-==== USB3 /dev/zero Write
-
-The fastest place I can think of to get data from and avoid any bottlenecks
-outside of the drive is to write from /dev/zero. The amount of processing power
-that goes into writing all zeros __is insignificant next to the power of the
-force__...er...reading data from another drive, potentially introducing more
-bottlenecks and not getting good measurements. Let us begin...
-
-----
-dd if=/dev/zero of=/dev/sdc bs=1M count=8192
-8191+1 records in
-8191+1 records out
-8589131776 bytes (8.6 GB) copied, 82.9999 s, 103 MB/s
-----
-
-Double the amount of data being written...
-
-----
-dd if=/dev/zero of=/dev/sdc bs=1M count=16384
-16384+0 records in
-16384+0 records out
-17179869184 bytes (17 GB) copied, 161.13 s, 107 MB/s
-----
-
-Looks like overall this drive consistently averages just over 100 MB/s plugged
-in to USB3.
-
-
-[[usb3-read-to-devnull]]
-==== USB3 Read to /dev/null
-
-Here we're basically doing the same as writing from /dev/zero, but instead
-we're reading verbatim the first _x_ consecutive number of bytes and sending
-them to a device that literally can't be a bottleneck: /dev/null. It's like
-sending dead satellites floating into space
-(https://www.youtube.com/watch?v=rWVGupqvCL8[spaaaaacce]) instead of spending
-the time to land them (if they don't burn up) and disassembling. If I had to
-pick somewhere to send something fast where there wouldn't be any bottlenecks,
-the vast void of space is where I'd send it - that is equivalent to /dev/null.
-Not a great analogy, I know, but honestly, I just wanted to reference
-https://www.youtube.com/watch?v=rWVGupqvCL8[that Portal] video.
-
-----
-dd if=/dev/sdc of=/dev/null bs=1M count=8192
-8192+0 records in
-8192+0 records out
-8589934592 bytes (8.6 GB) copied, 80.5907 s, 107 MB/s
-----
-
-
-[[conclusion]]
-=== Conclusion
-
-* **Average write speed**: 104 MBps (832 Mbps = .832 Gbps)
-* **Average read speed**: 107 MBps (856 Mbps = .856 Gbps)
-
-Overall I'd say this drive is okay. As mentioned, the maximum speed of the
-https://en.wikipedia.org/wiki/USB_3.0[USB3 spec] is 5 Gbps and this is getting
-just shy of 1/5 that. I won't balk at that because a 100 megabytes per second
-transfer rate is still pretty impressive for an external hard drive (that's
-838,860,800 bits per second!).
-
-One final thing to note, I ran these benchmarks on two systems, my laptop and
-my server, to make sure the USB3 port, processor, bus, etc. weren't themselves
-bottlenecks. The transfer rates were nearly identical (insignificantly
-different).
-
-
-Category:Western_Digital
-
-Category:Hard_Drives
-
-Category:Benchmarks
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Blog_Resurrection.ascii b/src/Blog_Resurrection.ascii
deleted file mode 100644
index 7a822dd..0000000
--- a/src/Blog_Resurrection.ascii
+++ /dev/null
@@ -1,48 +0,0 @@
-Blog Resurrection
-=================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-Hello all,
-
-A few months back I moved jobs. At my last job (as you all well know from this
-blog), I was a server administrator and .Net programmer on the side. At the new
-job, I was hired primarily as a web developer (PHP, JavaScript, Joomla dev,
-etc.). Naturally, because of the position change, I was no longer doing server
-administration and consequently had nothing new to post on this blog.
-
-That has changed now. The company I now work for didn't have much of a server
-infrastructure (a non-server edition mac that stored files), so they asked me
-to design and build a new one.
-
-All that being said, I now have new content to post here. You Linux users will
-be happy to hear that I am now using Linux for some of our servers (virtual
-host and file server). To you Windows users, I will be building an Exchange
-server, terminal server, and a backup server (this might be done on Linux
-though). Most likely things in that list will be shifted around a bit in the
-next two or so weeks so please bear with me.
-
-Some of the technologies I will be using in the upcoming posts will be
-
-* Windows Small Business Server 2008 (x64)
-* Ubuntu Server 9.10
-* VMWare Server 2
-* Microsoft Outlook Exchange
-* TightVNC
-* SSH
-
-On one final note, I would like to put out a shameless plug for my other blog,
-http://timexwebdev.blogspot.com[Musings of a Web Developer]. It covers the
-issues and fixes I have found throughout my adventures of coding websites.
-Essentially, it's the same thing as this blog but for web development rather
-than server administration.
-
-I look forward to the upcoming comments about how depressing IT work can be
-*AHEM*... I, uh, I mean the joys of IT work. Who am I kidding though? Everyone
-knows that we IT people love our jobs, especially the Dilbert comics that
-document our endeavors.
-
-:P
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Btrfs:Balancing.ascii b/src/Btrfs:Balancing.ascii
deleted file mode 100644
index c5a60b6..0000000
--- a/src/Btrfs:Balancing.ascii
+++ /dev/null
@@ -1,87 +0,0 @@
-Btrfs:Balancing
-===============
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-== {doctitle}
-
-I've been using https://btrfs.wiki.kernel.org[Btrfs] on all of my systems for a
-couple of years now. Thus far, it's been surprisingly stable. In those two years
-I only had link:Btrfs:RAID_5_Rsync_Freeze[one real issue]. However, today I ran into
-a new problem. Now that I know what the issue is, it's hardly a problem, but
-hey, semantics.
-
-For my setup at home, I have a Linux server running all the time which hosts my
-backups. My backups are copied via rsync. For security, my home directories on
-all systems are encrypted block devices using
-https://code.google.com/p/cryptsetup/wiki/DMCrypt[dm-crypt] with a
-https://code.google.com/p/cryptsetup/[LUKS header]. To force myself to clean up
-my files occasionally, I only give myself some 5 gigs of leeway. If I manage to
-remove for example 10 gigs of files, I reduce the size of the filesystem and
-block device container so I still only have about 2-5 gigs free (depends on
-what I'm doing hobby-wise at the time). This is where my problem with Btrfs
-comes in.
-
-
-[[the-really-excitingboring-details]]
-== The Really (Exciting|Boring) Details
-
-This section might be super boring for some or most folks because it talks
-about the innards of Btrfs. If you aren't interested, make like a Tatooine
-speeder and move along... move along.
-
-As more storage is needed for the filesystem, chunks of raw storage are
-consumed by default 1 gigabyte at a time. As the
-https://btrfs.wiki.kernel.org/index.php/SysadminGuide#Data_usage_and_allocation[kernel.org
-page] describes, these chunks are used for file data and/or metadata storage.
-As more files are written to the filesystem, more metadata chunks are required
-to describe the additional files (data to metadata ratios can be specified at
-filesystem creation). By default, a metadata chunk cannot be used for data and
-a data chunk cannot be used for metadata (kind of - there is a mixed mode which
-is tremendously slow on filesystems larger than 1G). On a large storage device
-this is fine, but if you are constantly deleting files like me, you may run
-into the issue I ran into where the available space value is incorrect because
-the various space checking commands check for available _data_ space, not
-taking into account metadata. Because I delete so many files so often, there is
-a lot of metadata storage that is allocated but is no longer used because the
-files that the metadata were describing no longer exist, and thus the metadata
-for those files do not either. Consequently, the metadata chunks are no longer
-fully used (remember, they are allocated 1 G at a time). Due to the fact that
-metadata and data chunks cannot be mixed by default, the underused metadata
-chunks just consume storage from the overall available, reducing the amount of
-available storage for data.
-
-_*takes a deep breath*_
-
-
-[[the-solution]]
-== The Solution
-
-The solution to this issue is called a rebalance (or balance as the btrfs
-subcommand is called). What it will do is rewrite all of the data on the given
-block device, sending it through the allocator algorithm before being rewritten
-to the storage. This will cause the data's metadata to be reallocated and
-rewritten. What results is your metadata being "restacked", potentially causing
-you to end up with completely empty 1G metadata chunks, thus freeing that
-storage space for data. This isn't a complete analogy, but you can think of
-this a [very] little like a defragment and cleanup process for metadata. Here's
-the command.
-
-----
-btrfs balance start /path/to/mount
-----
-
-If you're interested in metrics, run
-
-----
-btrfs filesystem df /path/to/mount
-----
-
-before and after you run the balance and compare your metadata values.
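-
-On a large filesystem a full rebalance can take quite a while. If your
-btrfs-progs supports balance filters (reasonably recent versions do), you can
-limit the work to mostly-empty chunks; a sketch, assuming filter support:
-
-----
-# Only rewrite metadata chunks that are less than 50% used
-btrfs balance start -musage=50 /path/to/mount
-----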
-
-Category:Btrfs
-Category:Linux
-Category:Filesystems
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Btrfs:RAID_5_Rsync_Freeze.ascii b/src/Btrfs:RAID_5_Rsync_Freeze.ascii
deleted file mode 100644
index bcdcb84..0000000
--- a/src/Btrfs:RAID_5_Rsync_Freeze.ascii
+++ /dev/null
@@ -1,91 +0,0 @@
-Btrfs:RAID 5 Rsync Freeze
-=========================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-My server's _/home/_ directory is a btrfs RAID 5, spanning three drives (I did
-a blog post about it link:Btrfs:RAID_Setup[here]). Everything worked fine, until I
-used rsync to sync my files from my laptop to my server. At that point, the
-sync would go well for a little while and then slow to a crawl. I couldn't
-cancel the sync with a ctrl+c. If I could get on my server over ssh, I'd find
-that one of my cpus was pegged at 100%. Sometimes though it got so bogged down
-I couldn't even get to the server at all. If I were already on the server and I
-did a kill -9 on rsync, it'd go defunct.
-
-I checked my logs after trying to umount /home/ and found...
-
-----
-Nov 03 12:01:18 zion kernel: device label home devid 1 transid 1173 /dev/sdb
-Nov 03 12:01:19 zion kernel: btrfs: disk space caching is enabled
-Nov 03 12:11:53 zion kernel: INFO: task umount:1668 blocked for more than 120 seconds.
-Nov 03 12:11:53 zion kernel: "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
-Nov 03 12:11:53 zion kernel: umount D ffff880037afbc60 0 1668 1653 0x00000000
-Nov 03 12:11:53 zion kernel: ffff880037afbbd0 0000000000000086 0000000000014500 ffff880037afbfd8
-Nov 03 12:11:53 zion kernel: ffff880037afbfd8 0000000000014500 ffff8800aa0caa30 0000000000000010
-Nov 03 12:11:53 zion kernel: 000000000d6fffff ffff880037afbb98 ffffffff8113a911 ffff8800afedb728
-Nov 03 12:11:53 zion kernel: Call Trace:
-Nov 03 12:11:53 zion kernel: [<ffffffff8113a911>] ? free_pcppages_bulk+0x3b1/0x3f0
-Nov 03 12:11:53 zion kernel: [<ffffffff81132700>] ? filemap_fdatawait+0x30/0x30
-Nov 03 12:11:53 zion kernel: [<ffffffff814e1029>] schedule+0x29/0x70
-Nov 03 12:11:53 zion kernel: [<ffffffff814e12cf>] io_schedule+0x8f/0xe0
-Nov 03 12:11:53 zion kernel: [<ffffffff8113270e>] sleep_on_page+0xe/0x20
-Nov 03 12:11:53 zion kernel: [<ffffffff814ddb5b>] __wait_on_bit_lock+0x5b/0xc0
-Nov 03 12:11:53 zion kernel: [<ffffffff8113284a>] __lock_page+0x6a/0x70
-Nov 03 12:11:53 zion kernel: [<ffffffff81084800>] ? wake_atomic_t_function+0x40/0x40
-Nov 03 12:11:53 zion kernel: [<ffffffff81141fa3>] truncate_inode_pages_range+0x613/0x660
-Nov 03 12:11:53 zion kernel: [<ffffffff81142005>] truncate_inode_pages+0x15/0x20
-Nov 03 12:11:53 zion kernel: [<ffffffffa07df172>] btrfs_evict_inode+0x42/0x380 [btrfs]
-Nov 03 12:11:53 zion kernel: [<ffffffff811b97b0>] evict+0xb0/0x1b0
-Nov 03 12:11:53 zion kernel: [<ffffffff811b98e9>] dispose_list+0x39/0x50
-Nov 03 12:11:53 zion kernel: [<ffffffff811ba56c>] evict_inodes+0x11c/0x130
-Nov 03 12:11:53 zion kernel: [<ffffffff811a1cc8>] generic_shutdown_super+0x48/0xe0
-Nov 03 12:11:53 zion kernel: [<ffffffff811a1f22>] kill_anon_super+0x12/0x20
-Nov 03 12:11:53 zion kernel: [<ffffffffa07a8ee6>] btrfs_kill_super+0x16/0x90 [btrfs]
-Nov 03 12:11:53 zion kernel: [<ffffffff811a22fd>] deactivate_locked_super+0x3d/0x60
-Nov 03 12:11:53 zion kernel: [<ffffffff811a28e6>] deactivate_super+0x46/0x60
-Nov 03 12:11:53 zion kernel: [<ffffffff811bdeaf>] mntput_no_expire+0xef/0x150
-Nov 03 12:11:53 zion kernel: [<ffffffff811bf0b1>] SyS_umount+0x91/0x3b0
-Nov 03 12:11:53 zion kernel: [<ffffffff814ea5dd>] system_call_fastpath+0x1a/0x1f
-----
-
-The only way to solve the problem was to perform a restart. After that, the
-problem would come back as soon as I started rsync again.
-
-
-[[the-solution]]
-== The Solution
-
-I hunted around for a while until I finally just searched for the name of the
-pegged process, **btrfs-endio-wri**, and cpu time. It turns out, the btrfs
-folks have https://btrfs.wiki.kernel.org/index.php/Gotchas[a page] detailing a
-list of current "gotchas" btrfs has. This issue was one of them. They describe
-it as <pre> Files with a lot of random writes can become heavily fragmented
-(10000+ extents) causing trashing on HDDs and excessive multi-second spikes of
-CPU load on systems with an SSD or large amount a RAM. ... Symptoms include
-btrfs-transacti and btrfs-endio-wri taking up a lot of CPU time (in spikes,
-possibly triggered by syncs). You can use filefrag to locate heavily fragmented
-files. </pre>
-
-One of the best parts of rsync is that it syncs deltas instead of resyncing the
-entire file. What does that result in? Lots of little random writes. Sounds
-like a match to me.
-
-**To fix this**, I defragged all of /home/ (with _compression=lzo_ of course :)
-), and remounted using the *autodefrag* option.
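-
-For reference, a sketch of those two steps (assuming a reasonably recent
-btrfs-progs; the -c flag recompresses files as they're defragmented):
-
-----
-# Recursively defragment /home, recompressing with lzo along the way
-btrfs filesystem defragment -r -clzo /home
-
-# Remount with autodefrag so future small random writes get cleaned up
-mount -o remount,autodefrag /home
-----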
-
-Now I can run rsync with no problems.
-
-One last thing to note. Their gotchas page says that once they've worked out a
-few potential kinks with the autodefrag mount option, they'll make it the
-default, which should prevent this from being an issue in future versions.
-
-Category:Linux
-Category:Btrfs
-Category:Storage
-Category:RAID
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Btrfs:RAID_Setup.ascii b/src/Btrfs:RAID_Setup.ascii
deleted file mode 100644
index 624fafa..0000000
--- a/src/Btrfs:RAID_Setup.ascii
+++ /dev/null
@@ -1,165 +0,0 @@
-Btrfs:RAID Setup
-================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-I recently became very interested in
-http://en.wikipedia.org/wiki/Logical_Volume_Manager_%28Linux%29[LVM] and its
-ability to have a volume that spans multiple drives. I was just about to do an
-LVM setup when I began researching
-https://btrfs.wiki.kernel.org/index.php/Main_Page[btrfs] in more depth. It is
-rumored to be the next replacement for
-https://ext4.wiki.kernel.org/index.php/Main_Page[ext4], the default Linux
-filesystem (in most cases). It also happens to support volumes that span
-multiple devices (http://en.wikipedia.org/wiki/Standard_RAID_levels[raid],
-albeit software raid), aside from a whole
-https://btrfs.wiki.kernel.org/index.php/Main_Page#Features[list] of other
-functionalities.
-
-Being a person who really enjoys trying new, cool, and often unstable things
-(who doesn't love a good learning experience), I decided to set up a
-http://en.wikipedia.org/wiki/Standard_RAID_levels#RAID_5[raid 5] using btrfs
-with three whopping one terabyte drives. If all goes well, I should be able to
-lose one drive and still have 2 terabytes ( [3-1]*1000 = 2000 ) fully
-functional.
-
-
-[[getting-started]]
-== Getting Started
-
-Creating a btrfs filesystem is as simple as creating an ext4 filesystem (or any
-other filesystem for that matter). You use the mkfs command. However, I
-created a raid setup, so I needed a few more parameters. Here's what I used.
-
-----
-mkfs.btrfs -m raid5 -d raid5 /dev/sdb /dev/sdc /dev/sdd
-----
-
-Well that was easy. What'd we just do?
-
-[cols=",",options="header",]
-|================================================================
-|mkfs.btrfs |duh
-|-m raid5 |Sets the metadata up to use raid 5
-|-d raid5 |Set the data up to use raid 5
-|/dev/sdb /dev/sdc /dev/sdd |Span our volume across these devices
-|================================================================
-
-With that, you should now [very quickly] have a new raid 5 (or whatever you
-selected for your raid levels). To mount it, run the mount command on any of
-the raw devices in your raid.
-
-----
-mount -t btrfs /dev/sdb /mnt/oh_heyyy
-----
-
-
-[[compression]]
-== Compression
-
-Btrfs supports various kinds of seamless compression. The default is none since
-compression will cause a performance hit (naturally). I thought I'd give it a
-try anyways. I set up lzo compression (supposedly the fastest compression, but
-less effective) about half way through my sync job (forgot to do it initially).
-The original total size of the files in each home directory came to 386 GB
-(lots of users for a home system). The end result after compression was 377 GB,
-so I ended up saving 9 GB of space while still getting an amazing transfer rate
-(see the link:#benchmarks[benchmarks] section). Keep in mind though that I
-enabled compression after I had already synced a good 100 GB of files, so there
-is a good portion of that data that isn't compressed. Despite that though, 9 GB
-of space isn't too bad, especially given the performance.
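-
-For reference, enabling it is just a mount option (a sketch; it compresses
-files written after the option is set rather than rewriting existing data):
-
-----
-mount -o compress=lzo /dev/sdb /home
-----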
-
-
-[[handy-commands]]
-== Handy Commands
-
-Here's what commands I'm using most frequently up to this point.
-
-* **btrfs fi**[lesystem] **show**: Shows a list of filesystems and their
-corresponding devices.
-* **btrfs fi**[lesystem] **label <dev> <label>**: Changes the label of
-the specified raid device.
-* **btrfs fi**[lesystem] **df /path/to/mount**: Displays real df data
-about the mounted volume.
-
-
-[[benchmarks]]
-== Benchmarks
-
-I know there are other ways to benchmark storage io, but I wanted to see what
-the maximum write speed would be and I don't have a second raid set up to get a
-high transfer rate in, so my fastest option at this point is /dev/zero. Here's
-my setup (again).
-
-* My btrfs raid 5 is mounted at /home/. The raid is made up of three 1
-TB Western Digital Green drives, each at 7200 rpm and it is mounted with "-o
-compress=lzo".
-* The OS itself ( / ) is installed on a single HDD, a 7200 rpm 500 GB
-Maxtor (slightly olde).
-
-[[btrfs-raid-performance]]
-=== Btrfs Raid Performance
-
-First, we test writing 2000 1M blocks of zeros to /home/, the raid.
-
-----
-[root@zion ~]# dd if=/dev/zero of=/home/bench.test bs=1M count=2000
-2000+0 records in
-2000+0 records out
-2097152000 bytes (2.1 GB) copied, 6.24284 s, 336 MB/s
-----
-
-336 MB/s! Not bad for a homemade drive array using software raid and
-some desktop drives.
-
-
-[[non-raid-single-hdd-performance]]
-=== Non-Raid Single HDD Performance
-
-Starting with the same as the last but writing to /root/, the single HDD, we
-get...
-
-----
-[root@zion ~]# dd if=/dev/zero of=/root/bench.test bs=1M count=2000
-2000+0 records in
-2000+0 records out
-2097152000 bytes (2.1 GB) copied, 30.5043 s, 68.7 MB/s
-----
-
-
-[[impressions]]
-== Impressions
-
-I might not be well versed enough in the area of storage, but setting up a
-btrfs raid was really easy. I did have to learn all the different raid levels
-to decide which I wanted to use of course, but I would have done that anyways.
-The filesystem (again, spanning 3 TB) was created ridiculously fast (as fast as
-I hit the enter key). I performed an rsync from my old drive (a single 500 GB
-HDD, 7200 rpm, 3 Gbit/s) to the new raid (2 TB across 3 HDDs, 7200 rpm, 6
-Gbit/s) volume and got about a 31 MB per second transfer rate, which is the max
-transfer rate that my single 500 GB drive has ever done anyways, so at least
-btrfs can perform that well (not that that's particularly amazing). I was also
-very impressed by the 336 MB/s write speed of the raid array. Perhaps I'm
-ignorant at this point in time, but that seems pretty impressive for some cheap
-off-the-shelf desktop drives. They're not even 10k rpm, let alone 15k. I would
-certainly say that from a performance perspective, btrfs is definitely ready
-for home use. It may be a little new for enterprise use, but that's up to the
-enterprise. For me though, I will keep using it until I see any problems. Even
-then, I'll still troubleshoot and then probably continue using it.
-
-Finally, I have to give some serious credit to the guys who wrote the b-tree
-filesystem (oddly Oracle sponsored it). It's this kind of open source that
-drives the world of technology (not that others don't of course) to expand
-beyond "what the consumer wants". You guys are innovating in the coolest ways
-and best of all, you're making it freely available. Many thanks guys!
-
-
-
-Category:Linux
-Category:Btrfs
-Category:Storage
-Category:RAID
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Building_an_Ejabberd_Server_with_MySql.ascii b/src/Building_an_Ejabberd_Server_with_MySql.ascii
deleted file mode 100644
index dfea4d1..0000000
--- a/src/Building_an_Ejabberd_Server_with_MySql.ascii
+++ /dev/null
@@ -1,135 +0,0 @@
-Yesterday I was upgrading my
-http://www.igniterealtime.org/projects/openfire/[OpenFire] server and thought
-it might be fun to learn something new and switch to a different server
-software. After doing some research, I decided upon
-http://www.ejabberd.im/[ejabberd] since that one seems to be a popular solution
-(not to mention the specs of course).
-
-I keep my jabber data in a MySql database and I don't really want to migrate
-away from that. That being said, I had a really difficult time finding any
-complete documentation on how to configure an ejabberd server to work with a
-MySql database. Here's how I did it.
-
-Firstly, you of course need to grab said bin
-http://www.process-one.net/en/ejabberd/archive/[here]. Once you have extracted
-and installed, you'll need to edit your config file (conf/ejabberd.cfg). You'll
-see a section in the middle (or so) that
-looks like
-
-----
-%%% ==============
-%%% AUTHENTICATION
-
-
-%%
-%% auth_method: Method used to authenticate the users.
-%% The default method is the internal.
-%% If you want to use a different method,
-%% comment this line and enable the correct ones.
-%%
-{auth_method, internal}.
-
-
-%%
-%% Authentication using external script
-%% Make sure the script is executable by ejabberd.
-%%
-%%{auth_method, external}.
-%%{extauth_program, "/path/to/authentication/script"}.
-
-
-%%
-%% Authentication using ODBC
-%% Remember to setup a database in the next section.
-%%
-%%{auth_method, odbc}.
-----
-
-Comment out the internal auth method line
-
-----
-%%{auth_method, internal}.
-----
-
-Now, skip down a bit further and uncomment the odbc auth
-method.
-
-----
-{auth_method, odbc}.
-----
-
-Lastly in the config file, we need to configure our database connection
-string. Head on down to the following location, uncomment the first
-odbc_server line and fill in your database connection information.
-
-----
-%%
-%% MySQL server:
-%%
-{odbc_server, {mysql, "MySqlServer", "MySqlDatabase", "MySqlUsername", "MySqlPassword"}}.
-----
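-
-Filled in, that line might look something like this (server, database, user,
-and password here are all hypothetical):
-
-----
-%% Example only: substitute your own connection details
-{odbc_server, {mysql, "localhost", "ejabberd", "ejabberd", "s3cret"}}.
-----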
-
-It's at this point that you might be thinking to yourself, "...but I don't have
-a database or tables configured". This is the part where I initially got stuck.
-All of the documentation I found pointed towards a sql file that could be found
-in the source code. Other sources indicated that ejabberd needs to be compiled
-with mysql support for this all to work. Thankfully, this is not the case (as
-per my experience at least). I can't say this about the deb or the rpm
-installs, but the gzipped binary at least has this.
-
-If you go into the install location and navigate on down to
-
-----
-<ejabberd-home>/lib/ejabberd-2.1.8/priv/odbc/mysql.sql
-----
-
-and run the sql file in there against the database you have created, you will
-find yourself with a completely empty database structure (but a structure
-nonetheless).
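-
-Loading it is a one-liner (a sketch, assuming your database and MySql user are
-both named ejabberd):
-
-----
-# Run from <ejabberd-home>/lib/ejabberd-2.1.8/priv/odbc/
-mysql -u ejabberd -p ejabberd < mysql.sql
-----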
-
-Finally, we have to go back and make a few more simple changes to our conf
-file. The config file references several modules that store their data to the
-internal database, unless otherwise specified. We are going to otherwise
-specify here.
-
-Crack open that config file again located at conf/ejabberd.cfg Navigate down to
-the section that looks like the following (I won't put the whole thing in here
-since it's a big section)
-
-----
-%%% =======
-%%% MODULES
-
-
-%%
-%% Modules enabled in all ejabberd virtual hosts.
-%%
-----
-
-Here you'll find a lot of lines starting with **mod_**. These are all the
-modules your ejabberd instance will load on startup. There are several in here
-that we need to add *_odbc* to the end of to make them talk to our MySql
-database instead of the internal database. Find the following listed modules
-and add _odbc to them (I've already done that in my list)
-
-----
-{mod_last_odbc, []},
-{mod_offline_odbc, []},
-{mod_privacy_odbc, []},
-{mod_private_odbc, []},
-{mod_pubsub_odbc, [ % requires mod_caps ...
-{mod_roster_odbc, []},
-{mod_vcard_odbc, []},
-----
-
-And finally, we're done. On a side note, you might want to uncomment the
-mod_proxy65 module to enable file transfers. You never know when you'll need to
-http://xkcd.com/949/[transfer a big file].
-
-
-Category:MySQL
-Category:XMPP
-Category:Linux
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Case_Insensitive_Matching_in_C++.ascii b/src/Case_Insensitive_Matching_in_C++.ascii
deleted file mode 100644
index 35cc1a7..0000000
--- a/src/Case_Insensitive_Matching_in_C++.ascii
+++ /dev/null
@@ -1,192 +0,0 @@
-Case Insensitive Matching in C++
-================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-== {doctitle}
-
-I had this epiphany yesterday while working on my new command line
-https://github.com/nullspoon/noteless[note-taking project] and I wanted to
-write a blog post about it since I haven't seen anyone on the internet yet take
-this approach (though there aren't exactly a lot of blog posts on programming
-theory of this kind in general).
-
-My program is written in c+\+. It provides a search functionality very similar
-to the case insensitive matching of _grep -i_ (you 'nix users should know what
-I'm talking about). If you've done much in c+\+, you likely know that string
-parsing is not so easy (or maybe just different). Thus the question...__how to
-perform case insensitive text searching in c++__.
-
-A few notes though before we proceed. I'm fairly new to c\+\+ (about 9 months
-as a hobby) so everything I say here might not be entirely right (it'll work,
-it just might not be the _best_ way). If you catch something that's wrong or
-could use improvement, please send me User:Nullspoon[ an email]. Secondly,
-since this is
-probably something the c++ gods have already mastered, I will be writing this
-post aimed at the newer folk (since I myself am one), so bear with me if you
-already know how to do this. One final note. I am still ceaselessly amazed at
-how computers work, so I get fairly giddy when it comes to actual memory
-management and whatnot. Brace yourselves...
-
-[[chars-ints-kind-of]]
-== Chars == Ints (kind of)
-
-To continue, we need to understand a few things about base data types in
-memory.
-
-* **Ints**: An int is just a number stored in a fixed-size chunk of memory
-(commonly 32 bits on modern platforms).
-
-* **Chars**: Chars are just small ints, but marked as chars. Effectively, a
-number has been assigned to each letter and symbol (including uppercase and
-lowercase), which is where integers meet chars. The integer determines which
-char is selected.
-
-To demonstrate those two data types, let's take a look at some sample
-code.
-
-----
-using namespace std;
-#include <iostream>
-
-int main( int argc, char** argv ) {
- int i = 72;
- char c = i;
- cout << "The integer " << i;
- cout << " is the same as char " << c << "!" << endl;
- return 0;
-}
-----
-
-What we do here is create +int i+ with the value of 72. We
-then create +char c+ and assign it the value of _i_ (still
-72). Finally, we print both int i and char c and get...
-
-----
-The integer 72 is the same as char H!
-----
-
-If you're wondering, we could have also just assigned char c the value
-of 72 explicitly and it would have still printed the letter H.
-
-Now that that's out of the way...
-
-
-[[a-short-char---integer-list]]
-== A Short Char - Integer List
-
-* **! " # $ % & ' ( ) * + , - . /**: 35 - 47
-
-* **0-9**: 48 - 57
-
-* **: ; < = > ? @**: 58 - 64
-
-* *A - Z* (uppercase): 65 - 90
-
-* **[ \ ] ^ _ `**: 91 - 96
-
-* *a - z* (lowercase): 97 - 122
-
-
-[[lowercase-uppercase-32]]
-== Lowercase == Uppercase + 32
-
-You may have noticed an interesting fact about the numbers assigned to
-characters in [English] computing: uppercase and lowercase letters don't have
-the same integers.
-
-These character integer range separations are key to performing a
-case-insensitive string search in c\+\+. What they mean is, if you happen upon
-the letter **a**, which is integer 97, then you know that its capital
-equivalent is going to be 32 lower (int 65). Suddenly parsing text just got a
-lot easier.
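-
-As a quick sketch of that idea (the function name here is mine, not from the
-noteless source), a case-insensitive character comparison needs only the +32
-offset:
-
-----
-// Returns true if a and b are the same letter, ignoring case
-bool chars_equal_nocase( char a, char b ) {
-  // Shift uppercase (65 - 90) to its lowercase equivalent by adding 32
-  if( a >= 65 && a <= 90 ) a += 32;
-  if( b >= 65 && b <= 90 ) b += 32;
-  return a == b;
-}
-----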
-
-
-[[piecing-it-all-together]]
-== Piecing it all together
-
-Since characters are simply just integers, we can perform text matching via
-number ranges and math operators. For instance...
-
-Suppose you want to build a password validator that allows numbers, upper case,
-lower case, and __: ; < = > ? @ [ \ ] ^ _ `__. That is the integer range 48 -
-57 (the digits), 58 - 64 (the first symbols), 65 - 90
-(the uppercase), 91 - 96 (the second set of symbols), and 97-122 (the
-lowercase). Combining those ranges, the allowable characters make up the
-integer range of 48 - 122. Thus, our program might look something like...
-
-----
-using namespace std;
-#include <iostream>
-
-int validate_pass( const char* pass ) {
- long i = 0;
- while( pass[i] ) {
- if( pass[i] < 48 || pass[i] > 122 ) {
- return 0;
- }
- i++;
- }
- return 1;
-}
-
-int main( int argc, char** argv ) {
- // The first password that meets the requirements
- const char* pass = "good_password123";
- cout << pass;
- if( validate_pass( pass ) ) {
- cout << " is valid." << endl;
- } else {
- cout << " is not valid." << endl;
- }
-
- // The second password fails because ! is int 35, which is out of range
- const char* pass2 = "bad_password!";
- cout << pass2;
- if( validate_pass( pass2 ) ) {
- cout << " is valid." << endl;
- } else {
- cout << " is not valid." << endl;
- }
- return 0;
-}
-----
-
-Will output...
-
-----
-good_password123 is valid.
-bad_password! is not valid.
-----
-
-The first password succeeds because all of its characters are within the range
-of 48 - 122. The second password fails because its final character, the "!", is
-int 35, which is outside of the allowable character range of 48 - 122. That
-brings a whole new meaning to the out_of_range exception, doesn't it?
-
-That's just one simple example of how this could work. One personal note,
-please don't put that restriction of >= 48 on your users if you write a validator
-script. Not having access to the more common symbols is a nightmare for users.
-
-If you would like to see another example, the one I wrote for case insensitive
-matchings in my note program can be found at
-https://github.com/nullspoon/noteless/blob/master/src/lib/note.cpp in the
-*note::line_matches* method.
-
-Hopefully this is useful for someone besides myself. Either way though, I'm
-still super excited about the ease of making real-life data programmatically
-usable through conversion to integers. It makes me want to see what other
-real-life data I can convert to numbers for easier parsing. Images? Chemistry
-notation?
-
-I do say my good man, http://www.bartleby.com/70/1322.html[Why, then the
-world’s mine oyster, Which I with numbers will open.] (okay, I may have
-modified the quote a tad)
-
-
-Category:Programming
-
-Category:C\+\+
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Cell_Provider_Comparison.ascii b/src/Cell_Provider_Comparison.ascii
deleted file mode 100644
index 28e1ad9..0000000
--- a/src/Cell_Provider_Comparison.ascii
+++ /dev/null
@@ -1,44 +0,0 @@
-Cell Provider Comparison
-========================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-A few years ago, I was researching cell phone providers, and noticed a distinct
-lack of _real_ information for an accurate comparison. It seems that everyone's
-monthly rates are comprised of different numbers; they charge up front for the
-phones, or they don't, or they kind of do; all of the devices cost different
-amounts between the different providers; etc. The only real number that was of
-any use was the total 24 month cost (the duration of a contract,
-whether they say they have them or not), since in the end that's what really
-mattered. Consequently, I decided to build myself a spreadsheet containing all
-of this information in an attempt to gather it all in one useful place.
-
-[[a-few-preliminary-notes]]
-A few Preliminary Notes
-~~~~~~~~~~~~~~~~~~~~~~~
-
-* The information in this spreadsheet _should_ be accurate as of
- **2014.12.22**.
-
-* The T-Mobile "Monthly Cost (no phone)" cell is accurate up to three
- phones because I haven't written the logic into the cell to handle more than
- that.
-
-* Each cell whose value isn't immediately obvious most likely has a
- comment. Please hover over the cell to see each comment for more information.
-
-* Each of the values that has something to do with a phone compares the
- LG G3 where possible for as accurate a comparison as possible.
-
-[[the-files]]
-The Files
-~~~~~~~~~
-
-* For *OpenOffice/LibreOffice* users: link:files/Cell_comparison.ods[Cell_comparison.ods]
-* For *Microsoft Office* users: link:files/Cell_comparison.xlsx[Cell_comparison.xlsx]
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Changing_the_Hostname_on_a_Linux_Box.ascii b/src/Changing_the_Hostname_on_a_Linux_Box.ascii
deleted file mode 100644
index 7ea649a..0000000
--- a/src/Changing_the_Hostname_on_a_Linux_Box.ascii
+++ /dev/null
@@ -1,58 +0,0 @@
-Changing the Hostname on a Linux Box
-====================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-I recently had need to change a server name due to a change in our server
-naming scheme (local ski resorts to breweries). For the simple comedy of the
-naming scheme switch, here's how it currently looks (without server prefixes
-for security purposes of course). If you just want the solution, skip down a
-paragraph.
-
-Our current environment is mostly virtualized. The hosts are named after
-breweries and their virtual guests are named after the beers that each brewery
-produces. Clever, yeah? I can already feel my morale rising.
-
-
-
-First off, open up a terminal window
-
-image:files/terminal001.png[height=400]
-
-Then type
-
-image:files/terminal002b.png[height=200]
-
-The file that comes up should contain nothing but the hostname. Change it to
-the new hostname and save the file (Ctrl + x closes the file, y says to save
-the file before closing, Enter saves the file under the original filename).
-
-image:files/terminal003.png[height=300]
-
-Once you've done this, all you need to do is restart your computer and you
-should be golden.
-
-Here's how we fix the aforementioned issue.
-
-If you've closed your terminal for the restart, open it up again. Type *sudo
-nano /etc/hosts*
-
-image:files/terminal004.png[height=300]
-
-At the top you should see 127.0.0.1, 127.0.1.1 and their associated
-'hostnames'. The one to the right of 127.0.1.1 should show your old hostname.
-Change that to the new hostname and save the file (Press Ctrl + x -> y ->
-Enter). Now your computer's IP address should resolve to its new hostname.
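-
-To summarize, the whole change boils down to two edits and a restart (a
-sketch, assuming a Debian-style system that keeps the name in /etc/hostname):
-
-----
-sudo nano /etc/hostname   # replace the old name with the new one
-sudo nano /etc/hosts      # update the 127.0.1.1 entry to match
-sudo reboot               # pick up the new name everywhere
-----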
-
-Enjoy!
-
-Now for a cup of joe...
-
-
-Category:Linux
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Church_Media_Computer_Setup.ascii b/src/Church_Media_Computer_Setup.ascii
deleted file mode 100644
index 8f2c99f..0000000
--- a/src/Church_Media_Computer_Setup.ascii
+++ /dev/null
@@ -1,108 +0,0 @@
-Church Media Computer Setup
-===========================
-
-My church recently got a new media computer. We don't really have a defined
-process for setting one up and I haven't really seen any recommendations on the
-interwebs, so here I am documenting all the things that I have learned to do
-throughout my time of running media and now also use. Some of these things may
-seem a little strict, so please take what you consider to be the meat and spit
-out what you consider to be the bones.
-
-[[desktop]]
-== Desktop
-
-[[desktop-background]]
-=== Desktop Background
-
-Make sure the desktop background is set to solid black. This is in case the
-media software crashes or you need to transition from something like your media
-software to powerpoint or vlc (assuming your media software doesn't support
-playing those in-house). With this, you can fade to black, minimize/close, and
-launch your next program with nothing but a nice smooth fade to black and back.
-
-
-[[system-sounds]]
-=== System Sounds
-
-Next up, be sure to turn off all system sounds. My church plays its music
-before and after service from the media computer (whose doesn't?) and system
-sounds are pretty intrusive in the middle of some Mumford and Sons.
-
-[[users]]
-=== Users
-
-This is something that has a lot of differing opinions. _In my opinion_, there
-should be a media account and an administrator account. If your church has the
-resources to have a dedicated media computer not used for anything else, it
-should, unless of course that is deemed not being a good steward of resources
-(it does after all stay turned off nearly all week... I'm on the fence about
-this one). Nevertheless though, it is typically considered a best practice to
-have your users be users instead of administrators. Otherwise, you'll likely
-end up with every browser and music player imaginable installed, possibly along
-with some viruses as well. I once cleaned up a media computer that had Virtual
-Box installed on it with an Ubuntu virtual machine set up. It was an experiment
-no one ever bothered to clean up and it booted with the system, taking up lots
-of cpu time, memory, and storage (40G).
-
-Having your user types be separate also helps with clutter. Photo editing
-usually doesn't require access to the Sunday service video backgrounds, song
-texts, etc. Likewise, your Sunday media folks don't need to see the unrelated
-clutter created by editing photos.
-
-
-[[media-software]]
-== Media Software
-
-[[all-in-one-place]]
-=== All in One Place
-
-It's generally best to consolidate all of your media resources into one place.
-This might include announcement videos, countdown videos, background videos and
-images, etc. Be sure all of your people running the media computer know where
-this is so they know where to look and they know where to put new files. On
-Windows, I typically have a setup within the User's directory that looks
-somewhat like
-
-*C:\Users\Media\Media* (Yep, there's two so we can have separate documents,
-downloads, etc)
-
-* *Pictures*
-** Foregrounds
-*** Announcements (this one might contain dated sub-directories so you can
- track what was used and when)
-** Backgrounds
-* *Videos*
-** Foregrounds
-*** Announcements (this one might contain dated sub-directories so you can
- track what was used and when)
-** Backgrounds
-* *Bibles* (in case your media software needs a place to store this)
-
-
-[[image-and-video-formatting]]
-=== Image and Video Formatting
-
-Make sure your default settings are in place for aspect ratios of imported
-files. Most of the time, you want your backgrounds to __scale to fill__, not
-fit, as sometimes your text runs right up against one of the slide borders.
-Videos you typically want to _scale to fit_ since they can often have things
-you want to focus on on the sides and you don't want that cut off.
-
-
-[[fonts]]
-=== Fonts
-
-If your media software supports it, set the default font sizes, alignment, and
-other styles for songs, announcements, Bible excerpts, etc. While adjusting
-these per song may not be difficult, it adds more time to creating a
-presentation. Occasionally there are oversights and songs, Bible verses, or
-announcements are missed and need to be added on the fly. Having a "sane
-default" means you can add something and it will at the very least not have
-text spilling off the sides, a font far too large, or any number of other
-things.
-
-
-Category:Drafts
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Command_Line_Auto-Complete.ascii b/src/Command_Line_Auto-Complete.ascii
deleted file mode 100644
index 49476a3..0000000
--- a/src/Command_Line_Auto-Complete.ascii
+++ /dev/null
@@ -1,52 +0,0 @@
-Command Line Autocomplete
-=========================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-I was working on a project today with a fellow aspiring Linux user (yes, I'm
-talking about you, Eric) and I noticed that when he would 'cd', he
-would completely type the folder name, even when it was apache-tomcat-6.0.26.
-If you've been using Linux for any amount of time, this might give you a
-headache because you know about Bash's autocomplete feature. I mentioned it to
-him and after seeing what it did, he exclaimed that he had been looking all
-over for that and was never able to find it.
-
-Note that this little trick works on windows command line as well as linux/unix
-command line (most shells support some variant of this - bash, zsh, ksh). While
-the two categories behave slightly differently, it still provides a
-functionality that is comparable to that of a breath of fresh air in a sewage
-treatment plant.
-
-For those of you who are impatient, **the auto-complete feature of bash is
-executed with the <tab> key**.
-
-How is it used? We'll use linux for this example.
-
-If you type ...
-
-----
-cd /usr/li  <tab>
-----
-
-...the shell will fill in /usr/lib. Now, you will notice that there are two
-directories in the /usr/ directory that start with lib. If you then proceed to
-type...
-
-----
-cd /usr/libe  <tab>
-----
-
-...the shell will fill in /usr/libexec.
-
-Neat, huh?
-
-
-
-Category:Linux
-Category:Bash
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Comparing_Remote_Files_Without_Breaking_a_Sweat.ascii b/src/Comparing_Remote_Files_Without_Breaking_a_Sweat.ascii
deleted file mode 100644
index 54110ca..0000000
--- a/src/Comparing_Remote_Files_Without_Breaking_a_Sweat.ascii
+++ /dev/null
@@ -1,57 +0,0 @@
-Comparing Remote Files without Breaking a Sweat
-===============================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-Today I needed to compare a relatively large list of remote files to a local
-copy. Yep, you guessed it... it's auditing time again!
-
-Here's what my setup looks like.
-
-From various other commands (lots of pipes), I parsed my kludgy list of server
-metadata down to a list of servers that I needed to check. On that note,
-despite the really terrible methods we're using to track this kind of
-information, I really do enjoy the challenge of writing a bash or perl
-one-liner to parse badly formatted, arbitrarily space-delimited data of
-questionable case: trimming multiple spaces, fixing the case, grabbing the
-columns I need, and redirecting to a file for later use. My thanks to the
-folks at GNU for cat, tr, cut, grep, and still again tr.
-
-Anyways, back to the topic at hand. We now have a list of server hostnames, one
-per line. As they say, "Hey guys, watch this!"
-
-----
-for s in `cat list.txt`; do echo -e "\n\n### $s" >> diff.txt; diff <( ssh root@$s cat /etc/sudoers ) sudoers >> diff.txt; done
-----
-
-So what have we here?
-
-Firstly, we start up a bash for loop. This will make $s equal to the
-name of each of the servers as we loop through them.
-
-Now, inside of the loop we first echo the server's name ( $s ) so we've
-got a marker to tell us which diff we're looking at. After that, the fun
-happens.
-
-----
-diff <( ssh root@$s cat /etc/sudoers ) sudoers >> diff.txt
-----
-
-Here, we are running the diff command to diff the remote file ( +<( ssh root@$s
-cat /etc/sudoers )+ ) with the local file ( sudoers ), and we are redirecting
-the output to diff.txt. What's neat about this (I think it's neat at least) is
-the +<()+ bit. This is called
-http://www.gnu.org/software/bash/manual/bashref.html#Process-Substitution[process
-substitution]. It allows us to take the output of a command and use it as if it
-were the contents of a file.
-
-
-Category:Bash
-Category:Linux
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Compiling_KeePassX_2_from_Source_with_Qt_4.8.0.ascii b/src/Compiling_KeePassX_2_from_Source_with_Qt_4.8.0.ascii
deleted file mode 100644
index 54368bc..0000000
--- a/src/Compiling_KeePassX_2_from_Source_with_Qt_4.8.0.ascii
+++ /dev/null
@@ -1,78 +0,0 @@
-Compiling KeePassX 2 from Source with Qt 4.8.0
-==============================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-I am a huge http://keepass.info/[KeePass] fan. I started using it back in
-college (which was oh so long ago...yyeeaaahhh) when I was using Windows. I
-later switched to using Linux full time (Ubuntu is definitely a gateway
-distro). Sadly, I had to leave behind the wonders of the kdbx format for kdb
-because the Linux KeePass version, http://www.keepassx.org[KeePassX], doesn't
-support the newer format. But hey, beggars can't be choosers, right?
-
-A few months back, the developer for KeePassX, debfx, posted on his blog
-http://www.keepassx.org/news/2010/09/242[here] that he was completely
-rebuilding KeePassX from scratch. With that, I headed straight on over to his
-gitorious page and grabbed his source code. Upon trying to build from source, I
-met a few issues. First off, here's what I typed to get started.
-
-From a directory called build inside of the master directory, I ran the
-following:
-
-----
-cmake .. -DCMAKE_INSTALL_PREFIX=~/Desktop/keepassx/keepassx
-make
-----
-
-Running cmake worked fine, but when I ran make I received the following errors.
-
-----
-/usr/include/QtCore/qscopedpointer.h:207:1: error: stray ‘`’ in program
-/usr/include/QtCore/qscopedpointer.h: In member function ‘const T& QScopedArrayPointer<T, Cleanup>::operator[](int) const’:
-/usr/include/QtCore/qscopedpointer.h:226:9: error: ‘r’ was not declared in this scope
-/usr/include/QtCore/qscopedpointer.h:226:11: error: ‘turn’ was not declared in this scope
-/usr/include/QtCore/qscopedpointer.h:226:16: error: expected ‘;’ before ‘this’
-----
-
-Oddly it would seem we have a problem with our QtCore stuff. Here's how we fix
-this. In my case, I only had to change two things. Both changes need to be made
-to **/usr/include/QtCore/qscopedpointer.h**.
-
-Firstly, head down to line 207. It will look like
-
-----
-template <typename T,`typename Cleanup = QScopedPointerArrayDeleter<T> >
-----
-
-Remove the `
-
-----
-template <typename T,typename Cleanup = QScopedPointerArrayDeleter<T> >
-----
-
-Secondly, head down to line 226 which should look like
-
-----
-r%turn this->d[i];
-----
-
-Change the % to the letter e
-
-----
-return this->d[i];
-----
-
-Once you've done that, go back and run the cmake, make, and make install
-commands and you should be set. It looks like a lot of work has yet to be done,
-but overall it's looking really great. Serious props to debfx for working on
-KeePassX. I'm really looking forward to this.
-
-
-Category:Linux
-Category:Linux_Applications
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Compiling_MariaDB_:_cannot_find_ncurses:_File_format_not_recognized.ascii b/src/Compiling_MariaDB_:_cannot_find_ncurses:_File_format_not_recognized.ascii
deleted file mode 100644
index a30a6b6..0000000
--- a/src/Compiling_MariaDB_:_cannot_find_ncurses:_File_format_not_recognized.ascii
+++ /dev/null
@@ -1,38 +0,0 @@
-Compiling MariaDB:cannot find ncurses: File format not recognized
-=================================================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-This week I have been trying to upgrade my MariaDB instance to the latest
-version and have been having some problems getting it to compile right. My
-first issue was that it couldn't find the ncurses libraries, so I had to grab
-and compile the
-source for that (I'm trying to run everything in userspace). Once I did that, I
-specified the "--with-named-curses-libs=[DIR]" switch and began my
-re-configure. Once I did that, I received the following error:
-
-----
-/usr/bin/ld: cannot find /home/mariadb/ncurses/: File format not recognized
-collect2: ld returned 1 exit status
-make[2]: *** [mysql] Error 1
-make[2]: *** Waiting for unfinished jobs....
-make[2]: Leaving directory `/home/mariadb/mariadb-5.3.3-rc-src/client'
-make[1]: *** [all] Error 2
-make[1]: Leaving directory `/home/mariadb/mariadb-5.3.3-rc-src/client'
-make: *** [all-recursive] Error 1
-----
-
-I searched around for some time and could not find the answer until I happened
-upon something unrelated that pointed me towards the
-*--with-client-ldflags=-all-static* switch. I threw that switch onto the end
-of my configure string and presto!
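-
-For illustration, the final configure invocation looked roughly like this
-(the ncurses path here is hypothetical):
-
-----
-./configure --with-named-curses-libs=/home/mariadb/ncurses/lib \
-            --with-client-ldflags=-all-static
-----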
-
-
-
-Category:MariaDB
-Category:MySQL
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Compiling_nginx_for_Solaris_10_-_Configure:_test:_argument_expected.ascii b/src/Compiling_nginx_for_Solaris_10_-_Configure:_test:_argument_expected.ascii
deleted file mode 100644
index f9176f1..0000000
--- a/src/Compiling_nginx_for_Solaris_10_-_Configure:_test:_argument_expected.ascii
+++ /dev/null
@@ -1,54 +0,0 @@
-Compiling Nginx on Solaris 10 - Configure:test:argument expected
-================================================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-Yesterday I was working on compiling nginx on one of our Solaris boxes at work
-(someone please tell me why companies still choose Solaris over the various
-other Unix or Linux distros out there) and ran into a problem. When I ran
-configure with any options, I saw the following error:
-
-----
-./configure: test: argument expected
-----
-
-And if you try to run make or gmake after that, you get this error:
-
-----
-make: Fatal error: Command failed for target `objs/src/core/nginx.o'
-----
-
-That's no fun, huh? Well, I searched around for a while and found the solution
-http://forum.nginx.org/read.php?21,220311,220313[here], which happened to be in
-Russian (Dear
-http://translate.google.com/translate?hl=en&sl=ru&tl=en&u=http%3A%2F%2Fforum.nginx.org%2Fread.php%3F21%2C220311%2C220313[Google
-Translate]).
-
-Basically, the problem is that the version of sh that Solaris 10 defaults to
-is very old and not POSIX compliant (go figure). The solution is to change the
-configure script to use a different shell. At the top of the configure script,
-change the following line from
-
-----
-#!/bin/sh
-----
-
-to
-
-----
-#!/usr/xpg4/bin/sh
-----
-
-Rerun your configure script with all of your switches and all should be well
-(yay!). Once you've done that, gmake/make should run without a hitch (at least
-not due to this problem we hope).
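-
-Alternatively, if you would rather not edit the vendor script at all, you can
-likely get the same result by invoking it through the POSIX shell directly (a
-sketch; pass whatever switches you normally use):
-
-----
-/usr/xpg4/bin/sh ./configure --prefix=/opt/nginx
-----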
-
-
-Category:Solaris
-Category:Nginx
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Configuring_Status.Net_for_NGINX_in_a_Subdirectory.ascii b/src/Configuring_Status.Net_for_NGINX_in_a_Subdirectory.ascii
deleted file mode 100644
index 0a83bea..0000000
--- a/src/Configuring_Status.Net_for_NGINX_in_a_Subdirectory.ascii
+++ /dev/null
@@ -1,63 +0,0 @@
-Configuring Status.Net for NGINX in a Subdirectory
-==================================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-This morning I tried to get status.net to work from a subdirectory of my main
-site, a task which proved to be quite frustrating, especially for someone who's
-not too great at rewrite rules in Apache, let alone NGINX. Unfortunately,
-there is also not much documentation on this topic online since status.net does
-not officially support NGINX. That's okay though. While I don't know much about
-rewrites, since they use regex it seems you should be able to make just about
-anything work (I could be wrong about that though).
-
-To get this to work, we first need a location directive for our main site. That
-would look something like
-
-----
-location / {
- index index.php;
- try_files $uri $uri/ @rewriteSection;
-}
-location @rewriteSection {
- rewrite (.*blah.*) index.php?q=$1;
-}
-----
-
-Now that we have that, we can go ahead and put our subdirectory directive in
-here. For the purposes of this demonstration, our status.net instance will be
-running in a directory called testsub.
-
-----
-location /testsub {
- index index.php;
- try_files $uri $uri/ @testsub;
-}
-location @testsub {
- ## FOR FANCY URLS FALSE
- ## rewrite ^/testsub/index.php/(.*)$ /testsub/index.php?p=$1 last;
- ## FOR FANCY URLS TRUE
- rewrite ^/testsub/(.*)$ /testsub/index.php?p=$1 last;
-}
-----
-
-
-To make this work for your instance, all you should need to do is swap out the
-testsub directory references for the directory your status.net instance is
-running in. Keep in mind though that by default, status.net has fancy URLs
-disabled, which means you'll need the first rewrite line (commented out
-above). If fancy URLs are turned on, use the second rewrite line instead.
-That should be it!
-
-Yay microblogging!
-
-
-Category:Nginx
-Category:Status.Net
-Category:Blogging
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Converting_Disks_in_Hyper-V.ascii b/src/Converting_Disks_in_Hyper-V.ascii
deleted file mode 100644
index e7ee1c3..0000000
--- a/src/Converting_Disks_in_Hyper-V.ascii
+++ /dev/null
@@ -1,65 +0,0 @@
-Converting Disks in Hyper-V
-===========================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-I have recently been running into space issues. I had a four hundred gigabyte
-hard drive that had almost no space remaining. It stored 8 running servers
-with dynamically expanding disks that had a maximum size of 40 gigabytes (on
-the host each was almost 42 gigabytes). I also had to store a backup server
-image (sysprepped) for copying to quickly create a new server if needed.
-Additionally, one of the hard drives was 70 gigabytes, not 40. All around, the
-total came to 459 gigabytes if each hard drive expanded to its largest. I
-suggested to my boss that we should convert each disk to fixed rather than
-dynamic to improve performance. He agreed and I began my search for how
-to convert disks. Much to my delight, converting a disk is a relatively
-painless (albeit slow) process. Here's how:
-
-For starters, one important note:
-
-*A server must be either off or paused to convert its disk (I chose off
-for my conversions)*
-
-Alright, now that that's out of the way, right click the server you want to
-convert and select *Settings*.
-
-image:files/01_ClickSettings_-_X.jpg[height=300]
-
-From the window that comes up, select the hard drive you want to convert. In
-my case I only had one hard drive 'attached' to my servers. With the drive
-selected, click *Edit*.
-
-image:files/02_HardDiskMainSettings_-_X.jpg[height=300]
-
-The window that comes up (Edit Virtual Hard Disk Wizard) will start on
-the "Locate Disk" step but should continue to the next step
-automatically in a few seconds. On the "Choose Action" step, select
-*Convert* (second down from the top).
-
-image:files/04_HardDiskPreConvert_-_X.jpg[height=300]
-
-This will take you to the "Convert Disk" step. Here you need to set the
-destination filename for the conversion. In my case, I just selected the old
-file from the browse window and added a "_fixed" at the end of the filename.
-Any naming scheme works though of course.
-
-image:files/07_HardDiskConverting_-_X.jpg[height=300]
-
-From here, just sit back and relax.
-
-The conversion speed at the beginning can be a little deceiving. Mine got
-almost 25% done in the first five minutes, yet it actually took about an hour
-to complete for forty gigabytes. Reminds me of a cell phone: it reads full
-battery power until it has none left, and then it tells you it's empty five
-minutes before turning off.
-
-
-Category:Microsoft
-Category:Virtualization
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Converting_Hyper-V_VHDs.ascii b/src/Converting_Hyper-V_VHDs.ascii
deleted file mode 100644
index 9585a29..0000000
--- a/src/Converting_Hyper-V_VHDs.ascii
+++ /dev/null
@@ -1,53 +0,0 @@
-Converting Hyper-V VHDs
-=======================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-I recently was assigned the task of rebuilding our Team Foundation Server with
-TFS 2010 for many reasons. One of those is because the old one has a VHD that
-is consuming far more resources than it should be (it's a 100 gigabyte vhd and
-only 8 gigabytes are in use). I seemed to recall somewhere that Hyper-V could
-"compact" a virtual hard drive, but I couldn't remember where. After doing a
-bit of searching around Hyper-V, I found what I needed. Here's a few facts
-about this before getting started.
-
-First, shrinking a hard drive only applies to dynamically sizing disks. Since
-these do not shrink on their own (there are a lot of reasons why) but only
-grow, they might need to be compacted later to free up host space. Compacting
-only reduces the .vhd file size by shrinking the 'shell' (if you will) so it
-no longer takes up drive space that is not being used by the guest OS.
-
-In my situation, I was dealing with a drive that was not dynamically sizing but
-was static. If the vhd is static, the Compact button will not show up when you
-go to edit the drive.
-
-In my case, I did not have to compact the drive. As I said, a drive cannot be
-compacted unless it is dynamic. Since mine was static, I converted it to
-dynamic to regain the compacting functionality but because of the way the
-conversion process works, it automatically 'compacts' the .vhd. My original
-static .vhd was 100 gigabytes. The output was 15.5 gigabytes.
-
-Though I did not have to compact my .vhd because the conversion process did it
-for me, I'm going to put the instructions on how to compact the .vhd anyways.
-
-For starters, the virtual machine that the hard drive is attached to must be
-turned off. Once the server is offline, from within the Hyper-V Manager (it's
-an mmc snap-in) go to the virtual machine's properties (right click the machine
-and select properties). Select the drive you want to shrink on the left panel
-that lists the various devices attached to the virtual machine. After selecting
-the drive, on the right panel, select Edit. This will bring up a window that
-tells you what editing a drive does and gives you the option to not show that
-screen again. Click Next. From here you should have three options (unless the
-.vhd is static). Select Compact and click Next. Finally, click Finish and
-Hyper-V Manager will shrink the .vhd as much as it can.
-
-
-
-Category:Microsoft
-Category:Hyper-V
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Cool,_Fun,_and_Mostly_Useless_Things_to_do_with_Linux.ascii b/src/Cool,_Fun,_and_Mostly_Useless_Things_to_do_with_Linux.ascii
deleted file mode 100644
index a9eb9c8..0000000
--- a/src/Cool,_Fun,_and_Mostly_Useless_Things_to_do_with_Linux.ascii
+++ /dev/null
@@ -1,139 +0,0 @@
-Cool, Fun, and Mostly Useless Things to do with Linux
-=====================================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-I frequently find myself on a weekend without much to do, wishing I could have
-some cool new project to put on my Linux server at home. I've implemented
-several of my ideas and occasionally come up with one more, but when I'm
-starving for one and don't have one on the cooker at the moment, finding a cool
-Linux project idea on the internet can be quite the task, so much so that you
-can make searching for one a weekend project in itself...until of course you
-get so frustrated with not finding one that you want to commit some uncivilized
-acts upon various (hopefully) inanimate objects near you.
-
-If that describes you, hopefully this post will help.
-
-
-[[linux-general]]
-=== Linux General
-
-Not all of these projects require a "Linux server" or a "Linux desktop".
-Most of these are quite doable with either category.
-
-* Synchronize your Android phone with your Linux, Windows, Mac, or Unix systems
- using https://syncthing.net/[Syncthing], the peer to peer, distributed,
- self-hosted synchronization client/server.
-
-* Write a script in your language of choice that uses
-  https://rsync.samba.org/[rsync] and ssh as the transfer protocol/client to
-  back up each of your linux or unix boxes (rsync is available for both; see
-  the sketch just after this list).
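-
-For the rsync idea, here's a minimal sketch of such a script (hostnames and
-paths are placeholders, and it assumes ssh key authentication is already set
-up):
-
-----
-#!/bin/bash
-# Mirror the home directories of several boxes into a local backup tree.
-hosts="web1 db1 laptop"
-dest=/backups
-
-for h in $hosts; do
-    mkdir -p "${dest}/${h}"
-    # -a preserves permissions and timestamps, -z compresses in transit,
-    # --delete drops files from the backup that were removed at the source.
-    rsync -az --delete -e ssh "root@${h}:/home/" "${dest}/${h}/home/"
-done
-----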
-
-
-[[linux-as-a-server]]
-=== Linux as a Server
-
-This category, we can safely say, requires a server with a static IP
-address (or at least dynamic DNS) that will be running most of the time,
-hence the server category. These would be good on an internal network
-where you control the gateway and/or DHCP, or on the external internet
-with a VPS or business-class internet (most ISPs don't give out static
-IPs unless they are explicitly requested).
-
-* Build an IRC server using http://www.ircd-hybrid.org/[ircd-hybrid]
-
-* Build an XMPP/jabber server for chatting with your GMail friends (or
- whoever else uses xmpp) using http://www.ejabberd.im/[ejabberd] or
- http://jabberd2.org/[Jabberd2] (this is the one I use)
-
-* Build a Ventrilo server using http://www.ventrilo.com/[Ventrilo] (duh).
- Useful for gamers who want actual "chat room" functionality where you can
- talk, and the room can hear you. Not super useful though as a voip
- application.
-
-* Take webcam photos from command line using the
- http://www.firestorm.cx/fswebcam/[fswebcam] package
-
-* Set up a motion-detecting webcam using the
- http://www.lavrsen.dk/foswiki/bin/view/Motion/WebHome[motion] package
-
-* Build a media server that works with your PlayStation 3 using
- http://mediatomb.cc/[MediaTomb]
-
-* Set up an IRC bot to connect to previously made IRC server using
- http://www.eggheads.org/[eggdrop]
-
-* Build a DLNA media streaming server using
- https://wiki.archlinux.org/index.php/MiniDLNA[miniDLNA] (recently renamed
- http://sourceforge.net/projects/minidlna/[ReadyMedia]) and use it to stream
- your music, video, and pictures to your Playstation 3, Windows computer,
- Android phone, or whatever you want that supports UPnP.
-
-* Build a mail server using http://exim.org/[Exim]
-
-
-[[linux-as-a-desktop]]
-=== Linux as a Desktop
-* Download all of your email locally using http://offlineimap.org/[offlineimap]
- and set up command line mail using http://www.mutt.org/[mutt].
-
-* Encrypt some or all of your home directory using http://ecryptfs.org/[eCryptfs] and symlinks.
-
-* Learn a new window manager, preferably something complicated and fun like
- http://i3wm.org/[i3] (it's a tiling window manager that's designed for
- keyboards).
-
-* Learn to use http://tmux.sourceforge.net/[tmux] (terminal multiplexer) like a
- pro (attach, detach, new panes, new windows, split windows, etc).
-
-* Get a usb stick or external hard drive and install Linux on it,
- bootloader and all.
- http://en.wikipedia.org/wiki/Universal_Serial_Bus#USB_2.0_.28High_Speed.29[USB
- 2.0] transfers at roughly 35 MB per second and
- http://en.wikipedia.org/wiki/Universal_Serial_Bus#USB_3.0_.28Super_Speed.29[USB
- 3.0] transfers at roughly 400 MB per second, so you won't see too bad of
- performance.
-
-** I'd like to take a second to expound on this one because it's been
-   particularly useful to me. Having this kind of setup allows me to use any
-   computer hardware that supports booting from USB as my computer. It's great
-   for fixing friends' computers, it's great in case your laptop battery dies,
-   and it's more portable than a laptop (assuming you'll have a computer you
-   can use wherever you're going). You can also run Linux at work without
-   angering the desktop team who built your work computer. When you go on
-   trips, you don't have to bring both your personal laptop and your work
-   laptop; you just need one computer and your little hard drive. It's really
-   a handy thing to have.
-
-* If your work uses Cisco VPN, you can go in using
- http://www.unix-ag.uni-kl.de/~massar/vpnc/[vpnc] (Gnome and KDE GUI
- implementations are readily available).
-
-** **Disclaimer**: I am not responsible for any repercussions of doing this.
- Think before doing this one. Some companies have policies against computers
- they didn't build being on their networks.
-
-* Write http://docs.ansible.com/index.html[Ansible] scripts to build any
-  number of these projects. Another good option is to write ansible scripts to
-  build out any of your Linux laptops (software manifests, adding users with
-  consistent uids/gids, sudoers configs, etc). I'm not trying to start a flame
-  war. Ansible is just the easiest to get set up without needing a server
-  already running (okay fine, there's always puppet apply and chef solo).
-
-* Learn to install https://www.archlinux.org/[Arch Linux]. That may sound
-  trivial, but if you haven't been doing Linux for long, or are only familiar
-  with the standard desktop distro installation process, this can be a very
-  informative project. Suffice to say, afterwards you will understand much more
-  about how your computer and Linux operate. The beginners' guide can be found
-  https://wiki.archlinux.org/index.php/Beginners%27_guide[here].
-
-
-Hopefully that's enough to get you started. Enjoy!
-
-
-Category:Linux
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Cool_Vim_Trickery.ascii b/src/Cool_Vim_Trickery.ascii
deleted file mode 100644
index cc5e293..0000000
--- a/src/Cool_Vim_Trickery.ascii
+++ /dev/null
@@ -1,115 +0,0 @@
-Today I was toying with something absolutely not work related and I wanted to
-share it with somebody to show how awesome the vim text editor can be.
-
-First though, I really would like to thank Bram Moolenaar for contributing such
-a useful tool to the world. It may not be the easiest thing in the world to
-learn, but once you've got even the most basic functionality figured out, you
-can do so much more than other editors will allow. That all goes without even
-saying how cool its interface is. If you really like vim, you should head on
-over to http://www.vim.org/index.php[his website] and buy a
-http://www.freewear.org/?org=Vim[tshirt],
-http://www.vim.org/stickers.php[sticker], or a
-http://www.vim.org/posters.php[poster].
-
-What I was dabbling with was vim colors for syntax highlighting. It turns out
-that the stock install of vim for Arch linux comes with almost 200 color
-schemes. I really wanted to see them all but didn't want to have to keep typing
-":colors schemename". That is a lot of repeat key presses after all, something
-we Linux folk really are not fond of when faced with a situation that a
-computer can handle for us (automation - what a novel idea).
-
-After some searching, I discovered
-http://vim.wikia.com/wiki/Switch_color_schemes#Switching_colors[this vim
-script] that will change your color scheme forwards or backwards by pressing F8
-or Shift+F8, respectively. Really neat, but not super automated still. Who
-wants to set this sucker to a timer and watch it switch every 200 milliseconds?
-I do I do I do!
-
-That vim script provides a few functions that are bound to the aforementioned
-hotkeys. The function we are immediately concerned with is called
-_NextColor_. This will switch the color scheme to the next in the list.
-
-Here's where vim gets really cool, even though it already is.
-
-It turns out that there is a list in this vim script that is a statically coded
-array of scheme names, so if you have more themes installed than those listed
-in the array, you're out of luck unless you manually add them. Now, at this
-point we could probably have vim run a shell command and massage the output to
-make an array for us at runtime, but where's the fun in that (that's just a
-little TOO automated for the purposes of this article)? I want to rock some vim
-find and replace regex!
-
-
-[[inserting-shell-command-output]]
-== Inserting Shell Command Output
-
-So now, the first thing we're going to do is insert the output of a shell
-command into our vim file, specifically +ls -1+. When in command mode, run
-
-----
-:read !ls -1 /usr/share/vim/vim73/colors/
-----
-
-This should insert a metric bitt load (teehee) of lines if you have very
-many color schemes.
-
-
-[[ranges-in-regex]]
-== Ranges in Regex
-
-From here, we want to massage the data with a few vim find and replace regexes.
-Note the line where your file list ends. For me, this was line 207, but it
-very likely won't be the same for you. Move the cursor to the first line
-and run the following in command mode
-
-----
-:.,207s/\.vim//
-----
-
-This will do a find and replace on the text range starting where the cursor is
-currently (the .) and ending at line 207 (the 207). After that it's just a
-standard regex substitution. This should chop off the '.vim' at the end of each
-filename.
-
-Next, we need to remove the new lines, comma delimit, and encase each name in
-single quotes to match the array format. Again, place your cursor at the first
-line of your list, and remember the line number of the last line in the list.
-Since the previous substitution already stripped the '.vim' extensions, this
-one only needs to match each line and its trailing line break.
-
-----
-:.,207s/\(.*\)\n/'\1', /
-----
-
-In this cryptic regex, we replace, from the current line (the .) to line 207,
-each line plus its line break (captured by +\(.*\)\n+) with the captured text
-encased in single quotes and followed by a comma and a space (the +'\1', +).
-Encase the entire string with a [ ] and you'll be set. Just erase the old
-array and set your new one to +s:mycolors+ near the top.
-
-
-[[setting-the-rotate-timer]]
-== Setting the Rotate Timer
-
-Now there's one piece left: set the timer. In command mode, do the following
-and hit enter
-
-----
-:while 1 | sleep 1000m | call NextColor(1) | endwhile
-----
-
-That will rotate through every theme you just added to your array every 1000
-milliseconds. Just change the +1000m+ to whatever you want to make it update at
-different intervals. To rotate backwards through the list instead, call the
-function with 0:
-
-----
-:while 1 | sleep 1000m | call NextColor(0) | endwhile
-----
-
-Hello worthless but super awesome functionality!
-
-
-Category:Linux
-Category:Vim
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Creating_Search_Engine_Optimized_Drupal_URLS.ascii b/src/Creating_Search_Engine_Optimized_Drupal_URLS.ascii
deleted file mode 100644
index 3282b7f..0000000
--- a/src/Creating_Search_Engine_Optimized_Drupal_URLS.ascii
+++ /dev/null
@@ -1,55 +0,0 @@
-Creating Search Engine Optimized Drupal URLs
-============================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-A big piece to search engine optimization is how your URLs are structured. A
-ways back, I was talking to a buddy of mine who does SEO for a living and he
-suggested that I use WordPress' URL rewrites to make my URLs friendlier. I went
-ahead and set my blog up for a 'yyyy/mm/dd/title' format and it did wonders for
-my search rankings. Recently however, I moved to Drupal which sadly does not
-automagically create the friendly aliases to your posts. There is good news
-though. In typical Drupal fashion, there's a module for that (kind of like
-"there's an app for that") and it is very customizable.
-
-To set yourself up with article urls (or blog urls) that autoalias with a
-format that you want, you need to grab two modules. First you need the
-*Pathauto* module, and that depends on the *Token* module. Before we continue,
-I'm writing this to fit a Drupal 7 scenario, so likely some of the stuff will
-be in a slightly different place if you're running 6 or 5.
-
-Now, once you have those enabled, head on over to the Configuration->URL
-aliases section of your Drupal site. Once there, select the pattern tab.
-
-Where we put our aliasing string here depends on whether you're writing your
-content as a blog or an article content type.
-
-If you blog in article content types, put the following string in the
-*Pattern for All Article Paths* field:
-
-----
-[current-date:custom:Y]/[current-date:custom:m]/[current-date:custom:d]/[node:title]
-----
-
-If you blog in blog format, put the following string in the *Pattern for all
-Blog entry paths* field:
-
-----
-[current-date:custom:Y]/[current-date:custom:m]/[current-date:custom:d]/[node:title]
-----
-
-image:files/pathauto-alias-strings0.jpg[height=300]
-
-Keep in mind that I formatted those strings for blog entries. If you're doing
-basic pages or something like those, you likely won't want the format I used in
-this article. Just expand the *Replacement Patterns* section in your patterns
-tab to see what other options you have for formatting those URLs.
-
-
-Category:Drupal
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Creating_Text_Outlines_in_CSS.ascii b/src/Creating_Text_Outlines_in_CSS.ascii
deleted file mode 100644
index d66f742..0000000
--- a/src/Creating_Text_Outlines_in_CSS.ascii
+++ /dev/null
@@ -1,38 +0,0 @@
-Creating Text Outlines in CSS
-=============================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-Creating outlines for text can be a very interesting problem to tackle when
-making a website. If you do any research, you'll likely find that Google points
-you to W3Schools for the text-outline property. There you will find out that
-(as of 10.27.2011) that CSS 3 property is not currently supported in any
-browsers.
-
-I reached that point and started researching unorthodox ideas on the matter. I
-didn't find anything directly related, but did find one really great site
-using multiple shadows to do offset shadows (really neat stuff I might add). I
-had no idea you could put multiple shadows on a single text object! Then it
-occurred to me: I could apply multiple blurred shadows, overlaid, to reduce the
-feathering enough that it would look like a solid outline. Sure enough, it
-worked! Here's how you do it.
-
-In your CSS, add a class like this...
-
-----
-.outline {
-    text-shadow: 0px 0px 2px #000, 0px 0px 2px #000, 0px 0px 2px #000,
-                 0px 0px 2px #000, 0px 0px 2px #000;
-}
-----
-
-And that should do it! Add that class to whatever text you're using and you
-should have nicely outlined text.
-
-
-Category:CSS
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Cyanogenmod_7_on_the_Evo.ascii b/src/Cyanogenmod_7_on_the_Evo.ascii
deleted file mode 100644
index cd848b4..0000000
--- a/src/Cyanogenmod_7_on_the_Evo.ascii
+++ /dev/null
@@ -1,81 +0,0 @@
-Cyanogenmod 7 on the Evo
-========================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-== {doctitle}
-
-Good morning everyone.
-
-I recently switched from Verizon to Sprint because my contract ran out, and
-for another reason or two (they are political). In that switch, I moved from my
-Motorola Droid to an HTC Evo. I would have switched to the Epic since it is
-newer, but it isn't nearly as far along when it comes to custom mods as the
-Evo is (the Epic doesn't even have an official release of Cyanogenmod yet).
-
-With that, three days in, I voided my phone's warranty by flashing a new ROM
-to it.
-
-Now, for those of you new to this, flashing a custom ROM to your phone can do
-wonders. For instance, before I did this, my Evo's battery life was roughly
-ten hours. After flashing the Cyanogen mod to my phone, my battery life was
-extended by at least eight hours. With some additional tweaking (which was made
-available to me by my custom ROM), I now get a full day with pretty heavy usage
-and two days with light to moderate usage.
-
-Another great bit you get out of a rooted phone and custom ROMs is the ability
-to update your phone on your own, instead of having to wait the six months to a
-year it may take for your carrier to release the latest update for your phone.
-For instance, Android 2.3 (Gingerbread) is already out and many phones still
-have Android 2.1 (Eclair). We don't even have the 2.2 (Froyo) update yet.
-
-And now, without further ado, I have flashed Cyanogenmod 7 on my phone, which
-is a mod of the long awaited Android 2.3, Gingerbread.
-
-
---
-image:files/wpid-screenshot_31.jpg[height=400]
-
-Clearly with this one I am a Sprint customer. This is the notifications drop
-down list showing the added control widgets to the top. This is actually a
-Cyanogenmod functionality, not Gingerbread.
---
-
---
-image:files/wpid-screenshot_36.jpg[height=400]
-
-This is the lock screen if you use a pin to lock your phone. Overall, not much different here.
---
-
---
-image:files/wpid-screenshot_33.jpg[height=400]
-
-This is the phone dialer. Clearly this is very stylistically different from the
-2.1 and 2.2 dialers.
---
-
---
-image:files/wpid-screenshot_32.jpg[height=400]
-
-This is the app menu. I am actually using the Zeam launcher, but this menu
-looks the same in the default Gingerbread launcher, ADW, and Zeam.
---
-
---
-image:files/wpid-screenshot_29.jpg[height=400]
-
-This is the home screen. Once again, I'm using the Zeam launcher, so the app
-menu at the bottom looks a bit different. Take note however of the icons on the
-notifications bar at the top; they have changed significantly from 2.2. I do
-miss the gradient at the top, however not having it makes the top bar blend in
-much better with various other aspects of the phone.
---
-
-That's it for now. If you want to see screenshots of some other specific
-things, please let me know in the comments section and I'll get on those as
-soon as I can. The same goes for questions about anything Android related.
-
-Thanks for reading!
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/DD-WRT:Change_Root_SSH_Password.ascii b/src/DD-WRT:Change_Root_SSH_Password.ascii
deleted file mode 100644
index b8cb0e8..0000000
--- a/src/DD-WRT:Change_Root_SSH_Password.ascii
+++ /dev/null
@@ -1,35 +0,0 @@
-DD-WRT:Change the Root SSH Password
-===================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-<obligatory-blog-intro> ... </obligatory-blog-intro>
-
-To change your root password on your DD-WRT router (assuming you already know
-what it is currently set to), log in through SSH. It turns out that DD-WRT
-doesn't use the passwd command to set user passwords like most Linux
-distributions do. Instead, to set your password, run the following command.
-
-----
-setuserpasswd [username] [password]
-----
-
-To give an example here
-
-----
-setuserpasswd root Jimminycr1cket4tehW1n
-----
-
-This may not work on older versions of DD-WRT. I can't say for certain because
-my router, the Buffalo Technology WHR-HP-G300N, runs DD-WRT v24-sp2.
-
-
-Category:DD-WRT
-Category:Linux
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/DNS_Backup_Script.ascii b/src/DNS_Backup_Script.ascii
deleted file mode 100644
index ad6f3db..0000000
--- a/src/DNS_Backup_Script.ascii
+++ /dev/null
@@ -1,105 +0,0 @@
-DNS Backup Script
-=================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-I would like to start this post with an admission of guilt.
-
-Hello, my name is swarleyman and I'm an arstechnicaholic.
-
-Please don't judge me.
-
-Anyways, I was reading it a few days ago and read
-http://arstechnica.com/business/news/2012/03/how-anonymous-plans-to-use-dns-as-a-weapon.ars[an
-article] about how Anonymous plans to DoS worldwide DNS. All the politics of
-this topic aside, it got me thinking. We are so incredibly reliant on DNS that
-if it went down, it could have catastrophic effects on society. Okay, so not
-being able to access your Facebook page for a few days might be a good thing.
-What about paying your bills (though if your bill pay system can't access their
-payment processing service, it doesn't really matter anyways)? With that, I
-decided to research a good way to back up DNS.
-
-After some searching I was, of course, disappointed. There is apparently no way
-to back up DNS. You would think that there should be some way to make a
-third-party copy, especially since it's such a vital service that's supposed to
-be relatively open. Either way, we still have a few tools to work with to make
-at least a semi-thorough backup.
-
-The tools I chose to use were perl and nslookup. Unless I'm missing something,
-I think nslookup is really the only good way to get relatively complete DNS
-data. I know you can dig stuff, but I'm not looking to back up people's CNAME,
-AAAA, A, SRV, etc. records (perhaps I'll come back and write up a script for
-that too). With that, to run this script you need a 'nix system with perl and
-nslookup installed (in the dnsutils package).
-
-What this script does is run nslookup on every host in a text file (for example
-./dnsbak.pl hostlist.txt), parse the text and format it in a hosts file format.
-All you should need to do is take the output from this script and append it to
-your hosts file and you should be back up and running.
-
-Here's teh codez!
-
-----
-#!/usr/bin/perl -w
-sub main () {
-  open hosts_list, $ARGV[0] or die("\nNo file specified or file does not exist\n");
-  # ONE HOST PER LINE
-  my @hosts=<hosts_list>;
-  close(hosts_list);
-  # STRIP TRAILING NEWLINES SO THE SHELL COMMAND STAYS CLEAN
-  chomp(@hosts);
-  for (my $i=0; $i<scalar(@hosts); $i++) {
-    my $nslookup=`nslookup $hosts[$i]`;
-    my $site = Site->new($nslookup);
-    $site->parse();
-    # BE POLITE TO THE RESOLVER
-    sleep(1);
-  }
-}
-main();
-print "\n\n";
-
-package Site;
-sub new {
-  my $class = shift;
-  my $self = { _nslookupData=>shift };
-  bless $self, $class;
-  return $self;
-}
-
-sub parse() {
-  my ( $self )=@_;
-  my $data=$self->{_nslookupData};
-  my @data=split("\n", $data);
-  my @addresses;
-  my $server;
-  for (my $i=0; $i<scalar(@data); $i++) {
-    # SKIP THE FIRST LINES (THE RESOLVING SERVER'S OWN INFO)
-    if ($i>=3) {
-      # MATCH THE HOSTNAME
-      if ($data[$i]=~/Name:\s(\w+\.\w+)/) { $server=$1; }
-      # MATCH THE IP ADDRESSES
-      if ($data[$i]=~/Address:\s{1,3}(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})/) { push(@addresses, $1); }
-    }
-  }
-  # ONLY PRINT IF WE ACTUALLY GOT AN ANSWER
-  if (scalar(@data) > 4) {
-    print join("\t".$server."\n", @addresses);
-    print "\t".$server;
-    print "\n\n";
-  }
-}
-----
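-
-To illustrate (the hostnames here are only examples), the input file and
-invocation might look like this, run as root if appending straight to your
-hosts file:
-
-----
-$ cat hostlist.txt
-example.com
-example.org
-
-$ ./dnsbak.pl hostlist.txt >> /etc/hosts
-----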
-
-Please leave some comments on my script if you have any. I still consider
-myself quite the perl noob as I am completely self-taught and don't really have
-all of the super pro +1 up perl one-liner guru experience that you perl
-veterans http://icanhascheezburger.com/[can has]. I look forward to hearing
-some feedback on my seemingly too long and verbose script.
-
-Ensign, engage.
-
-Category:Backups
-Category:Perl
-Category:Scripting
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Default_Solaris_man_Pager.ascii b/src/Default_Solaris_man_Pager.ascii
deleted file mode 100644
index ee656aa..0000000
--- a/src/Default_Solaris_man_Pager.ascii
+++ /dev/null
@@ -1,39 +0,0 @@
-Default Solaris Man Pager
-=========================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-The title of this should actually say "Default Solaris man Pager Sucks" but in
-the interest of professionalism, I left that last word out.
-
-The Solaris default pager for man pages is "more". It unfortunately doesn't
-support the friendly controls of "less": search highlighting, scrolling one
-line at a time, etc. In other words, it is less functional than using "less"
-as your pager. With that, let's have a look at how we fix this.
-
-If you would like to change it, you need to crack open your *.bashrc* and add
-the following line...
-
-----
-export PAGER="less"
-----
-
-Tada!
-
-Launch bash and look up a man page.
-
-As a friend of mine said in regards to this, "LESS is more than MORE."
-
-
-One closing note: this seems to be a very universal variable, so it should
-also work with csh, ksh, sh, etc, though the rc file that needs to be edited
-will be different.
-
-Category:Solaris
-Category:man
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Dell_V305_Printer_on_Linux.ascii b/src/Dell_V305_Printer_on_Linux.ascii
deleted file mode 100644
index 16da7e8..0000000
--- a/src/Dell_V305_Printer_on_Linux.ascii
+++ /dev/null
@@ -1,190 +0,0 @@
-Dell V305 Printer on Linux
-==========================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-I spent this week hanging out with my wife and grandparents-in-law and spent
-some of my time performing the obligatory family tech support (no complaining
-here, I love doing that most of the time). To sum up the beginning quickly
-because I really don't want to write the post in great detail, my grandfather's
-computer got temporarily hosed and the guy from Dell made it even worse (thanks
-Deepak). He actually wiped the computer after taking very minimal backups
-(thankfully just enough). Not only that, but the restore from the Dell image
-actually corrupted a bunch of the core system libraries making installing and
-updating Windows or Microsoft software impossible. After wasting an hour trying
-to fix this, I finally decided to reinstall a fresh copy of Windows. Then it
-hit me, my grandfather doesn't use his computer for much more than Word
-documents, PDFs, browsing the internet, and email - all things that Linux does
-very well. With that, I suggested to him that he try
-http://linuxmint.com/[Linux Mint] (my favorite ready-to-go Linux desktop
-distro). After he played around with the live version for a bit, he decided he
-really liked it (kudos to you Linux Mint guys), so I went ahead and installed it.
-
-I got everything working easily but one thing... his printer.
-
-[[the-dell-v305-printer]]
-The Dell V305 Printer
---------------------
-
-The Dell V305 is actually a Lexmark printer rebranded as a Dell. Specifically,
-it is the Lexmark x4650. Thankfully, Lexmark makes
-http://support.lexmark.com/index?page=downloadFile&actp=CONTENT&productCode=LEXMARK_X4650&id=DR20523&segment=DOWNLOAD&userlocale=EN_US+&locale=en&oslocale=en_US[a
-linux driver] for this thing, but it is, of course, very problematic. When I
-first ran the .sh with an embedded binary, it ran fine until it got to the
-install, where it gave me an ambiguous "failed to install". When you click
-**OK**, it closes the window with the actual error text in it. While the
-"failed to install" dialog is up, you can't check the log because it won't let
-you select the background window. Also, the background window isn't resizable,
-so you can't hope for a bigger window to compensate for no scrollback. Great
-design, huh?
-
-I did notice on the last three or so lines though that it was trying to remove
-a .deb file. With that, I set out to search for it.
-
-
-[[the-fun-begins]]
-The Fun Begins
---------------
-
-If you run the
-'http://support.lexmark.com/index?page=downloadFile&actp=CONTENT&productCode=LEXMARK_X4650&id=DR20523&segment=DOWNLOAD&userlocale=EN_US+&locale=en&oslocale=en_US[lexmark-08z-series-driver-1.0-1.i386.deb.sh]'
-file with the _--keep_ switch, the script will not remove all files extracted
-to perform the install process. This will leave you with a nicely populated tmp
-folder.
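-
-For example, a sketch of the invocation (assuming the installer script is in
-the current directory):
-
-----
-sh ./lexmark-08z-series-driver-1.0-1.i386.deb.sh --keep
-----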
-
-If you cd into the tmp directory, you will find a file called
-**installarchived_all**. This is actually an lzma archive file. What you want
-to do now is extract this file using the following command
-
-----
-tar -xvf ./installarchived_all --lzma
-----
-
-This will extract several files, one of which will be called
-__lexmark-08z-series-driver-1.0-1.i386.deb__. You might think that this is a
-time for rejoicing, but alas it is not. At this point we should be able to
-run _dpkg -i ./lexmark-08z-series-driver-1.0-1.i386.deb_ and have it work, but
-it won't. If you do that you will receive the following friendly error:
-
-----
-dpkg: error processing ./lexmark-08z-series-driver-1.0-1.i386.deb (--install):
- parsing file '/var/lib/dpkg/tmp.ci/control' near line 9 package 'lexmark-08z-series-driver':
- blank line in value of field 'Description'
-Errors were encountered while processing:
- ./lexmark-08z-series-driver-1.0-1.i386.deb
-----
-
-What? The .deb file was constructed wrong? 'Tis a shame. Here's where it gets
-really fun. What we need to do now is extract the deb file, modify the contents
-of a single file, and repackage the whole thing back up.
-
-First, let's create a working directory, put the bad .deb file in there, and
-extract it into a deb package folder structure.
-
-----
-mkdir ./working
-cp ./lexmark-08z-series-driver-1.0-1.i386.deb ./working/
-cd working
-----
-
-Extract the .deb file and clean up a bit (don't forget the period at the
-end of the dpkg-deb line).
-
-----
-dpkg-deb -R lexmark-08z-series-driver-1.0-1.i386.deb .
-rm ./lexmark-08z-series-driver-1.0-1.i386.deb
-----
-
-
-[[fixing-the-problem]]
-Fixing the Problem
-------------------
-
-The problem, as you likely noticed earlier, is that the .deb file has a file
-named _control_ that is improperly formatted. Specifically, control files
-cannot have blank lines in them. To have a "blank" line in a .deb control file,
-you must have a period instead. That said, here's how we fix the file.
-
-Open up the control file in the DEBIAN directory and put a ' .' (yes, with the
-space before it) like so
-
-----
-Description:
- Lexmark 08z Series Drivers Package
- .
- This package contains the Lexmark 08z Series Drivers. This is
- a copyrighted package, please refer to the copyright notice
- for details about using this product.
-----
-
-Now that that's done, we just need to repackage the .deb file and install it.
-To do that, cd out to one directory above the lexmark-08z directory (the
-working directory) and run **dpkg -b lexmark-08z**. This will take a few
-seconds (it's 22 megs) but it should create a file called lexmark-08z.deb. Now
-install this using **dpkg -i**.
-
-----
-dpkg -b lexmark-08z
-dpkg -i ./lexmark-08z.deb
-----
-
-_I'm too lazy to write the rest out right now so here's the shorthand_
-
-Now you have to edit a ton of files in __/usr/local/lexmark/08zero/etc/__.
-
-Firstly, we need to edit 99-lexmark-08z.rules and add the following
-line to the top so the file looks like so
-
-----
-ATTRS{idVendor}=="413c", ATTRS{idProduct}=="5305", MODE="666"
-
-ACTION!="add", GOTO="lexmark_custom_rules_end"
-ATTRS{idVendor}=="413c", ATTRS{idProduct}=="5305", MODE="666"
-ATTRS{idVendor}=="043d", ATTRS{idProduct}=="0142", MODE="666"
-ATTRS{idVendor}=="043d", ATTRS{idProduct}=="0150", MODE="666"
-ATTRS{idVendor}=="043d", ATTRS{idProduct}=="013f", MODE="666"
-ATTRS{idVendor}=="043d", ATTRS{idProduct}=="0151", MODE="666"
-ATTRS{idVendor}=="043d", ATTRS{idProduct}=="0116", MODE="666"
-LABEL="lexmark_custom_rules_end"
-
-----
-
-Now that we've updated the 99-lexmark-08z.rules file, we need to edit a load of
-the lxd*.conf files. I say we need to edit lots of them because I'm still not
-sure which one or combination of them actually did the trick. I can say though
-that just lxdm.conf wasn't enough.
-
-Now, edit the following files
-
-* lxdm.conf
-* lxdq.conf
-* lxdw.conf
-* lxdu.conf
-* lxdx.conf
-
-...and replace *all* instances of _0116_ with _5305_, and all instances of
-_043D_ with _413C_.
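-
-A sketch of doing those replacements in one shot with sed (back up the files
-first; this assumes the install location above and that the IDs appear
-uppercase in the conf files, so adjust the patterns if yours are lowercase):
-
-----
-cd /usr/local/lexmark/08zero/etc
-for f in lxdm.conf lxdq.conf lxdw.conf lxdu.conf lxdx.conf; do
-    # -i.bak edits in place, keeping a .bak copy of the original
-    sed -i.bak -e 's/0116/5305/g' -e 's/043D/413C/g' "$f"
-done
-----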
-
-Once that is done, add your printer from the cups admin console
-(localhost:631). Once you get to the driver part, select Lexmark 3600-4600 and
-you should be set!
-
-**Whew**
-
-Finally, here are the resources I found to help me out with this
-solution.
-
-* http://ubuntuforums.org/showpost.php?p=7809488&postcount=1
-* http://ubuntuforums.org/archive/index.php/t-1243920.html
-* http://ubuntuforums.org/archive/index.php/t-1554718.html
-* http://ubuntuforums.org/showthread.php?t=1379902
-* http://ubuntuforums.org/showthread.php?t=1554718&page=1
-
-
-Category:Linux
-Category:Debian
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Digraphs.ascii b/src/Digraphs.ascii
deleted file mode 100644
index 0a3d116..0000000
--- a/src/Digraphs.ascii
+++ /dev/null
@@ -1,114 +0,0 @@
-Digraphs
-========
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-Wikipedia defines digraphs (and trigraphs) as
-
-[quote, Wikipedia, 'http://en.wikipedia.org/wiki/Digraph_%28computing%29[Digraphs and trigraphs]']
-____
-sequences of two and three characters
-respectively, appearing in source code, which a programming language
-specification requires an implementation of that language to treat as if they
-were one other character.
-____
-
-
-If you've spent much time in Unix, you have likely seen their character
-representations on a rare occasion. Usually they begin with a ^ followed by
-some key code. Note though that I said "spent much time in _Unix_". This
-is because Linux doesn't _usually_ (with some exceptions) have problems with
-digraphs. When I say Unix though, I am referring to the really old ones that
-claim to be up-to-date like AIX, Solaris, and HPUX.
-
-[[what-do-digraphs-have-to-do-with-old-unix]]
-== What do digraphs have to do with old Unix?
-
-Digraphs are actually used every time you use a Unix/Linux box from the
-command line. There's this really nifty thing called *stty* that flies
-under the radar most if not all of the time on newer systems. I don't
-know of a single Linux distro that doesn't set stty for you. The reason
-it flies under the radar so often is because it's something that's been
-standardized for so long that it is all but set in stone (as far as I
-know). It's also super handy to have set, and super infuriating to not
-have set.
-
-
-[[what-is-stty]]
-=== What is stty?
-
-Well, technically STTY is an acronym for "**S**et **TTY**". That's tons of help
-though. What's TTY? It turns out that
-http://en.wikipedia.org/wiki/Tty_%28Unix%29[TTY] is an acronym for
-**T**ele**TY**pewriter. Combining all that goodness, we have **S**et
-**T**ele**TY**pewriter.
-
-Now, all this is great, but really, what does this have to do with anything? It
-turns out that while we nearly never need to directly deal with it, we actually
-use it all the time. Here's a short list of a few things we use it for in
-*nix...
-
-* Backspace
-* Scrolling with a mouse in a terminal
-* Ctrl+C (sigterm)
-* Ctrl+D (logout/eof)
-* All arrow keys, both horizontal and vertical
-
-I mentioned earlier that stty is set by default on nearly all modern Linux and
-Unix distributions with the exception of old Unix distributions such as AIX,
-Solaris, and HPUX. I posed this question to a few AIX admins I know and all of
-them told me that IBM doesn't set stty for you by default because it's more
-customizable than Linux, therefore better. I have my own very charged opinion
-as to why they don't set a default, but I will leave that out of this post.
-
-
-[[what-does-stty-look-like]]
-== What does stty look like?
-
-Where I work, management is endeavoring to make their Linux environment as much
-like AIX as possible. One step in that process is to merge the .profile
-configurations. Since Linux doesn't have stty set in .profile because the
-system has a default, AIX using a Linux .profile doesn't support the
-aforementioned list of modern keyboard keys (backspace? really? no). Imagine
-how infuriating the command line can get without arrow keys for cursor
-movement, a backspace to correct your mistakes, and Ctrl+C to clear your line
-or stop your process. The only option we have here is to re-set the Linux stty
-so when the profile is sent over to an AIX system, it also has stty set on
-login. Here's my attempt at porting my Arch Linux stty to AIX.
-
-----
-stty erase ^? kill ^U intr ^C eof ^D quit ^\ start ^Q stop ^S susp ^Z rprnt ^R werase ^W lnext ^V flush ^O time 0 -parenb -parodd cs8 -hupcl -cstopb cread -clocal -ignbrk -brkint -ignpar -parmrk -inpck -istrip -inlcr -igncr icrnl ixon -ixoff -iuclc -ixany -imaxbel -olcuc -ocrnl onlcr -onocr -onlret -ofill -ofdel nl0 cr0 tab0 bs0 vt0 ff0
-----
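-
-For comparison, you can see what your current session has set by running
-*stty -a* (output below is a trimmed sketch and will vary by system):
-
-----
-$ stty -a
-speed 38400 baud; rows 50; columns 160; line = 0;
-intr = ^C; quit = ^\; erase = ^?; kill = ^U; eof = ^D; ...
-----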
-
-
-[[what-does-all-that-do]]
-== What does all that do?
-
-I really only want to cover a few things in that list because they are the most
-frequently used and caused me trouble when I was trying to set this up.
-
-Each of those items up there starting with a
-https://en.wikipedia.org/wiki/Caret#Circumflex_accent[\^ (Circumflex Accent)]
-represents a control key combination. For instance, +eof \^D+ will send the
-logout signal upon pressing Ctrl+D. The problem here is that those "circumflex
-accents" aren't caret characters. A circumflex accent is its own character. How
-do we do these in vi/vim? You need another control key combination to tell
-vi/vim that you are going to be pressing a control key combination of course!
-
-To do, for instance, the Ctrl+D sequence in vim, go into insert mode and type
-+Ctrl+v Ctrl+d+ (you press the d unshifted) and you should see +\^D+ show up.
-
-I did have two problems with this method though: \^S and \^Q. It turns out
-those can't simply be typed as Ctrl+S and Ctrl+Q, since the terminal intercepts
-those combinations for flow control. Instead, I elected to use the
-actual digraph rather than the character version to set them. To do this, go
-into insert mode again, hit +Ctrl\+k+, and type the digraph. In the
-case of \^Q and \^S, these are D1 and D3, respectively.
-
-
-Category:Linux
-Category:Vim
-Category:Unix
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Divs_That_Move_When_Users_Scroll.ascii b/src/Divs_That_Move_When_Users_Scroll.ascii
deleted file mode 100644
index ca7f711..0000000
--- a/src/Divs_That_Move_When_Users_Scroll.ascii
+++ /dev/null
@@ -1,82 +0,0 @@
-Divs that Move When Users Scroll
-================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-== {doctitle}
-
-Today I was working on a project that has a search box at the top of the page
-in the primary nav bar, and I thought it would be nice if it stayed put when
-scrolling through the hundreds of lines of data on the page. I thought, 'Moving
-elements on a page must entail JavaScript, right?'
-
-Wrong
-
-
-[[with-javascript]]
-=== With Javascript
-
-But alas, I started down the JavaScript path anyways. So I can cut to
-the chase a bit sooner, I'll just paste the function I wrote so those of
-you out there who want to use JavaScript can.
-
-----
-function setScrollable(ScrollObject) {
-  ScrollObject.style.top=window.pageYOffset+'px';
-  ScrollObject.style.left=window.pageXOffset+'px';
-}
-----
-
-To use that function, you need several things. First, you need the onscroll
-event in your body tag.
-
-----
-<body onscroll="setScrollable(document.getElementById('ScrollDiv'));">
-----
-
-Finally, you need one thing set in your styles (perhaps two, depending on if
-you're using z-values)...
-
-----
-div#ScrollDiv {
- position:absolute;
- z-index:100;
-}
-----
-
-And presto! You've got yourself a div that moves up, down, left, and right when
-your user scrolls.
-
-You will however likely notice that when you scroll quickly, the bar flickers.
-Well, it doesn't exactly flicker; it's more that your browser doesn't process
-the JavaScript fast enough for the bar to stay at the top during an onscroll
-event, ergo it takes a few to catch up. I thought to myself, 'How does Google
-pull this off so seamlessly with their in-browser chat windows that stay put so
-nicely at the bottom right hand of your screen whilst scrolling?' (oh yes,
-whilst was in that thought). After looking around for a while, it hit me that
-you can use CSS to do this.
-
-
-[[with-css]]
-=== With CSS
-
-As it turns out, that fancy property we all use to keep our backgrounds
-from scrolling on our pages also works with objects. To implement this
-the CSS way, all you need to do is put in a bit of styling to position
-your div (or whatever object you want stationary) and you're set.
-
-----
-div#ScrollDiv {
- position:fixed;
-}
-----
-
-Sweet mother, that was easy!
-
-
-Category:CSS
-Category:HTML
-Category:JavaScript
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Don't_Censor_Me_Bro!.ascii b/src/Don't_Censor_Me_Bro!.ascii
deleted file mode 100644
index 158abf8..0000000
--- a/src/Don't_Censor_Me_Bro!.ascii
+++ /dev/null
@@ -1,124 +0,0 @@
-Don't Censor Me Bro!
-====================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-Most of the people who spend any time on this site are likely techies
-and already know that the road post-SOPA (and PIPA) is a long and dark
-one. For those of you who may not know exactly what it's all about
-though, here's a short summary from Wikipedia...
-
-[quote, Wikipedia, 'https://en.wikipedia.org/wiki/Stop_Online_Piracy_Act[Stop Online Piracy Act]']
-____
-The bill would authorize the U.S. Department of Justice to seek court orders
-against websites outside U.S. jurisdiction accused of infringing on copyrights,
-or of enabling or facilitating copyright infringement. After delivering a court
-order, the U.S. Attorney General could require US-directed Internet service
-providers, ad networks, and
-payment processors to suspend doing business with sites found to
-infringe on federal criminal intellectual property laws. The Attorney
-General could also bar search engines from displaying links to the
-sites.
-____
-
-That sounds pretty harmless, doesn't it?
-
-While the bill seems to have good intentions (who likes a pirate, right?...),
-the overall consequences of it are heavily dependent on how the bill defines
-"copyright infringement". The (very) unfortunate issue here is that the
-definition of a person infringing a copyright is very broad and could cover a
-very large portion of the internet. To quote
-http://thomas.loc.gov/cgi-bin/query/z?c112:H.R.3261.IH:/[section 201],
-subsection A of the SOPA...
-
-[quote]
-____
-. IN GENERAL- Any person who willfully infringes a copyright shall be punished
- as provided under section 2319 of title 18, if the infringement was committed--
-.. for purposes of commercial advantage or private financial gain;
-.. by the reproduction or distribution, including by electronic means, during
- any 180-day period, of 1 or more copies or phonorecords of 1 or more
- copyrighted works, or by the public performance by means of digital
- transmission, during any 180-day period, of 1 or more copyrighted works,
- when the total retail value of the copies or phonorecords, or of the public
- performances, is more than $1,000; or
-.. by the distribution or public performance of a work being prepared for
- commercial dissemination, by making it available on a computer network
- accessible to members of the public, if such person knew or should have
- known that the work was intended for commercial dissemination.
-____
-
-That's pretty broad. So far, that would most likely shut down Youtube, Facebook
-(people link to Youtube videos, right?), possibly Wikipedia, and most if not
-all of the video hosting sites out there (metacafe, vimeo, possibly netflix if
-their licensing isn't right, etc). A big problem here is that a single person
-uploads to Youtube, yet the entire website can be taken down for that one
-person, punishing the rest. But that's beside the point (or is it?). Back to
-the legal talk. In section 201 of the SOPA legislation, subsection C under
-subsection A, the bill describes examples of copyrighted material that can be
-infringed upon (the definition of a "work being prepared for commercial
-dissemination")...
-
-[quote]
-____
-. a computer program, a musical work, a motion picture or other audiovisual
-work, or a sound recording, if, at the time of unauthorized distribution or
-public performance--
-.. the copyright owner has a reasonable expectation of commercial distribution;
-and
-.. the copies or phonorecords of the work have not been commercially
-distributed in the United States by or with the authorization of the copyright
-owner; or,
-.. the copyright owner does not intend to offer copies of the work for
-commercial distribution but has a reasonable expectation of other forms of
-commercial dissemination of the work; and
-.. the work has not been commercially disseminated to the public in the United
-States by or with the authorization of the copyright owner;
-. a motion picture, if, at the time of unauthorized distribution or public
-performance, the motion picture--
-.. has been made available for viewing in a motion picture exhibition facility;
-and
-.. has not been made available in copies for sale to the general public in the
-United States by or with the authorization of the copyright owner in a format
-intended to permit viewing outside a motion picture exhibition facility; or
-.. had not been commercially disseminated to the public in the United States by
-or with the authorization of the copyright owner more than 24 hours before the
-unauthorized distribution or public performance.'.
-____
-
-So what we have here is a very broad definition that covers every single
-copyrighted work of music, software, and sound recording (you can copyright
-those?) in the United States. That definitely would shut down every single
-video hosting site and any other site that re-posted videos/recordings from
-those sites. The consequences of this could be so far reaching.
-
-This bill is a reaction that reminds me of
-https://www.eff.org/cases/lenz-v-universal[Lenz v. Universal], the dispute that
-began when a mother posted a 29 second video of her child dancing to a Prince
-song and had it taken down by a copyright claim. This kind of response is
-juvenile at best. SOPA
-is very similar. I mean, who would shut down an entire website just because
-someone posted a short clip of your song on their website? This bill can only
-end poorly. If all it takes to have your website taken down, removed from
-search engines, and banks required to not do business with you is a single
-short clip of a copyrighted song or movie, what kind of punishment will we have
-in 10 years for doing 5 over on the interstate? Moreover, the issue just isn't
-about an unjust punishment for something that can barely be construed as a
-misdemeanor in almost every case, it's about censorship. How is it a good thing
-that one government (let alone more than one) has the power to censor the
-entire world? We've already seen from China what this can do. Why is this
-even up for debate when we've already seen where it leads?
-
-Please check out the
-http://en.wikipedia.org/wiki/Wikipedia:SOPA_initiative/Learn_more[Wikipedia
-page] (the only page that is currently not blacked out), read up on the
-subject, and contact your local government representative. Wikipedia will get
-you contact information for who that is if you go to their homepage. Also, if
-you would like to read the actual bill (as of October 26, 2011), please check
-out the Library of Congress site
-http://thomas.loc.gov/cgi-bin/query/z?c112:H.R.3261.IH:/[here].
-
-
-Category:Politics
-Category:EFF
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Drupal,_Mod_rewrite,_Subdirectories,_and_Nginx.ascii b/src/Drupal,_Mod_rewrite,_Subdirectories,_and_Nginx.ascii
deleted file mode 100644
index ce8b379..0000000
--- a/src/Drupal,_Mod_rewrite,_Subdirectories,_and_Nginx.ascii
+++ /dev/null
@@ -1,106 +0,0 @@
-Drupal, Mod Rewrite, Subdirectories, and NGINX
-==============================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-A few days ago I started dabbling with nginx (many thanks for the
-http://arstechnica.com/business/news/2011/11/a-faster-web-server-ripping-out-apache-for-nginx.ars[article]
-from http://arstechnica.com/[arstechnica]) knowing I was getting myself into a
-world without htaccess files. They say that Nginx is easier to configure than
-Apache, but au contraire! If you're doing a simple setup, yes, Nginx is much
-easier than Apache. If you're doing even a slightly more complicated virtual
-host setup, Nginx is definitely much easier. However, if you do much with
-mod_rewrite in Apache, you'll likely find yourself a bit confused by all
-kinds of 404s on your subdirectories. Believe it or not though, with Nginx it
-is actually easier to configure URI rewriting as well, provided you know what
-you're doing...which I do not.
-
-My current setup has Drupal at the root directory, and various other tidbits
-hosted in subdirectories. These aren't anything fancy like subdomains, just
-directories beneath /.
-
-Pretty much any CMS/blog these days uses the .htaccess file to perform URI
-rewrites for search engine friendly URIs, which causes some complications for
-Nginx since you have one config file to set up all of that for your entire
-domain, rather than a config file per directory (if you wish) defining rewrite
-rules for each one. To get my Drupal instance back up and running, I took the
-location directive from the http://drupal.org/node/110224[Drupal Support page]
-for this issue. Specifically I used the following lines...
-
-----
-location / {
- root /path/to/drupal;
- index index.php index.html;
- if (!-f $request_filename) {
- rewrite ^(.*)$ /index.php?q=$1 last;
- break;
- }
- if (!-d $request_filename) {
- rewrite ^(.*)$ /index.php?q=$1 last;
- break;
- }
-}
-----
-
-The problem with using that configuration is that any time you try to hit a
-legitimate sub directory, you receive a 404. The reason is that the
-request_filename will end up going to
-http://yoursite.com/index.php?q=request_filename. An example
-of this would be... Say you go to your site at the following URI:
-http://blerdibler.com/chips. The previous configuration would
-send the request to http://blerdibler.com/index.php?q=chips,
-which of course doesn't exist, so we receive a 404. The fix for this is
-relatively simple, which is very unfortunate because I spent a long time
-finding this face-palmingly simple solution (mostly because once again, I do
-not know what I'm doing).
-
-The fix is to move the Drupal rewrite stuff to its own named location
-directive (I'll show what that looks like in a few), and reference that for the
-last case scenario. So, here's what my location directives look like that allow
-for me to hit up my sub directories as well as my rewritten Drupal pages.
-
-----
-location / {
- index index.html index.htm index.php;
- try_files $uri $uri/ @drupal;
-}
-location @drupal {
- rewrite ^(.*)$ /index.php?q=$1 last;
- break;
-}
-----
-
-So what we're doing here is trying all requests at face value. This means that
-when http://blerdibler.com/anchovies is requested, Nginx first tries to load a
-file called anchovies (not the directory).
-
-If it can't load that, it tries http://blerdibler.com/anchovies/ (the directory
-called anchovies...consequently it searches for index.html/htm/php).
-
-Finally, if neither of those work, it calls the location directive called
-drupal (@drupal) which sends the request to
-http://blerdibler.com/index.php?q=anchovies. If that doesn't work, you're hosed
-and hopefully you've got an attractive 404 page set up. Incidentally, this also
-works for all nested Drupal/Wordpress instances (say, a drupal instance
-located at http://blerdibler.com/drupal2).
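-
-For a nested instance like that, I'd expect a second named location along
-these lines to do the trick (a sketch only; the /drupal2 path is just an
-example and I haven't battle-tested this):
-
------
-location /drupal2/ {
-    try_files $uri $uri/ @drupal2;
-}
-location @drupal2 {
-    rewrite ^/drupal2/(.*)$ /drupal2/index.php?q=$1 last;
-}
------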
-
-Hopefully that helped someone out because I can't write anymore on this topic
-as I am now out of coffee. Sorry. If however, you have questions/comments/etc.,
-please leave them in the comments section and I will go brew up another cup o'
-joe and help you out (if I can...yet again...I still don't know what I'm
-doing).
-
-Thanks for reading!
-
-
-Category:nginx
-Category:Apache
-Category:Drupal
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Duplicating_a_USB_Stick_with_dd.ascii b/src/Duplicating_a_USB_Stick_with_dd.ascii
deleted file mode 100644
index 6f61874..0000000
--- a/src/Duplicating_a_USB_Stick_with_dd.ascii
+++ /dev/null
@@ -1,79 +0,0 @@
-Duplicating a USB Stick with dd
-===============================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-I have a USB stick that I use for fixing friends' computers (among other things)
-that runs Arch Linux. It seems that their most frequent problems are either
-crashed hard drives or a virus that makes their computer unusable. The quick
-solution to backing their data up in either case is to boot an external drive
-and use that OS to copy the data off their drive (assuming you can still get to
-it that is). Unfortunately, flash memory has a maximum number of times that you
-can write to it, which I hit a bit quicker than I'd like running an operating
-system off of a USB stick. As you likely guessed, my USB stick is failing
-(remarkably I've been using it to do this for several years).
-
-Last night whilst (oh yes, whilst) brushing my teeth, I had an epiphany. I
-realized that instead of re-installing Linux on a new USB stick, I could use dd
-to duplicate one USB stick onto another. I tried it, and sure enough, it works
-almost perfectly. I say almost because there was one minor problem that I will
-get to in a minute. Firstly though... The command *dd* is used for making
-bit-for-bit duplicates of data. In this case, we're duplicating the exact bits
-on one device (a USB stick) to another device (another USB stick). You can
-actually use dd to duplicate most Linux ISO installation files onto a USB stick
-as well. It works very similarly to burning a CD. Now that that's explained,
-here's the command I used.
-
-Assuming my source USB stick is at /dev/sda and my destination stick is at
-/dev/sdb (partitions don't matter here because we're duplicating the entire
-drive, not just one partition).
-
-----
-dd if=/dev/sda of=/dev/sdb
-----
-
-(The *if* is the "input file" and *of* is the "output file".) That will likely
-take a while. For me it ran at 2.3 megabytes per second. Yours might run a bit
-slower, as my destination USB stick has an average write speed of 25 MBps which
-is a bit higher than typical USB sticks (many thanks to Newegg for the great
-price). On the flip side, my source USB stick was starting to fail, so yours
-might go way faster. I'm not really sure.
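-
-If you have a reasonably recent GNU dd, a larger block size and a progress
-readout can help (both flags are optional; device names as assumed above):
-
------
-dd if=/dev/sda of=/dev/sdb bs=4M status=progress
------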
-
-Okay, now for that one issue I had.
-
-Two different 8 gigabyte USB sticks (that's what I used) are likely not going
-to have the exact same amount of space on them. If your destination USB stick
-is even slightly smaller than your source disk, you'll miss some data due to
-the disk being full (in the case of dd, even empty space has bits in it that
-get transferred). This will cause problems for you because the filesystem will
-say that it starts here and ends there, when the actual partition ends earlier
-than expected. While this likely won't cause issues for you other than your
-machine complaining about it, it could result in some data loss. Either way,
-the way we get around this issue is really simple. Once you've duplicated your
-USB stick and booted the new one, you should see the following error:
-
-----
-The filesystem size (according to the superblock) is derpderpderp blocks
-The physical size of the device is blerdibler blocks
-Either the superblock or the partition table is likely to be corrupt!
-----
-
-Just run this command and it should solve your issue:
-
-----
-resize2fs /dev/sd-lastpartition
-----
-
-In my case that command was *resize2fs
-/dev/sdb4* (my /home partition).
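-
-One note: resize2fs will sometimes refuse to run until the filesystem has
-been checked, so if it complains, run a check against the same partition
-first:
-
------
-e2fsck -f /dev/sdb4
------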
-
-May there be utility USB sticks for all!
-
-
-Category:Linux
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/EXE_Disassociation.ascii b/src/EXE_Disassociation.ascii
deleted file mode 100644
index f17549b..0000000
--- a/src/EXE_Disassociation.ascii
+++ /dev/null
@@ -1,53 +0,0 @@
-EXE Disassociation
-==================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-I recently fixed a computer with a problem that I have not seen in several
-years.
-
-As every IT guy knows, files in Windows typically have an extension and that
-extension is what Windows uses to determine which program should be used as the
-default for opening that type of file (.docx Microsoft Word, .txt Notepad, .jpg
-your image program, etc).
-
-That being said, what program is used to open .exe (executables / programs)
-files? I actually am unsure as to the answer for this one. I presume Windows
-sees an executable file and knows to run it as a program rather than a file
-that is loaded by another program.
-
-
-[[the-problem]]
-== The Problem
-
-Unfortunately, Windows can lose the association between a .exe and how the file
-should be run.
-
-This is set in the registry but without the ability to run executable files,
-one can't run regedit to make the changes.
-
-
-[[the-workaround]]
-== The Workaround
-
-Make a .reg file that will correct the problem.
-
-Just open the following link, download the file called EXE File Association
-Fix, and run it.
-
-http://www.dougknox.com/xp/file_assoc.htm[http://www.dougknox.com/xp/file_assoc.htm]
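-
-In case that download ever disappears, the heart of such a fix is just
-restoring the default exefile associations. A minimal, untested sketch of the
-idea (assuming the standard Windows defaults) looks something like:
-
------
-Windows Registry Editor Version 5.00
-
-[HKEY_CLASSES_ROOT\.exe]
-@="exefile"
-
-[HKEY_CLASSES_ROOT\exefile\shell\open\command]
-@="\"%1\" %*"
------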
-
-Restart your computer after you have run the registry file. After logging in,
-your executable association problems should be fixed.
-
-Many thanks to http://www.dougknox.com/[Doug Knox] for this fix.
-
-Cheers all!
-
-
-Category:Microsoft
-Category:Windows
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Empathy_Accounts_Dialog_won't_Launch.ascii b/src/Empathy_Accounts_Dialog_won't_Launch.ascii
deleted file mode 100644
index f1aa6ae..0000000
--- a/src/Empathy_Accounts_Dialog_won't_Launch.ascii
+++ /dev/null
@@ -1,36 +0,0 @@
-Empathy Accounts Dialog Won't Launch
-====================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-I am currently working on a blog post on how to build a non-linux user
-friendly-ish laptop (something similar to Ubuntu in software sets) using Arch
-Linux (I know, not exactly the best of ideas). In this process, I installed
-Empathy, a multi-medium instant messenger. When I tried to add an account
-however, I ran into a strange issue that gave me very ambiguous errors (which I
-unfortunately forgot to copy). After searching around, I stumbled upon
-https://bbs.archlinux.org/viewtopic.php?id=96918[this] forum thread that solved
-my problem. The issue is that in Arch Linux, installing Empathy doesn't
-automagically install telepathy, a framework for real time conversation (the
-project page can be found http://www.ohloh.net/p/telepathy[here]). To fix this
-issue, we simply need to install telepathy.
-
-----
-pacman -S telepathy
-----
-
-And with that, give Empathy a reboot
-(http://www.youtube.com/watch?v=W8_Kfjo3VjU[three times]). Unfortunately, I
-found that Empathy has a process that likes to hang around even after
-quitting the application. Just run an ol' *ps -ef | grep empathy* and
-kill the pid and you should be golden.
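-
-In other words, something like this (the pid here is made up; use whatever
-the grep turns up):
-
------
-ps -ef | grep empathy
-kill 12345
------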
-
-
-Category:Linux
-Category:Linux_Applications
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Enabling_Colors_in_Ksh88.ascii b/src/Enabling_Colors_in_Ksh88.ascii
deleted file mode 100644
index d0c9e83..0000000
--- a/src/Enabling_Colors_in_Ksh88.ascii
+++ /dev/null
@@ -1,38 +0,0 @@
-Enabling Colors in ksh88
-========================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-I won't lie. I'm a Linux user. At the very least, I'm a semi-up-to-date 'nix
-command line user and I work on AIX far more than I'd like. I intend no offense
-to you AIX guys out there. My frustration with it is primarily korn shell;
-other frustrations include the fact that every package IBM releases for it is
-between 15 to 25 years old, and its ever-leaning tendencies towards non-Unix
-ways of doing things (eg: smitty, odm, init binaries instead of scripts, etc.).
-
-However, it is what it is. If you like frivolous things such as color in your
-terminal, you may have noticed that putting it in your .profile doesn't work
-super well. It turns out that ksh88 won't recognize the \e or the \033
-characters in place of the actual esc character (no, you're not doing anything
-wrong). What you need to do instead is hit the following key sequence in vi to
-get an actual escape character:
-
-* Go into insert mode
-* Press ctrl+v
-* Hit the escape key on your keyboard
-
-You should now see something like **^[**. This represents an escape key
-press. All of your color-set sequences should be the same right after
-this character.
-
-For an example, creating bold text might look like...
-
------
-echo "^[[1mBold text^[[0m"
------
-
-Here each *^[* is the single literal escape character entered with the ctrl+v
-sequence above, not a caret followed by a bracket.
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Encrypting_Home_Directories_with_EncFS.ascii b/src/Encrypting_Home_Directories_with_EncFS.ascii
deleted file mode 100644
index a8b8dbb..0000000
--- a/src/Encrypting_Home_Directories_with_EncFS.ascii
+++ /dev/null
@@ -1,84 +0,0 @@
-Encrypting Home Directories with EncFS
-======================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-Before I go into how to do this, I'd like to take a moment to explain how encfs
-works in slightly simpler terms than are detailed on the
-http://www.arg0.net/encfsintro[encfs introduction page]. Originally, I was
-going to write my own explanation, but the Wikipedia article on this explains
-it so much better than I did (I just erased several paragraphs after reading
-the Wikipedia article).
-
-____
-EncFS is a Free (GPL) FUSE-based cryptographic filesystem that transparently
-encrypts files, using an arbitrary directory as storage for the encrypted
-files.
-
-Two directories are involved in mounting an EncFS filesystem: the source
-directory, and the mountpoint. Each file in the mountpoint has a specific file
-in the source directory that corresponds to it. The file in the mountpoint
-provides the unencrypted view of the one in the source directory. Filenames are
-encrypted in the source directory. Files are encrypted using a volume key,
-which is stored encrypted in the source directory. A password is used to
-decrypt this key.
-____
-
-http://en.wikipedia.org/wiki/Encfs[Original article]
-
-Wow. How was that for an explanation? I love Wikipedia.
-
-Now that that is out of the way, let's get on to business...
-
-To start things off, we have to create our two directories, the source
-directory and the mountpoint directory. Both should be owned by the user using
-the encrypted data.
-
-----
-mkdir /home/.user && chown -R user:user /home/.user
-mkdir /home/user && chown -R user:user /home/user
-----
-
-*.user* is the encrypted data. You don't ever write data to this directory;
-EncFS handles this for you. **user** is the decrypted data/the mountpoint. You
-ONLY write data here. When you write data here, it shows up in .user as
-encrypted data.
-
-----
-encfs /home/.user /home/user
-----
-
-This will mount /home/.user at the mountpoint /home/user. Without getting too
-specific, what happens is when data is written to /home/user, the data goes
-through EncFS which encrypts that data before writing it to /home/.user/. When
-data is read from /home/user/, the request goes through EncFS, which grabs the
-encrypted version of the file from /home/.user/ and temporarily decrypts it in
-RAM for your use. Ah the beauty of the seamless Linux mounting paradigm
-(that's para-dig-um, not paradigm).
-
-Since we are encrypting an entire home directory, we need to use the nonempty
-parameter for Fuse, since the home directory will always contain something like
-.bash_history from a command line login, or .local from a GUI login. Here's
-our final command.
-
-----
-encfs -o nonempty /home/.user /home/user
-----
-
-And with that, you have an entirely encrypted home directory.
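-
-To unmount it when you're done, it's the standard FUSE command:
-
------
-fusermount -u /home/user
------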
-
-On a final note, be sure you keep the file located at /home/.user/.encfs6.xml
-backed up. That file contains all the data that EncFS needs to use your
-encrypted data. Without this, retrieving your data will be a lot more
-difficult.
-
-
-Category:Linux
-Category:Encryption
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Exim_Spam_Filtering_with_Bogofilter.ascii b/src/Exim_Spam_Filtering_with_Bogofilter.ascii
deleted file mode 100644
index cb9578b..0000000
--- a/src/Exim_Spam_Filtering_with_Bogofilter.ascii
+++ /dev/null
@@ -1,289 +0,0 @@
-Exim Spam Filtering with Bogofilter
-===================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-I have been operating a personal email server for the past 4-ish years with
-very little trouble. My server itself received a truck-load of spam email, but
-none of it was delivered because every email was addressed to an account that
-didn't exist on my server (love that check_local_user filter). I received maybe
-one spam email every 3 - 6 months until recently when my email address was
-leaked in the link:Aol_Email_Hacked[Aol email breach]. While I'm a bit upset at
-Aol for that, I guess it was bound to happen sooner or later to one of the
-email providers, so I guess I can't be too upset. In the end, it's been a good
-experience because it forced me to [finally] learn to set up a spam filter with
-Exim.
-
-I searched the internet for several days weighing the pros and cons of each
-available spam filter (spamassassin, razor, dspam, bogofilter) until finally
-settling on http://bogofilter.sourceforge.net/[Bogofilter] due to its small
-size and the fact that it's written in C (might as well have something that
-_can_ handle a lot of spam, even if mine doesn't see much).
-
-Once I settled, I ran into the problem that spam filtering isn't a very well
-documented thing. All of its parts are _fairly_ well documented, but no one
-place really seems to put it all together with a good explanation of how each
-part interacts. Hopefully I can do that for you here.
-
-[[assumptions]]
-== Assumptions
-
-. Each user's mail is stored in *maildir* format
-. Each user's mail is stored in the *~/Mail* directory
-. Spam will be stored in a directory called *spam*
-. Less sure emails will be delivered to an *unsure* directory
-
-
-[[bogofilter-configuration]]
-== Bogofilter Configuration
-
-First, we need to set up the actual mail analysis software, Bogofilter. My
-bogofilter configuration is fairly simple. To keep things nicely relegated to
-one area of my server, I have my bogofilter logs and word databases stored in
-__/home/mail/bogofilter__.
-
-Regarding the configuration file (/etc/bogofilter/bogofilter.cf), I am using
-the following simple configuration.
-
-./etc/bogofilter/bogofilter.cf
-----
-bogofilter_dir = /home/mail/bogofilter/
-ham_cutoff = 0.60
-spam_cutoff = 0.80
-----
-
-To give you an idea of what that does: emails with a "spamicity" rank between
-0.60 and 0.80 are listed as *Unsure* (remember, ham is good email) and thus
-will be sent to the unsure mail directory. Emails with a "spamicity" rank of
-0.80 or higher will be sent to the *spam* directory (see #Assumptions section).
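-
-If you want to sanity-check the cutoffs against a real message, bogofilter
-can score a single email from stdin (if I'm reading the man page right, -t
-prints a one-line classification and spamicity; the message path here is just
-an example):
-
------
-bogofilter -d /home/mail/bogofilter -t < ~/Mail/cur/some-message
------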
-
-[[exim-configuration]]
-== Exim Configuration
-
-[[routers]]
-=== Routers
-
-Routers in Exim do just what their name indicates: route email.
-Specifically, they route email to transports, but more on those in the
-link:#Transports[next section]. One thing to note on these before we get
-to the actual configuration part: routers in Exim are all executed, in
-sequence, until the email is either denied or delivered.
-
-Note: To give the reader a better idea of where the spam-related routers go, I
- have included the router names for the defaults to provide context.
- Spam-related routers are listed in bold.
-
-./etc/mail/exim.conf
-----
-begin routers
-...
-dnslookup:
-...
-#
-# BOGOFILTER router
-#
-# Routes all mail to spam@domain.tld to the bogo_spam_transport
-bogo_setspam_router:
- driver = accept
- condition = ${if eq {$local_part}{spam} {yes}{no}}
- transport = bogo_spam_transport
-
-# Runs the received email through as a neutral status to be scanned.
-bogo_check_router:
- no_verify
- check_local_user
- domains = +local_domains
- condition = ${if !eq {$received_protocol}{bogodone} {1}{0}}
- driver = accept
- transport = bogo_check_transport
-
-...
-system_aliases:
-...
-user_forward:
-...
-
-# Delivers bogo spam mail to the spam directory
-localuser_bogo_spam:
- driver = accept
- check_local_user
- condition = ${if match{$h_X-Bogosity:}{Spam.*}{1}}
- transport = local_delivery_spam
- cannot_route_message = Unknown user
-
-# Delivers bogo unsure mail to the unsure directory
-localuser_bogo_unsure:
- driver = accept
- check_local_user
- condition = ${if match{$h_X-Bogosity:}{Unsure.*}{1}}
- transport = local_delivery_unsure
- cannot_route_message = Unknown user
-
-...
-localuser:
-...
-----
-
-What we just did here is create four new routers. Here's what each does.
-
-bogo_setspam_router:: Sends emails sent to "spam@domain.tld" to the
-bogo_setspam_transport.
-
-bogo_check_router:: Sends _all_ emails to the bogo_check_transport.
-
-localuser_bogo_spam:: Sends all email to the local_delivery_spam transport.
-
-localuser_bogo_unsure:: Sends all email to the local_delivery_unsure transport.
-
-Those explanations make routers seem like they don't do much at all, and
-without corresponding transports, that would be true. Routers only serve to
-route mail that matches certain criteria to the appropriate transports.
-
-
-[[transports]]
-=== Transports
-
-Transports in Exim perform actions (you might also call these __drivers__).
-They are not processed unless an email is sent to them by a router.
-Consequently, they can be placed anywhere and in any order within the
-*transports* section of the Exim config file.
-
-./etc/mail/exim.conf
-----
-begin transports
-...
-# Bogofilter will add X-Bogosity header to all incoming mail. This can go
-# anywhere in the transport section, usually at the very end after
-# address_reply
-bogo_check_transport:
- driver = pipe
- command = /usr/bin/exim -oMr bogodone -bS
- use_bsmtp = true
- headers_add = X-Bogofilterd: true
- transport_filter = /usr/bin/bogofilter -d /home/mail/bogofilter -l -p -e -u
- return_fail_output = true
- group = mail
- user = exim
- home_directory = "/home/mail/bogofilter"
- current_directory = "/home/mail/bogofilter"
- log_output = true
- return_path_add = false
-
-# This updates the bogofilter database with this email explicitly set as
-# spam (intended for spam@domain.tld)
-bogo_setspam_transport:
- driver = pipe
- command = /usr/bin/bogofilter -d /home/mail/bogofilter -s -l
- use_bsmtp = true
- return_fail_output = true
- group = mail
- user = exim
- home_directory = "/home/mail/bogofilter"
- current_directory = "/home/mail/bogofilter"
- log_output = true
-
-
-# Called when delivering mail to the spam directory
-local_delivery_spam:
- driver = appendfile
- directory = $home/Mail/.spam
- maildir_format
- maildir_use_size_file
- delivery_date_add
- envelope_to_add
- return_path_add
-
-# Called when delivering mail to the unsure directory
-local_delivery_unsure:
- driver = appendfile
- directory = $home/Mail/.unsure
- maildir_format
- maildir_use_size_file
- delivery_date_add
- envelope_to_add
- return_path_add
-----
-
-We just added four transports.
-
-bogo_check_transport:: Uses the _pipe_ driver. Essentially, this one is a
- passthrough transport. It takes the email text and sends it through the
- bogofilter binary with a neutral status. The bogofilter binary inserts a few
- headers into the email as it processes, and then returns. The most important
- of these headers for our purposes is the X-Bogosity header. This one will be
- used later on for delivering mail to the correct directory.
-
-bogo_setspam_transport:: This transport also uses the _pipe_ driver. It is
- called by the bogo_setspam_router, which only catches email sent to
- "spam@domain.tld". The intent of this router is to mark all emails sent
- through it explicitly as spam. This is so users can forward a spam email the
- filters missed to "spam@domain.tld" and the filter will update itself to
- assume the text in the received email is "spammy".
-
-local_delivery_spam:: This transport is a final delivery transport (the
- appendfile driver). All email sent through this transport will be delivered
- to the destination user's "spam" directory.
-
-local_delivery_unsure:: This transport is a final delivery transport (the
- appendfile driver). All email sent through this transport will be delivered
- to the destination user's "unsure" directory.
-
-
-[[a-few-examples]]
-== A Few Examples
-
-There are a few possible paths a given email could take through this system.
-
-
-[[a-spammy-email]]
-=== A Spammy Email
-
-Say you get an email that bogofilter would indicate is spam. Here's how its
-path would go using the previous configurations.
-
-. Exim receives the email. The bogo_setspam_router is skipped because the email
- was sent to you, not spam@domain.tld
-
-. The next router in line, bogo_check_router, is used because it catches all
- email. It routes the email through the bogo_check_transport transport.
-
-. The bogo_check_transport has been called and thus pipes the email through
- the bogofilter binary
-
-. The bogofilter binary inserts the *X-Bogosity* header. In the case of this
- email which is most likely spam, it will insert "X-Bogosity: Spam".
-
-. Exim continues through the routers since the email still has not been
- delivered.
-
-. The next router in line is localuser_bogo_spam. It checks that the email
- header "X-Bogosity" is equal to "Spam". In this case, the
- bogo_check_transport inserted this header and value, and so this router sends
- the email through the local_delivery_spam transport.
-
-. The local_delivery_spam transport (being called by the
- localuser_bogo_spam router), delivers the email to the user's spam directory.
-
-
-[[an-aspiring-spammy-email]]
-=== An Aspiring Spammy Email
-
-An "unsure" email follows the same path, except that bogofilter inserts an
-"X-Bogosity: Unsure" header. The localuser_bogo_spam router doesn't match it,
-the localuser_bogo_unsure router does, and the local_delivery_unsure transport
-drops the email into the user's unsure directory.
-
-[[a-hammy-good-email]]
-=== A Hammy (Good) Email
-
-A good email gets "X-Bogosity: Ham", so neither of the bogo delivery routers
-matches. The email falls through to the standard localuser router and is
-delivered to the inbox as usual.
-
-If anyone has questions about this post, please ask your question on the
-link:{{TALKPAGENAME}}[discussion page] and I'll try to get this updated with
-explanations. Setting up a mail server is hard enough for new folks, without
-adding the extra complication of spam filtering (I'm fairly new to this
-myself), so please ask any and all questions.
-
-
-
-Category:Mail
-Category:Linux
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Expanding_Divs_Containing_Floated_Elements.ascii b/src/Expanding_Divs_Containing_Floated_Elements.ascii
deleted file mode 100644
index a316490..0000000
--- a/src/Expanding_Divs_Containing_Floated_Elements.ascii
+++ /dev/null
@@ -1,29 +0,0 @@
-Expanding Divs Containing Floated Elements
-==========================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-Today I was working on a site with one center column that contained two columns
-within, and I ran into a rather distressing problem (in the most extreme of
-ways of course). When I floated my left column to the left as well as my right
-column to the right, I noticed that the container div shrunk itself to the size
-of its padding, leaving the internal divs just hanging outside (in the cold). I
-toyed with all the css properties I could think of to no avail. I even
-consulted the plastic green ninja on my desk. After all else failed, I decided
-to consult the almighty Google. Behold, my findings...
-
-The website I found was a bit outdated since it referenced Firefox 1.5 as well
-as IE 5.0 (and IE 5 for Mac...I didn't know they ever had one of those).
-Despite its apparent obsolescence, the information it gave was still valid for
-this particular article.
-
-I'll spare you the talk and give you http://www.ejeliot.com/blog/59[the link].
-
-The method I ended up using was applying *overflow:auto;* to my div.
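-
-In practice that looks something like this (class names made up for the
-example):
-
------
-.container {
-  overflow: auto; /* expands the container around its floated children */
-}
-.left  { float: left; }
-.right { float: right; }
------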
-
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Finding_Prime_Factors.ascii b/src/Finding_Prime_Factors.ascii
deleted file mode 100644
index 83ba164..0000000
--- a/src/Finding_Prime_Factors.ascii
+++ /dev/null
@@ -1,77 +0,0 @@
-Finding Prime Factors
-=====================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-I have been working in my spare time on the https://projecteuler.net[Euler
-project] problems. Now, for most languages, these problems aren't too big of a
-deal, because most modern languages do much of the work for you (at least on
-the early problems). I'm working on learning c++ though, and it doesn't do a
-lot of the work for you, which is great in my opinion. One thing it's really
-helping me with is number theory (or whatever it would actually be called). I
-never went to school for computer science, so I lack much of the math that many
-developers have. That said, nearly every problem that Euler has is a really
-great test, not only of programming ability, but of number knowledge.
-
-The most recent problem I've been working on is refactoring my code to solve
-https://projecteuler.net/problem=3[problem 3]. Now, this problem isn't that
-difficult. Where the difficulty lies is in the calculation speed. My original
-program solved this one in about ten minutes I think (again, if you're sporting
-something like ruby, php, perl, etc, you have probably solved this faster
-because they built good calculation methods into the language). In going back
-to refactor though, I've been focusing more on ways to more efficiently
-calculate these things via brute force (I'm sure there's an equation for this
-out there, but I'm using a for loop). Here is the list of things that I found
-to speed up the calculation process.
-
-
-[[calculating-factors]]
-== Calculating Factors
-
-A factor is a number that an original number is divisible by (eg: the factors
-of 10 are 1, 2, 5, and 10).
-
-
-[[dont-go-above-half]]
-=== Don't go above half
-
-When you are finding factors, you are not finding them one at a time. Each
-time you find a factor, you find its counterpart. For example, the factors of
-20 are 1, 2, 4, 5, 10, and 20. When you are looping through starting at 1 and
-you find that the number 20 is divisible by 2, you also know that its
-counterpart is 10 (20/2). When you find the next factor, 4, you have also
-found the factor 5 (20/4 = 5), and so on. This means that your calculation
-time should be cut in half because you only have to calculate up to half of
-the original number (20 in our example). One more example to help visualize
-this: a table. Everyone loves tables!
-
-Factors of 20
-
-[cols=",",options="header",]
-|===================
-|Factor |Counterpart
-|1 |20
-|2 |10
-|4 |5
-|5 |4
-|10 |2
-|20 |1
-|===================
-
-See the overlap at 4 and 5? Once the pairs start repeating in reverse, there
-is nothing new left to find. In fact, since every factor below the square root
-pairs with one above it, you can stop looping at the square root of the
-original number (about 4.47 for 20), which is even better than stopping at
-half.
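-
-Here's a minimal sketch of that pairing idea in C++ (just the loop, nothing
-optimized beyond the square root bound):
-
------
-#include <cstdio>
-
-int main(void) {
-  long n = 20;
-  // Each hit below the square root yields the pair (i, n / i)
-  for(long i = 1; i * i <= n; i++) {
-    if(n % i == 0) {
-      printf("%ld x %ld\n", i, n / i);
-    }
-  }
-  return 0;
-}
------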
-
-
-[[only-calculate-evens-or-odds]]
-=== Only Calculate Evens or Odds
-
-If the number you're factoring is odd, no even number can divide it evenly, so
-after testing 2 you can step the loop by 2 and skip every even candidate,
-cutting the work in half yet again.
-
-
-[[calculating-primes]]
-=== Calculating Primes
-
-Category:Drafts
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Finding_the_Absolute_Path_of_a_Bash_Script.ascii b/src/Finding_the_Absolute_Path_of_a_Bash_Script.ascii
deleted file mode 100644
index 73a69e3..0000000
--- a/src/Finding_the_Absolute_Path_of_a_Bash_Script.ascii
+++ /dev/null
@@ -1,57 +0,0 @@
-Finding the Absolute Path of a Bash Script
-==========================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-This seems to be one that a lot of people want to know how to do (I was one of
-them). In searching the internets, I found a lot of suggestions to use the
-_readlink_ external command. I need to have the script that uses this work on
-Linux and AIX though, which means readlink and many other external commands
-will not be available to me. Here's how it can be done in pure bash:
-
-----
-#
-# Determines the absolute path to the running script. This is useful for
-# needing to muck around in the running directory when the script has been
-# called using a relative path
-#
-getScriptAbsolutePath() {
- if [[ ${0:0:1} == '/' ]]; then
- # If the script was called absolutely
- absPath=${0}
- else
- # If the script was called relatively, strip the . off the front
- script=`echo ${0} | sed 's/\.\?\(.*\)$/\1/'`
- absPath="$(pwd)/${script}"
- fi
- # Strip the script filename off the end
- absPath=`echo ${absPath} | sed 's/\(.*\/\).*\$/\1/'`
-}
-----
-
-So what we do here is start with two variables: the working directory (output
-of pwd), and the command used to call the script ($0). The command used to
-call the script could be anything like
-
-* +./blah.sh+
-* +./scripts/blah/blah.sh+
-* +/usr/local/res/scripts/blah/blah.sh+
-
-If argument 0 starts with a / (such as /usr/local/res/scripts/blah/blah.sh),
-the script was called using an absolute path, so we can just use $0 as our
-absolute path once we strip the script name off the end.
-
-Otherwise, the script was called using a relative path, and $0 needs to be
-appended to the output of pwd to get the absolute path. Using sed, we strip
-off the leading period if it exists, as well as the script filename.
-
-
-Category:Linux
-Category:Bash
-Category:Scripting
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Fixing_Android_Mac_Address_Conflicts.ascii b/src/Fixing_Android_Mac_Address_Conflicts.ascii
deleted file mode 100644
index 06c8f55..0000000
--- a/src/Fixing_Android_Mac_Address_Conflicts.ascii
+++ /dev/null
@@ -1,102 +0,0 @@
-Fixing Android MAC Address Conflicts
-====================================
-
-If you already know this is the fix for your issue, you can skip this section.
-Otherwise, I'll get on to describing the problem.
-
-I have been frustrated for the last few days with my phone. I have run
-CyanogenMod for a while now on my LG G3 (since the early alpha builds), while
-my wife ran a variant of the stock carrier rom. However, due to poor battery
-life, she wanted to have CyanogenMod, since my phone gets about twice as
-much battery life as hers does. Obligingly, I flashed CyanogenMod on her
-phone. That night I noticed a problem occurring with both of our phones,
-which I unfortunately didn't realize the source of until today.
-
-[[symptoms]]
-Symptoms
-~~~~~~~~
-
-The main symptom was that wifi was repeatedly dropping. Rebooting wifi
-and/or toggling airplane mode would fix the issue for a few minutes, but it got
-progressively worse. A few hours before writing this post, it was so bad I
-could only maintain a wifi connection for about 10 seconds before it would fail
-and not even try to reconnect for about five minutes.
-
-[[the-problem]]
-The Problem
-~~~~~~~~~~~
-
-After puzzling through the issue, it occurred to me what it must have
-been: conflicting MAC addresses.
-
-I checked my wife's phone and mine, and sure enough, their MAC addresses were
-identical, specifically **00:90:4c:c5:12:38**. I did some Googling and found
-that many other people had the same issue on various versions and roms of
-Android. After some hunting, I found a
-http://forum.cyanogenmod.org/topic/105128-wifi-bug-cm12/[temporary fix], but
-the fix was for a different phone, which stored its config files in a different
-location (oddly). I did a bit of digging through the filesystem (+find /system
--type f -exec grep -H macaddr "\{}" \;+) and finally found the file that needed
-to be modified for my phone/version of Android. For reusability purposes, I
-also turned this into a _fairly_ friendly script so other folks can do it too.
-
-Note though that this issue is very obscure and the likelihood of seeing it is
-slim. Only people running at least two phones with this bug at the same time
-and on the same wifi network will experience this issue. This is why my phone
-operated fine for months until I put CyanogenMod on my wife's phone and she
-connected to our wifi. Further (to the credit of the CM and the various other
-Android devs out there), this problem would be tremendously difficult for a dev
-to track down because it only manifests with two or more phones, something
-I doubt most devs are testing with at the same time.
-
-[[the-fix-script]]
-The Fix Script
-~~~~~~~~~~~~~~
-
-This script needs to be run as root to work correctly (if you don't run it as
-root, it'll complain at you and exit). Once you've run this script as root,
-simply reboot your phone and your new mac address will take effect.
-
-----
-#!/system/xbin/bash
-
-# Ensure we are running as root, because this won't work otherwise.
-uid=$(id | sed -n 's/uid=\([0-9]\+\).*/\1/p')
-if [[ ${uid} != 0 ]]; then
- echo "Not running as root. Cannot proceed. Exiting..."
- exit 1
-fi
-
-echo "Remounting /system with write access so we can make the modification."
-mount -o remount,rw /system
-
-# The path to the wlan cal file
-cal_path=/system/etc/wifi/bcmdhd.cal
-
-# Don't need this, but might be handy to have documented
-#old_mac=00:90:4c:c5:12:38
-
-# Generate the new mac address
-new_mac=$(printf '00:90:4c:%02x:%02x:%02x\n' $[RANDOM%256] $[RANDOM%256] $[RANDOM%256])
-
-# Sed expression to replace the mac address with something less problematic
-sed -i "s/macaddr=.*/macaddr=${new_mac}/" ${cal_path}
-
-echo "Your new mac address is ${new_mac}."
-----
-
-I personally placed this on my internal storage at /storage/sdcard0/mac_fix.sh.
-To execute it, as root just run...
-
-----
-bash /storage/sdcard0/mac_fix.sh
-----
-
-Note the preceding call to the bash command. Ordinarily you would be able to
-set the execute bit on the script and directly call it. However, Android
-defaults to setting the noexec mount option for the sdcard filesystems (both
-sdcard0 and sdcard1), thus chmod +x doesn't work. This could be worked around
-in the script, but it would make it longer and I don't see the need for it. :)
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Git:Branch_Author_List.ascii b/src/Git:Branch_Author_List.ascii
deleted file mode 100644
index 2a46d45..0000000
--- a/src/Git:Branch_Author_List.ascii
+++ /dev/null
@@ -1,62 +0,0 @@
-Git:Branch Author List
-======================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-Whether your team uses the long-running, topic, or any other multi-branch
-https://git-scm.herokuapp.com/book/en/v2/Git-Branching-Branching-Workflows[branching
-workflows], you usually end up with many server-side "abandoned" branches that
-haven't been committed to in a while, especially when the team is relatively
-agile. While a cluttered server-side branch set isn't always a pressing
-issue, the difficulty of cleanup can make it a long-standing one; one that
-gets worse and worse as time goes by.
-
-Enter, the *git-branch-authors.sh* script.
-
-I wrote this script because my team has the problem I described above. To
-preface the source code though, git doesn't track _who_ created a branch. It
-just tracks at which commit reference the branch was forked from its parent,
-which means we can't actually tell _who_ created a given branch. However, since
-a branch is usually committed to by its creator, we can make an educated guess
-by using the name of the person who committed most recently. At the very least,
-the most recent author will give a point of contact to help find out
-information about the branch.
-
-----
-#!/usr/bin/env bash
-
-# Verify we're inside a git repo
-git status 2>/dev/null 1>/dev/null
-if [[ $? != 0 ]]; then
- echo "Error: '$(pwd)' is not a epository."
- exit 1
-fi
-
-# Set the column headers
-out='Unix Timestamp~Branch~Timestamp~Commit~Author~Relative Time'
-
-# Parse the branches
-for i in $(git branch -r | grep -v HEAD); do
- format="unix:%at~${i}~%ai~%h~%an <%ae>~commited %ar"
- cmd=$(git show "${i}" --format="${format}" | head -n 1)
- out=${out}'\n'${cmd}
-done
-
-# Output the goodness
-echo -e "${out}" | sort -r -n | column -s '~' -t
-
-----
-
-To use this, simply save it to your *~/bin* directory (ensure your PATH
-variable has \~/bin in it or that won't work) and +chmod \+x
-~/bin/git-branch-authors.sh+.
-
-Category:Bash
-Category:Scripts
-Category:Git
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Git:Care_Free_Committing.ascii b/src/Git:Care_Free_Committing.ascii
deleted file mode 100644
index 2e518c3..0000000
--- a/src/Git:Care_Free_Committing.ascii
+++ /dev/null
@@ -1,97 +0,0 @@
-Git:Care-free Committing
-========================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-In the past, I have found myself a bit afraid to fully use git, because git
-history is so painful to rewrite, especially when the repo is shared
-by other users. Besides, it just seems bad practice (and it is) to rewrite
-history.
-
-With true code, my concerns are a bit alleviated because most of the time you
-can test that locally. The situation I'm referring to is using git as a
-deployment mechanism for servers. Let me walk you through my old thought
-process.
-
-I want to try a new change for a particular server type. I have two options. I
-can just log into the server and try out my change, hoping the one that I
-commit as a "copy-paste" later into the git repo works identically, or I can
-make the change inside the git repo and push it upstream, triggering the test
-deployment, which I can (and should) test with. However, what if the change
-doesn't work? I can fix it sure, but I'll muck up the history with unnecessary
-"Broke it...fixed it" commits, and removing those will require rewriting
-history.
-
-
-== Branching
-
-Git is well known for its "cheap branching" because it makes it so easy to
-create a throwaway branch, then rebase or merge the result onto any given
-branch once the experiment works out.
-
-
-== Squashing Commits
-
-Firstly, find the first commit of your branch. We'll assume that this branch
-came off of master and that we are currently working inside this branch (if
-not, run +git checkout <branchname>+)
-
-----
-git log master..HEAD
-----
-
-That command will give you a list of all commits that have happened on your
-feature branch ahead of the master branch. Assuming someone hasn't rewritten
-history (which has happened to me before...ugh), you should be looking at only
-your branch's commits. Scroll to the bottom and copy the commit id for the very
-first commit in the series.
-
-Now run...
-
-----
-git rebase -i <commit_id>^1
-----
-
-Don't forget the "carrot 1" (+^1+) at the end there, as it is very important.
-We just told git to rebase the commit series on top of the most recent commit
-from master (the "carrot 1" says "one commit before this commit", hence one
-commit before your work started since you selected your first branch commit),
-interractively. Iterractive mode gives us a chance to tell git how to handle
-each commit, be it picking, squashing, editing, rewording, etc.
-
-Running the interactive rebase should bring you into an editor with text that
-looks something like...
-
-----
-pick e57d408 Implemented new ifcfg profile functionality
-pick cd476e8 Fixed minor issue
-pick 96a112b Fixed another stupid issue
-pick 9741e2c Testing a small change
-pick ec32a51 Revert "Testing a small change"
-pick 5d61d26 Revert "Fixed another stupid issue"
-...
-----
-
-Here we can change what we want to do with each commit as the rebase proceeds.
-In this case, I want to reduce my commit set down to one commit, the most
-recent (note in your set, the most recent is on the bottom).
-
-----
-pick e57d408 Implemented new ifcfg profile functionality
-s cd476e8 Fixed minor issue
-s 96a112b Fixed another stupid issue
-s 9741e2c Testing a small change
-s ec32a51 Revert "Testing a small change"
-s 5d61d26 Revert "Fixed another stupid issue"
-...
-----
-
-It doesn't matter what the commit messages are at this point. When the time
-comes to merge the commits, you'll get a chance to rewrite the commit message.
-
-Category:Git
-Category:Drafts
-
-// vim: set syntax=asciidoc:
diff --git a/src/Git:Changing_Project_Licensing.ascii b/src/Git:Changing_Project_Licensing.ascii
deleted file mode 100644
index d2967d6..0000000
--- a/src/Git:Changing_Project_Licensing.ascii
+++ /dev/null
@@ -1,60 +0,0 @@
-Git:Changing Project Licensing
-==============================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-I'm unsure about the legality of doing something like this. I think though that
-this probably shouldn't be used if you've already released your project. If
-however you have not yet released and have changed your mind to use a different
-license prior to its release, this may be just the post for you.
-
-I recently was working on a project for which, prior to releasing, I decided
-upon using the Apache V2 license. After some thinking though (and about 10
-commits), I decided I wanted to release this project under the copyleft
-http://www.gnu.org/licenses/gpl-2.0.html[GPL v2] license. Unfortunately
-though, I had already committed the LICENSE file as well as put the shortened
-license header at the top of my program's files. Thankfully, git has a
-solution to fix this problem. However, we will have to fix this in two steps,
-since we will be rewriting a certain file as well as deleting another entirely
-(LICENSE).
-
-[[removing-a-file-section-throughout-history]]
-== Removing a File Section Throughout History
-
-----
-git filter-branch -f --tree-filter "if grep -q 'Apache' somefile; then sed -i -e '2,16d' somefile; fi"
-----
-
-What this does is modify the contents of file **somefile**. Effectively, for
-each commit in history (+git filter-branch --tree-filter+), this checks if the
-file *somefile* contains the string __Apache__. If it does, it then uses sed to
-do an inline edit to delete lines 2-16 (those are the lines containing my
-license header). You will likely need to change those since not all license
-headers are the same length (and don't start at line 2).
-
-[[deleting-a-file-from-history]]
-== Deleting a File From History
-
-Now that we've cleaned out the license header, we just need to remove the
-LICENSE file from all of history so we can put a new one in. To do this, we're
-going to use the *--index-filter* switch.
-
-----
-git filter-branch -f --index-filter 'git rm --cached --ignore-unmatch ./LICENSE'
-----
-
-Something to note about the _git rm_ command we just ran. Notice the
-_--ignore-unmatch_ switch. That will make git rm return a 0 status even if the
-specified file is not found. Basically, that means that it will keep the git
-filter-branch command from exiting when it happens upon a commit where the file
-doesn't currently exist.
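-
-One caveat: filter-branch keeps backup refs under refs/original, so the old
-objects don't immediately go away. Assuming the repo hasn't been pushed
-anywhere yet, the commonly suggested local cleanup is something like:
-
------
-rm -rf .git/refs/original/
-git reflog expire --expire=now --all
-git gc --prune=now
------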
-
-
-
-Category:Git
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Git:Clone_All_Remote_Repos.ascii b/src/Git:Clone_All_Remote_Repos.ascii
deleted file mode 100644
index 77d0523..0000000
--- a/src/Git:Clone_All_Remote_Repos.ascii
+++ /dev/null
@@ -1,109 +0,0 @@
-Git:Clone All Remote Repos
-==========================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-To my knowledge, there isn't a good way to clone all git remote repos in a path
-that doesn't involve either installing a program or writing a script. That
-said, here's the script I wrote to do it.
-
-----
-#!/usr/bin/env bash
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-#
-# @author nullspoon <nullspoon@iohq.net>
-#
-
-argv=( ${@} )
-for (( i=0; i<${#argv[*]}; i++ )); do
- if [[ ${argv[$i]} == "-u" ]]; then
- user=${argv[$i+1]};
- i=$[$i+1]
- elif [[ ${argv[$i]} == "-s" ]]; then
- server=${argv[$i+1]};
- i=$[$i+1]
- elif [[ ${argv[$i]} == "-r" ]]; then
- rbase=${argv[$i+1]};
- i=$[$i+1]
- elif [[ ${argv[$i]} == "-l" ]]; then
- lbase=${argv[$i+1]};
- i=$[$i+1]
- fi
-done
-
-if [[ -z $user ]]; then
- echo -e "\nPlease specify the user (-u) to log in to the remote server as.\n"
- exit
-elif [[ -z $server ]]; then
- echo -e "\nPlease specify the server (-s) where the remote repos are located.\n"
- exit
-elif [[ -z $rbase ]]; then
- echo -e "\nPlease specify a base path (-r) where the repos are located on the remote.\n"
- exit
-elif [[ -z $lbase ]]; then
- echo -e "\nPlease specify a desginated path for local clone (-l).\n"
- exit
-fi
-
-# Escape our base path for use in regex
-rbase_esc=$(echo $rbase | sed 's/\//\\\//g')
-
-if [[ ! -e $lbase ]]; then
- echo -n -e "\n$lbase does not exist. Create? [Y/n] "
- read -n 1 c
- if [[ $c == y ]]; then
- mkdir $lbase
- else
- echo
- exit
- fi
-fi
-echo -e "\nCloning all...\n"
-
-# Get our repo list
-#conn="ssh -q ${user}@${server}"
-cmd="find $rbase -name \"*.git\""
-repos=( $( ssh -q ${user}@${server} ${cmd} | sed "s/$rbase_esc\(.*\)/\1/" ) )
-
-# This is so we can easily handle relative destination paths
-start_path=$(pwd)
-for(( i=0; i < ${#repos[*]}; i++ )); do
- # Clean up our strings first
- lrepo=$( echo ${repos[$i]} | sed 's/\(.*\)\.git/\1/' )
- lrepo=$( echo ${lrepo} | sed "s/$rbase_esc\(.*\)/\1/" )
- labs_path=$( echo "${lbase}/${lrepo}" | sed 's/\/\{1,\}/\//g' )
- rabs_path=$( echo "${repos[$i]}" | sed 's/\/\{1,\}/\//g' )
- # Do some real work
- mkdir -p "${labs_path}"
- cd "${labs_path}"
- echo -e "\nFetching ${user}@${server}:${rabs_path}\n"
- # Clone the remote
- cd ..
- git clone ${user}@${server}:${rabs_path}
- # Do not pass Go
- cd ${start_path}
-done
-----
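-
-Invocation ends up looking something like this (all of these values are
-examples, including the script name):
-
------
-clone-all.sh -u git -s git.example.org -r /srv/git -l ~/repos
------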
-
-
-Category:Linux
-
-Category:Git
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Git_Basics.ascii b/src/Git_Basics.ascii
deleted file mode 100644
index 03af5d3..0000000
--- a/src/Git_Basics.ascii
+++ /dev/null
@@ -1,220 +0,0 @@
-Git Basics
-==========
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-Git can be a very complicated thing. Someone once told me that we mere humans
-have a very difficult time with it at first. I myself have had a
-tremendous[ly difficult] time learning how to use Git (many
-thanks to http://marktraceur.info/[marktraceur] for all the help). It is an
-incredibly robust, and therefore complicated, solution. What source code
-management system isn't though (especially one that is command line)? This
-document should serve as a very high level view of how to use Git. It will not
-cover advanced functionality such as
-http://git-scm.com/docs/git-cherry-pick[cherry-picking],
-http://git-scm.com/docs/git-merge[merging],
-http://git-scm.com/docs/git-rebase[rebasing], etc. If something is not
-documented here, please see the http://git-scm.com/docs[Git docs] or suggest it
-on the discussion page.
-
-[[working-with-branches]]
-Working with Branches
----------------------
-
-Branches in Git are like tree branches. The Git repository itself is the
-trunk and the branches are the various projects in the repository. Typically
-(hopefully) these projects are related to each other. In the case of a
-development project with a frequently changing database schema that you wanted
-to back up, the repository would have two branches: the files branch where the
-code files are stored, and the database branch where the database dumps are
-stored.
-
-[[viewing-branches]]
-Viewing Branches
-~~~~~~~~~~~~~~~~
-
-Viewing branches is simple. Type *git branch* and you should see output
-similar to the following:
-
-----
-$ git branch
-
-* database
- master
-----
-
-To use a different branch, the checkout command is required. In this case, we
-will switch from the _database_ branch to the _master_ branch.
-
-Note: Some decompression happens here, so if the branch to be checked out is
- very large, this will likely take a few seconds.
-
-----
-$ git checkout master
-
-Checking out files: 100% (6110/6110), done.
-Switched to branch 'master'
-----
-
-[[commits]]
-Commits
--------
-
-Git does not have commitmentphobia. In fact, it loves commits as if they were
-its only purpose in life.
-
-In most if not all source code management software, a commit is essentially a
-set of changes to be merged into the master repository.
-
-To create a commit, there are several steps that need to take place.
-
-Firstly, the changed files to be pushed to the repository need to be added. For
-this, we use the _git add_ command.
-
-----
-$ git add ./ex1.blah
-$ git add ./example2.blah
-----
-
-One handy bit for this is the _-A_ switch. If used, git will recursively add
-all files in the specified directory that have been changed for the commit.
-This is very handy if many files were changed.
-
-----
-$ git add -A .
-----
-
-Once the changed files are set up for commit, we just need one more step. Run
-_git commit_ and you will be taken to a text editor (likely vi
-- specified in the repository configuration) to add comments on your commit so
- you and other developers know what was changed in your commit in case
-something is broken or someone wants to revert.
-
-_This piece is key if you are using the git repository as a code repository
-rather than a versioning repository for backups. Please write in meaningful
-comments._
-
-There is actually one more piece to committing a change if you have a remote
-repository on another box or a different location on the local box. So other
-developers can pull the repository and get your changes, you need to _push_
-your changes to the remote repository. Please see the
-link:#Pushing_Changes_to_the_Remote_Repository[Pushing Changes to a Remote
-Repository] section for more information on this. To do this, we use the _git
-push_ command.
-
-
-[[logs]]
-Logs
-----
-
-All of this commit and commit log business is a bit worthless if we can't look
-at logs. To look at the logs we use the _git log_ command. This will open up
-your system's pager (typically less is the one used) to view the logs for the
-current branch. If you wish to view the logs on a different branch, you can
-either check out that branch, or you can type __git log BranchName__.
-
-A handy option for the _git log_ command is the _--name-status_ switch. If you
-use this switch, git will list all of the commit logs along with all of the
-files affected and what was done (modified, deleted, created, renamed) in each
-individual commit.
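-
-For example, to see the affected files for each commit on the database branch:
-
------
-$ git log --name-status database
------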
-
-
-[[remote-repositories]]
-Remote Repositories
--------------------
-
-Git is a distributed code versioning system which means that every person that
-has pulled the repository has a complete copy of the original. This is really
-great for working remotely because you don't have to be online and able to talk
-to the remote repository to see change history.
-
-
-[[adding-a-remote-repository]]
-Adding a Remote Repository
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Git needs several things to add a remote repository. Firstly, it needs a
-local alias for the remote repository. It also needs a username to log
-in to the repo with, as well as the ip address or hostname of the
-repository, and the path to the actual repo directory on the remote
-server. With that, to add a remote repository the command looks somewhat
-like this:
-
-----
-git remote add origin gitman@someserver.org:repos/CleverProjectName
-----
-
-Now, let's break down what that all means since it seems a tad complicated.
-
-[cols=",,,,,",options="header",]
-|===========================================================================
-|git remote |add |origin |gitman |@someserver.org | :repos/CleverProjectName
-|This is the command to work with remote servers in git.
-|Tells git we are adding a remote
-|The local alias for the remote. Origin is typically used here.
-|The username to log in to the remote server with.
-|This is the server where the repo is stored
-|This is the path to the actual repository directory. Since it does not start
- with a / it starts in the home directory of gitman (~/).
-|=======================================================================
-
-[[fetching-a-remote-repository]]
-Fetching a Remote Repository
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Now that we have a remote repository added to our local git repository, we
-simply need to fetch the repo. To do this we use the _git fetch_ command. Here
-is where that alias from the remote add command comes in handy.
-
-----
-git fetch origin
-----
-
-This command will fetch all branches of the origin repository.
-
-[[pushing-changes-to-the-remote-repository]]
-Pushing Changes to the Remote Repository
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Now that we have a local copy of a repository to work on and have made some
-changes, some amount of code synchronization needs to take place with the
-origin repository so each of the developers can have the latest-and-greatest.
-Remember that a commit only records the change in your local copy of the
-repository. What needs to happen after a commit is to push the change to the
-origin repository so everyone else will also have access to your change set.
-To do this, we use the _git push_ command.
-
-There are two parameters for this though. The first is the local alias for the
-remote repository (typically referred to as origin since presumably the remote
-server is where your repository originated). The second parameter is the branch
-name. Since we often have more than one branch, this is a good piece to pay
-attention to so you don't submit a database dump file to the code branch.
-
-----
-git push origin master
-----
-
-
-[[dealing-with-size-issues]]
-Dealing with Size Issues
-------------------------
-
-Since git is a code versioning system that contains as many versions of a file
-as there have been commits to it, its size can grow out of hand rather quickly,
-especially when dealing with binaries. Luckily, there is a handy command for
-this very situation: **git gc**.
-
-This command compresses all of your repository branches in the context of each
-other. This can reduce the size of your local and/or remote repositories very
-effectively. I have a repository that would otherwise be several gigabytes,
-with about 60 commits per branch (it's a repo used for versioned backups), and
-_git gc_ reduced it to about 370 megabytes.
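-
-A minimal invocation; the _--aggressive_ switch trades a longer run for
-(sometimes) better compression:
-
-----
-git gc
-git gc --aggressive
-----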
-
-
-Category:Git
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Git_as_a_Backup_Solution.ascii b/src/Git_as_a_Backup_Solution.ascii
deleted file mode 100644
index 6c13353..0000000
--- a/src/Git_as_a_Backup_Solution.ascii
+++ /dev/null
@@ -1,102 +0,0 @@
-Git as a Backup Solution
-========================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-To preface this post, I would like to point out two very key things. The backup
-files will be stored in two branches inside of a single repository. Those
-branches will be called "files" and "database". You may choose to use other
-names (such as database and master) but for the purposes of this post, the
-aforementioned names will be used.
-
-If it suits you better, you could also use two git repositories. I used that
-for a while and it worked great. I just found it more convenient to have the
-database dumps and the wiki files in one repository for simplicity.
-
-[[files-checkin]]
-Files Checkin
--------------
-
-This will catch all upgrades, uploads, settings file changes, etc.
-Anything you change on the actual filesystem where your wiki is stored
-will be commited to the repository.
-
-----
-# Note: bash assignments take no spaces around the = sign
-export repo=127.0.0.1
-# Check in the files
-cd /path/to/your/wiki
-
-# Add all new, edited, and deleted files
-git add . -A
-# Commit our changes
-git commit -m "Routine Checkin"
-# Push the commit to the files branch of our repository
-git push origin files
-----
-
-
-[[database-checkin]]
-Database Checkin
-----------------
-
-For this we are going to take a database dump and overwrite the old one
-with it. We will then check in the same file, but with the changes.
-Again, any changes made to pages, users, logs, etc will be in the dump
-file and thus will be commited to the repository.
-
-----
-dbFileName = "wiki.data.sql"
-$password = "CheckMeYo"
-$dumpPath = /path/to/dump/backups/
-mysqldump -u wikiUser -p$pass 'databaseName' > $dumpPath$dbFileName
-cd $dumpPath
-git add . -A
-git commit -m "Routine Checkin"
-# Push the commit to the database branch of our repository
-git push origin database
-----
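-
-Since these are routine checkins, both blocks lend themselves to being run
-from cron. A hypothetical crontab entry (the script path and schedule are
-placeholders) that runs the backup nightly at 2am:
-
-----
-0 2 * * * /usr/local/bin/wiki-backup.sh
-----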
-
-
-[[restoring-from-backups]]
-Restoring from Backups
-----------------------
-
-Restoring from a backup is actually quite simple. All one needs to do is
-fetch the repository (origin).
-
-* Firstly, pull the database branch and import the dump file with the mysql
-  client (a mysqldump file is plain SQL, which the mysql client reads directly).
-* Secondly, to get the files (and overwrite any current files), do a
-
-----
-git pull --rebase origin files
-----
-
-and the most recent version of the files branch will show up in the current
-directory.
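-
-A minimal restore sketch, assuming the branch, user, database, and file names
-used earlier in this post:
-
-----
-# Overwrite the working tree with the latest files branch
-git pull --rebase origin files
-
-# Reload the database from the committed dump
-git pull --rebase origin database
-mysql -u wikiUser -p databaseName < wiki.data.sql
-----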
-
-Also, if you worry that someone might download the contents of your .git
-directory, you can move the directory out of the web root between backups and
-move it back in temporarily when a backup runs.
-
-
-[[size-concerns]]
-Size Concerns
--------------
-
-Git has the capability to compress repositories using the *git gc* command.
-This will have git go back through all of the commits in the working repository
-and compress them in the context of all of the other commits. Currently my wiki
-plaintext database dump is 50 megabytes. It has been checked in to the
-repository 18 times and the entire repository is about 17.5 megabytes after a
-"git gc". Neat, huh?
-
-
-Category:Git
-Category:Backups
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Google_Apps_Users_:_Cannot_Use_Self-hosted_XMPP.ascii b/src/Google_Apps_Users_:_Cannot_Use_Self-hosted_XMPP.ascii
deleted file mode 100644
index df195a6..0000000
--- a/src/Google_Apps_Users_:_Cannot_Use_Self-hosted_XMPP.ascii
+++ /dev/null
@@ -1,70 +0,0 @@
-Google Apps Users Cannot Use Self-hosted XMPP
-=============================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-Just over a week ago, Google released Google Plus for its Google Apps users
-(see Google's post
-http://googleenterprise.blogspot.com/2011/10/google-is-now-available-with-google.html[here]).
-I won't lie: despite my hesitation about centralized social networks, I was
-pretty excited about this. I've been receiving invitations from all of my
-friends on my old non-apps GMail account for some time now, so I was eager to
-move them on over to my Google Apps account.
-
-[[enter-google-plus]]
-Enter Google Plus
------------------
-
-As soon as it was enabled for my account, I went straight to the control panel
-to turn it on. I was met with an unfortunate message indicating that I needed
-Google Chat enabled to use Google Plus (it was disabled because I run my own
-Jabber server on bitnode.net and not on a subdomain). My thought was "I'll just
-enable it for a little while and then turn it back off once I've had my fun"...
-so off I went.
-
-Oops
-
-As it turns out, when you try to turn off Google Chat, it just won't go. I
-need to clarify here before going on. When I say turn off, I mean you disable
-Chat and uninstall it. When I say "it just won't go" I mean, it doesn't show up
-in your control panel as either installed or enabled, but when your Jabber
-server tries to connect to the Google Chat servers to check for the statuses of
-all of your friends, you receive the following error:
-
-----
-=INFO REPORT==== yy-mm-dd hh:mm:ss ===
-D(<0.384.0>:ejabberd_receiver:320) : Received XML on stream = "<stream:error><undefined-condition xmlns=\"urn:ietf:params:xml:ns:xmpp-streams\"/><str:text xmlns:str=\"urn:ietf:params:xml:ns:xmpp-streams\">bitnode.net is a Google Apps Domain with Talk service enabled.</str:text></stream:error></stream:stream>"
-----
-
-I have Googled around, and even used Bing to see if there is a workaround for
-this. Sadly, all I have found are people having the same issues. My guess (or
-perhaps just a hope) is this is just a bug caused by the introduction of Plus
-to Google Apps since everything seems to work fine with no errors when you
-disable Chat. Time will tell. My Google Plus and Google Talk have been disabled
-since October 28 and still no change sadly. Here are most of the resources I
-have found referencing this issue.
-
-* http://jcsesecuneta.com/labox/google-apps-xmpp-chat-bug[John Cuneta's
-blog talking about having the same issue]
-* http://jcsesecuneta.com/labox/google-plus-for-google-apps-is-not-xmpp-jabber-friendly[John
-Cuenta's second post regarding this issue]
-* http://www.google.com/support/forum/p/Google+Apps/thread?tid=4aceb036c7ff5abe&hl=en&fid=4aceb036c7ff5abe0004b13a929df8ea[The
-Google Support Thread]
-* http://www.google.com/support/forum/p/Google+Apps/thread?tid=1c3107cfc528d6fa&hl=en[The
-Other Google Support Thread]
-* http://www.google.com/support/forum/p/Google+Apps/thread?tid=0f021783dd77e152&hl=en[The
-Other Other Google Support Thread]
-* http://olegon.ru/showthread.php?t=11181[Olegon's Forum Thread (на
-русском языке)]
-
-
-Category:Google
-
-Category:XMPP
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/How_to_Uninterest_Me_in_Your_Job_Opening.ascii b/src/How_to_Uninterest_Me_in_Your_Job_Opening.ascii
deleted file mode 100644
index 630b54f..0000000
--- a/src/How_to_Uninterest_Me_in_Your_Job_Opening.ascii
+++ /dev/null
@@ -1,87 +0,0 @@
-How to Uninterest Me in Your Job Opening
-========================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-I have been receiving an incredible number of job calls and emails lately. I
-mean, so many that I'm concerned my cell phone minutes will go over my allotted
-450 (sure, I don't have to answer those calls). For whatever reason, I actually
-read through most of these emails. However, lately my brain has been tuning its
-spam filters to reduce the strain of going through that much junk. For you head
-hunters out there, here is my personal list of easy ways to uninterest me in
-your job opening. You might want to pay attention because I suspect I speak for
-a lot of people.
-
-1. Using words like **URGENT**, **NEED**, **ASAP**, and/or *OPPORTUNITY*
-+
-This will not make me want to answer your email any faster. It will, in fact,
-make me sad to see an email from you and not want to do business with you ever.
-
-2. Writing subject lines all or mostly in caps
-+
-Your excessive use of caps lock will only deter me from reading your email. All
-caps says to me "no one is interested in this job for a good reason so I will
-resort to doing whatever I can to make sure that people read the subject of
-this email".
-
-3. Telling me there is a need for a <job title> in my area
-+
-You see, now it just looks like you're spamming me using key words from my
-website. If you have a job in my area, tell me what the job is and who it's for
-- not that there is one.
-
-4. Describing in the job requirements that I must be able to "operate well
- within time constraints and be able to multi-task in a fast-paced
- environment"
-+
-I understand there are some exceptions to this but most jobs require this. In
-fact, I can't remember a single recruiter email yet that hasn't mentioned this.
-
-5. Use the words "fast-paced environment"
-+
-This one is almost a sure fire way of getting me to delete your email in a
-fast-paced way. I have had really bad experiences with this one. I'm sure you
-mean well and intend this to say "__dynamic changing environment__", but to me
-this says "__lots and lots of unpaid overtime__".
-
-6. Make it clear to me that you found me only on a few keywords on my resume
- and offer me a job that is not even remotely close to what I do.
-+
-Just because the keyword _Java_ is in "Java Application Administrator" on my
-resume doesn't mean I'm a "Java programmer".
-
-7. Offer a 3 to 4 month contract in another state where the cost of living is
- far higher than where I am now
-+
-Does anyone ever take these? I just don't see a reason to move to another state
-for "3 to 4 months" other than just wanting to visit that state.
-
-8. A job regarding ways to make SuperUberplexes of moneys from home working
- only a small number of hours per week
-+
-Call me closed-minded, but I view these as either pyramid schemes or
-get-rich-quick schemes. I've seen many of these and they either land people in
-jail or fizzle out with little to no consequence or profit.
-
-9. Call or email me when you clearly know only a small amount of English
-+
-What do you think outsourcing your recruiters says to me? If I ask a very
-simple question and it is not understood, not because I didn't phrase it right
-but because the recruiter doesn't know the words, I will not be very interested
-in your job opening or company.
-
-10. Send me an email that is obviously a generated template
-+
-The sentence "Our records show that you are an experienced IT professional with
-experience In . Net Developer This experience is relevant to one of my current
-openings. <Paste opening title from website here>" is clearly generated.
-Firstly, the word *in* should not be capitalized In the middle of a sentence,
-even if In a title. Also, it's **.Net**, not **. Net**. Additionally, you
-missed a period. Most likely when you copied and pasted the job title in your
-sentence you overwrote the period at the end.
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Hyper-V_and_Vista.ascii b/src/Hyper-V_and_Vista.ascii
deleted file mode 100644
index b4728ff..0000000
--- a/src/Hyper-V_and_Vista.ascii
+++ /dev/null
@@ -1,48 +0,0 @@
-Hyper-V and Vista
-=================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-Today I built out 8 more servers for our dev team. We have a blade server
-hosting all of this with Hyper-V. Here's what my picture looks like...
-
-I log into my Vista work machine, use terminal services to log into the blade
-and open up Hyper-V Manager. From there I connect to the machine via Hypervisor
-console.
-
-Essentially, I have a remote window inside of a remote window. Naturally,
-mouse and keyboard key sends are at less-than-desirable speeds. My hopeful
-solution: the Hyper-V management console on Vista.
-
-Over the last year or so I have been hearing talk about how one could not
-install Hypervisor on Vista, so naturally my hopes were already somewhat
-crushed. Despite the dire situation, I started the search (thanks Google) and
-much to my surprise, with Vista SP1 Microsoft released a patch
-(http://support.microsoft.com/kb/952627[KB952627]) to install the Hyper-V
-Console through Windows Update (thank you Microsoft).
-
-Here are the links (Windows authenticity check required)
-
-Vista x86
-http://www.microsoft.com/downloads/details.aspx?FamilyID=a46d0047-e383-4688-9449-83373226126a&amp;displaylang=en&amp;Hash=gUz0Srl8YL8V57oEvToZsTEga7tWBKPgtjBsGst7kRZwF96bbYMMRWbS3gQJnXWBzg24xhBYw6Zlw3ZNZ8C%2bgg%3d%3d
-
-Vista x64
-http://www.microsoft.com/downloads/details.aspx?FamilyID=f10e848f-289c-4e04-8786-395371f083bf&amp;displaylang=en&amp;Hash=AXftxujSp7eaWx3FURGL1rsoJAoqt0jtSsZfn/Ppq%2bSQXBuWEJ2010LWN8to%2b9azkVXBA/cXS3ONLqYZtBoCDA%3d%3d
-
-Once the management console is installed, you should be able to remotely
-connect to your server with Hyper-V.
-
-Once again, thank you Microsoft for a very satisfying product.
-
-
-Category:Microsoft
-
-Category:Windows
-
-Category:Virtualization
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/IOHQ_Status.ascii b/src/IOHQ_Status.ascii
deleted file mode 100644
index 930c372..0000000
--- a/src/IOHQ_Status.ascii
+++ /dev/null
@@ -1,14 +0,0 @@
-IOHQ Status
-===========
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-=== There are currently 100 iohq posts!
-
-Well that wasn't as awesome as I thought it'd be.
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Indenting_in_VI.ascii b/src/Indenting_in_VI.ascii
deleted file mode 100644
index d0e9932..0000000
--- a/src/Indenting_in_VI.ascii
+++ /dev/null
@@ -1,38 +0,0 @@
-Indenting in Vi
-===============
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-== {doctitle}
-
-Just a quick post here. Today I was writing a perl script and I had a 29 line
-block of code I wanted to indent. After a bit of research (thanks
-stackoverflow), I found what I was looking for.
-
-If you want to indent a block surrounded by braces/brackets, select the top
-brace/bracket with the cursor...
-
-Type +>%+ to indent the entire block
-
-Type +<%+ to unindent the entire block
-
-This is without going into insert mode (hint: if vi is actually typing your
-commands, hit escape).
-
-To indent based on a number of lines, use the following
-
-Type +5>>+ to indent five lines starting where your cursor is
-
-Type +9<<+ to unindent nine lines starting where your cursor is
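-
-As an aside, the same operations work as ex range commands; a quick sketch
-that indents, then unindents, lines 5 through 10:
-
-----
-:5,10>
-:5,10<
-----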
-
-I hope that helps someone. It made my day a few notches better, that's for
-sure.
-
-That's all for now. Signing out.
-
-
-Category:Linux
-Category:Editors
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Install_Java_6_on_Debian_Lenny_5.0.ascii b/src/Install_Java_6_on_Debian_Lenny_5.0.ascii
deleted file mode 100644
index 23b5d24..0000000
--- a/src/Install_Java_6_on_Debian_Lenny_5.0.ascii
+++ /dev/null
@@ -1,64 +0,0 @@
-Install Java 6 on Debian Lenny 5.0
-==================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-Despite its numerous problems and frequent need for updates, Java is still
-used for a lot of software. That sadly means it must be installed to run Java
-applications. Due to the fact that Java is not open source, it classifies as a
-non-free install in Linux and therefore cannot be available by default due to
-some legal jibberjabber (yeah, just made that up).
-
-Here's how it's installed on Debian Lenny.
-
-First off we need to modify our repository sources to search and install
-non-free software. To do this, let's open 'er up in our favorite editor VIM.
-
-Seriously though, you're welcome to use another editor if you want to. I will
-only judge you a little. :)
-
-----
-vim /etc/apt/sources.list
-----
-
-From here we need to make a few modifications. The following sources.list is
-from a fresh default install of Debian.
-
-----
-#
-#deb cdrom:[Debian GNU/Linux 5.0.6 _Lenny_ Official i386 NETINST Binary-1 20100905-11:24]/ lenny main
-#deb cdrom:[Debian GNU/Linux 5.0.6 _Lenny_ Official i386 NETINST Binary-1 20100905-11:24]/ lenny main
-deb http://ftp.us.debian.org/debian/ lenny main contrib non-free
-deb-src http://ftp.us.debian.org/debian/ lenny main
-deb http://security.debian.org/ lenny/updates main contrib non-free
-deb-src http://security.debian.org/ lenny/updates main
-deb http://volatile.debian.org/debian-volatile lenny/volatile main
-deb-src http://volatile.debian.org/debian-volatile lenny/volatile main
-----
-
-Basically all we just did was add "contrib non-free" to the end of two
-repositories. Not too bad, eh?
-
-Next we need to update our package manager. To do this...
-
-----
-apt-get update
-----
-
-Finally, install the Java software you need. In my case, sun-java6-jre
-
-----
-apt-get install sun-java6-jre
-----
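-
-To confirm the runtime landed on your path, a quick check:
-
-----
-java -version
-----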
-
-Annnnnnd away we go!
-
-Category:Linux
-Category:Debian
-Category:Java
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Installation_of_Aptana_Studio_into_Eclipse.ascii b/src/Installation_of_Aptana_Studio_into_Eclipse.ascii
deleted file mode 100644
index 039fe28..0000000
--- a/src/Installation_of_Aptana_Studio_into_Eclipse.ascii
+++ /dev/null
@@ -1,36 +0,0 @@
-Installation of Aptana Studio into Eclipse
-==========================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-Hello all, I recently attempted installing Aptana Studio (a web development
-environment based on Eclipse) after messing up my stand-alone version (which
-was outdated anyways). The installation process seemed relatively simple, but
-upon install I received an ambiguous error message saying:
-
-----
-An error occurred while installing the items
-session context was:(profile=PlatformProfile, phase=org.eclipse.equinox.internal.provisional.p2.engine.phases.Install, operand=null --> [R]org.eclipse.ant.ui 3.4.1.v20090901_r351, action=org.eclipse.equinox.internal.p2.touchpoint.eclipse.actions.InstallBundleAction).
-The artifact file for osgi.bundle,org.eclipse.ant.ui,3.4.1.v20090901_r351 was not found.
-----
-
-After searching this for a bit, I stumbled upon a bug report for Eclipse that
-addressed this issue.
-
-https://bugs.launchpad.net/ubuntu/+source/eclipse/+bug/477944
-
-The simple solution, crack open a fresh new terminal window (or one you already
-have open of course) and type in **sudo apt-get install eclipse-pde**.
-
-Attempt reinstalling the Aptana plugin and all should go smoothly now.
-
-
-Category:Aptana_Studio
-
-Category:Eclipse
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Installing_Gimp_2.7_via_a_PPA.ascii b/src/Installing_Gimp_2.7_via_a_PPA.ascii
deleted file mode 100644
index b63228f..0000000
--- a/src/Installing_Gimp_2.7_via_a_PPA.ascii
+++ /dev/null
@@ -1,50 +0,0 @@
-Installing Gimp 2.7 via a PPA
-=============================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-== {doctitle}
-
-Hello all,
-
-For those of you who foam at the mouth to get your hands on the latest and
-greatest copy of a certain software, specifically in this case Gimp, you might
-have interest here.
-
-I recently read a post on the Gimp website that said Gimp would finally be
-moving to a single window with docks and away from its flying-free method
-where your tools are in their own windows that can be put anywhere. That being
-said, I am quite eager to get a look at 2.8. Sadly though, 2.8 is not available
-yet (as far as I know at least). Version 2.7 is still in beta, but hey, at
-least there IS a beta. :)
-
-image:files/gimp271-sm.jpg[gimp271-sm.jpg,title="gimp271-sm.jpg"]
-
-Here's how we install it on Ubuntu Lucid Lynx
-
-Crack open a terminal and type
-
-----
-sudo apt-add-repository ppa:matthaeus123/mrw-gimp-svn
-----
-
-After you've added that repository, type...
-
-----
-sudo apt-get update
-----
-
-Once our package lists are updated, let's install Gimp 2.7
-
-----
-sudo apt-get install gimp
-----
-
-That's it! Enjoy all the new functionality.
-
-Category:Linux
-Category:Debian
-Category:Ubuntu
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Installing_KDE_4.6_in_Debian.ascii b/src/Installing_KDE_4.6_in_Debian.ascii
deleted file mode 100644
index 9b98137..0000000
--- a/src/Installing_KDE_4.6_in_Debian.ascii
+++ /dev/null
@@ -1,54 +0,0 @@
-Installing KDE 4.6 in Debian
-============================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-Installing KDE on Debian is a pretty simple task. Getting the latest and
-greatest of KDE (or anything) is another matter, especially if you want it
-from a repository.
-
-I searched around the interwebs for some time before stumbling upon the
-http://qt-kde.debian.net/[Debian QT/KDE team site]. As it turns out,
-there is actually a repo for the latest of KDE, saving all of us quite a
-bit of time compiling the .deb files for an entire GUI. Thankfully,
-setup and installation is a breeze (thanks apt-get). First you need to
-add the repo to your sources.list file. To do this, crack open your
-favorite editor (mine is vi) and edit the following file
-
-----
-/etc/apt/sources.list
-----
-
-Once you're in the file, add the following lines:
-
-----
-deb http://qt-kde.debian.net/debian experimental-snapshots main
-deb-src http://qt-kde.debian.net/debian experimental-snapshots main
-----
-
-Save your sources.list file and run the following commands:
-
-----
-aptitude install pkg-kde-archive-keyring
-apt-get update
-----
-
-Finally, we install the latest version of KDE
-
-----
-apt-get install kde
-----
-
-And that's it. Add the repo to your sources.list file, get the repo key,
-update, and install. Beats the pants off of compiling it yourself, huh
-(especially when you're doing it on a machine like mine)?
-
-
-Category:KDE
-Category:Debian
-Category:Linux
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Installing_Team_Foundation_Server_2008.ascii b/src/Installing_Team_Foundation_Server_2008.ascii
deleted file mode 100644
index ae58d41..0000000
--- a/src/Installing_Team_Foundation_Server_2008.ascii
+++ /dev/null
@@ -1,183 +0,0 @@
-Installing Team Foundation Server 2008
-======================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-The installation of TFS 2008 can be a daunting task if the right documentation
-is not used. This post will cover the installation of Team Foundation Server
-2008 and all of its prerequisites.
-
-To preface, this post will document an installation of Team Foundation Server
-2008 on a Windows Server 2008 server using SQL 2008 on the backend.
-
-Here’s the quick rundown of what will be done.
-
-* Install IIS 7.0
-* Install SQL Server 2008
-* Install SharePoint
-* Install Team Foundation Server 2008
-
-Before the install of Team Foundation Server you must have service pack one
-integrated into your install media. Microsoft has outlined how to integrate SP1
-into your install media http://support.microsoft.com/kb/969985[here].
-
-To save text space on each installation process, I will simply say where
-to go to install the specified software rather than doing a step-by-step
-tutorial. Let's get started.
-
-
-[[installation-of-iis-7.0]]
-Installation of IIS 7.0
------------------------
-
-Here’s a good one. To install this one, head to the Server Manager window. From
-there, go to install the IIS 7.0 *Role* (it is called *Web Server (IIS)* in the
-wizard). When you select it to be installed, you will be prompted to install
-two more features additionally, assuming this is a clean install. Accept the
-install of the two additional features and continue. The role services that
-need to be installed are:
-
-* HTTP Redirection
-* ASP.Net (Add required role services as well)
-* IIS 6 Management Compatibility
-
-Click through the windows until IIS 7.0 is installed. Though it is not
-required, I always do a restart after the installation of a new role or
-service, just to be safe.
-
-
-[[installation-of-sql-server-2008]]
-Installation of SQL Server 2008
--------------------------------
-
-Yet another exciting step (like every step in this process) is the installation
-of SQL Server 2008. Insert the DVD (or mount the iso) and run the setup.exe on
-the disk. Select the *New SQL Server stand-alone installation or add features
-to an existing installation* option under the Installation page. Enter your
-license key on the window that comes up and proceed through the various
-prompting windows until you get to the Feature Selection screen. The features
-that need to be installed are:
-
-1. Database Engine Services
-2. Full-Text search
-3. Analysis Services
-4. Reporting Services
-5. Management Tools – Basic
-
-For the Instance Configuration, the Named instance field can be anything. I
-personally use the default instance and Instance ID for simplicity. For Server
-Configuration I used the NT AUTHORITY\NETWORK SERVICE “account” for all of the
-services. Also make sure that SQL Server Agent starts up automatically and not
-manually. The other three should be automatic startup by default.
-
-On the Database Engine page the Microsoft documentation suggests Windows
-Authentication. I believe that that method of authentication has its purposes,
-but for my purposes I use *Mixed Mode* authentication. Don’t forget to add the
-user(s) you want to have sysadmin access to your SQL instance. If you forget
-this step, you won’t be able to get into your instance unless you find a way to
-enable the SQL SA account without having to authenticate.
-
-Add the users you want to have access to the analysis services on the Analysis
-Services Configuration page and continue. For the Reporting Services
-Configuration page, select to **Install, but do not configure the report
-server**. The Team Foundation Installer will do this for you later.
-
-For the last few pages, just click through them (make sure to check if you want
-Microsoft to receive usage reports from your instance). Review your install to
-make sure everything is as it should be and install SQL server.
-
-Popcorn anyone?
-
-
-[[installation-of-sharepoint-products-and-technologies]]
-Installation of SharePoint Products and Technologies
-----------------------------------------------------
-
-Before the installation of SharePoint, we need to do a prerequisite install.
-Head to the Server Manager and add the .NET Framework 3.0 feature. On my server
-instance, this was actually already installed so I didn’t need to install it.
-Simply make sure that you have it installed or you will run into problems later
-on. Now, for the installation of SharePoint Products and Technologies. Head to
-the following link to download Windows SharePoint Services 3.0 with
-Service Pack 2 (x86):
-http://www.microsoft.com/downloads/details.aspx?familyid=EF93E453-75F1-45DF-8C6F-4565E8549C2A&amp;displaylang=en
-
-Run the SharePoint.exe file to get started with the installation. After
-accepting the license agreement, we find ourselves at a fork in the road.
-Select *Advanced* to do a customized install. The server type should be **Web
-Front End**. On the Feedback tab decide whether or not to share usage reports
-with Microsoft. Click **Install Now**. After the installation has completed,
-make sure the *Run the SharePoint Products and Technologies Configuration
-Wizard now* is checked and click **Close**.
-
-In the Configuration Wizard, select *No, I want to create a new server farm*
-and click **Next**. For Database server, type the name of the server your
-database is hosted on. In the case of a single-server install of TFS, this
-will be the hostname of the server that you are installing SharePoint on.
-the server that you are installing SharePoint on.
-
-Choose the name of the SharePoint database or leave it default (I used
-default). Input the username and password for the service account (can be the
-TFSService account) and click **Next**. On the next page, be sure to remember
-the port you choose for your Central Administration web application. It can be
-recovered relatively easily but it’s just best to remember now. Select *NTLM*
-and click **Next**. Review your settings and finalize the install. Finally, we
-need to run a few command line commands. Open a command prompt as admin and
-navigate to **C:\Program Files\Common Files\Microsoft Shared\Web Server
-Extensions\12\bin**. First, run +stsadm.exe -o extendvs -exclusivelyusentlm
--url http://<ThisServersName>:80 -ownerlogin Domain\Username1
--owneremail "admin@localhost" -sitetemplate sts -description "Default Web
-Site"+. Domain\Username1 should be the account you want to have admin
-privileges on the port 80 SharePoint web application. I used mixed
-authentication so I gave this the service account for TFS and SQL. Next, run
-+stsadm.exe -o siteowner -url http://<ThisServersName>:80 -secondarylogin
-Domain\Username2+. In this case, Domain\Username2 represents the user you want
-to be your secondary administrator for your SharePoint port 80 web application.
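-
-For readability, here are those same two commands as a block (using cmd's ^
-line continuations; <ThisServersName> and the Domain\Username accounts are
-placeholders to substitute):
-
-----
-stsadm.exe -o extendvs -exclusivelyusentlm ^
-  -url http://<ThisServersName>:80 -ownerlogin Domain\Username1 ^
-  -owneremail "admin@localhost" -sitetemplate sts ^
-  -description "Default Web Site"
-
-stsadm.exe -o siteowner -url http://<ThisServersName>:80 ^
-  -secondarylogin Domain\Username2
-----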
-
-
-[[installation-of-team-foundation-server-2008]]
-Installation of Team Foundation Server 2008
--------------------------------------------
-
-Welcome to the final step in this installation process (TFS 2008 configuration
-will be in a different post). I won’t slow us down with any detailed intros.
-With that, let’s get started. Insert your installation medium (once again, I
-used an iso mounted through Hyper-V). Start up the installation, agree to the
-TOS (if you actually do) and head on to the next screen. After clicking Next a
-few times, you’ll find yourself at the *Team Foundation Database Server* page.
-As I mentioned earlier in the post, I’m doing a single server install this time
-which means my TFS database is hosted on a local instance of SQL. The installer
-should fill out the local server name for you. Since we’re doing a single
-server install, click **Next**. Sit back and relax for a few minutes while the
-installer runs a **System Health Check**. Once the health check is complete,
-click **Next** to head to the *Team Foundation Server Service Account*
-screen. Once there, specify the account you want TFS to run as. In my case I
-chose a domain account for access reasons. Click **Next**. On the Reporting
-Services Data Source Account screen, input the information for the account you
-want TFS to run reports as. In my case, I elected to go with **Use Team
-Foundation Server service account** since my SQL reporting runs as that user.
-Click **Next**. The installer should automatically fill in the information for
-you on the *Windows SharePoint Services* screen. In my case though, the Central
-Administration URL was incorrect for some reason (the port was one number off)
-so make sure that everything is right before continuing. Click **Next**. Here
-we are at the *Specify Alert Settings* page. If you wish TFS to notify you (or
-anyone else) of various build events (this is configurable), check the *Enable
-Team Foundation Alerts* checkbox and fill in the information for *SMTP server*
-and *From e-mail address* fields. Click **Next**. On the *Ready to Install*
-page, review your settings. If everything is correct, click **Install**.
-
-There you have it... a fresh install of Team Foundation Server 2008.
-
-
-Category:Microsoft
-
-Category:Team_Foundation_Server
-
-Category:Visual_Studio
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Installing_Team_Foundation_Server_2010_Beta_1.ascii b/src/Installing_Team_Foundation_Server_2010_Beta_1.ascii
deleted file mode 100644
index a98e594..0000000
--- a/src/Installing_Team_Foundation_Server_2010_Beta_1.ascii
+++ /dev/null
@@ -1,200 +0,0 @@
-Installing Team Foundation Server 2010 Beta 1
-=============================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-The installation of Microsoft's latest beta release of Team Foundation Server
-2010 has apparently been quite a hot topic in IT lately. My Twitter page isn't
-that popular, yet shortly after I first started tweeting my progress, I was
-receiving messages regarding my documentation. Here is the shortly
-awaited documentation on what I've done to install TFS.
-
-Here's what my environment looks/will look like:
-
-* Windows Server 2008 Enterprise (with hyper-v but that's irrelevant)
-* SQL Server 2008 Standard
-* WSS 3.0
-* Sharepoint 2007 Enterprise
-* Team Foundation Server 2010 Beta 1 (of course)
-
-There are a few ways this environment can be changed around (which versions
-such as Enterprise or Standard can be used). Check out Microsoft's TFS
-installation documentation for this information (a convenient .chm file...check
-my blog post on this if you have issues opening this file).
-
-Additionally, _this post documents a single-server installation on a
-64-bit machine_.
-
-Here's a summary of the order of software installation that will be taking
-place.
-
-* Windows Server 2008 (we have to have an operating system)
-* IIS 7
-* SQL Server 2008
-* Team Foundation Server 2010 Beta 1 (with WSS 3.0)
-
-If it isn't obvious yet, this is going to be a long post. I've taken
-screenshots of many of the steps which won't help the length. Is everyone
-buckled in? Good. Let's get started.
-
-
-[[installing-windows-server-2008]]
-Installing Windows Server 2008
-------------------------------
-
-The install of Windows Server really isn't that complicated. There are no
-special requirements for this. However, post-install, my server was added to a
-domain so I could use domain service accounts. It isn't necessary to have the
-server added to a domain though.
-
-[[installing-iis-7.0]]
-Installing IIS 7.0
-------------------
-
-This part really isn't too bad (thankfully). In Server Manager, click
-**Roles** on the left. On the right, click **Add Role Services**.
-
-image:IIS_01_Add_Role.jpg[height=300]
-
-image:IIS_02_Role_Services.jpg[height=300]
-
-Here some default values are selected. You need to add **HTTP Redirection**,
-**ASP.Net**, **Windows Authentication**, and *IIS 6 Management Compatibility*
-and all subordinate check boxes. Click **Next**.
-
-Here your selections are reviewed. If everything is correct, click **Install**.
-Once the install has completed, you'll see another review window. If everything
-was successful, click **Close**.
-
-
-[[installing-sql-server-2008]]
-Installing SQL Server 2008
---------------------------
-
-Here's where the real fun begins. This install isn't too bad. The real ticket
-is to know if you have any service accounts you want to use for the SQL
-services. Since I did a single-server install on a server dedicated to my team,
-I used NT Authority/Network Service for mostly everything, but I'll get to that
-a little later.
-
-First off, insert the SQL installation media (I used an ISO file mounted
-through Hyper-V for mine).
-
-From here, run the setup executable on the disk. Mine did an autorun. From
-that window, I selected **Installation** on the left navigation pane. On the
-screen that loads on the right, select **New SQL Server stand-alone
-installation or add features to an existing installation**.
-
-The screen that comes up will run five tests. If your installation is clean
-then most likely all five will pass with a green check mark. Mine threw a
-warning to me on Windows Firewall because mine was turned on with default
-settings. Since the server is behind two firewalls, I elected to disable the
-Windows firewall completely. Re-running the test after that resolved the
-warning. Click **Okay**.
-
-Here we are at the obligatory Product Key screen. Enter your product key and
-hit **Next**.
-
-If you accept the license terms (License Terms page), check the box and click
-**Next**.
-
-The next screen wants to install setup support files. Go ahead and click
-*Install* to continue. The following screen will yet again, run some more tests
-(eleven to be precise). Again, if this is a clean install, every test should
-pass.
-
-Here's one of the crucial screens: *Feature Selection*. On this screen, you
-should select to install *Database Engine Services* (for TFS), *Full Text
-search* (for reporting), *Reporting Services* (for reporting), **Analysis
-Services**, **Client Tools Connectivity**, and **Management Tools Basic**.
-Once those are checked, click **Next**.
-
-image:SQL_03_Instance_Configuration.jpg[height=300]
-
-Hit *Next* to continue to the *Instance Configuration* screen. If you want to,
-you can rename this SQL instance to whatever you want it to be. I chose the
-default MSSQLSERVER since it will be used for TFS only and nothing else will
-be connecting to it. Click **Next**.
-
-Click *Next* on the following screen.
-
-Here we are at the Server Configuration section. Unless you have any specific
-domain accounts set up for running SQL, NT AUTHORITY\Network Service will
-suffice for all of the accounts listed. No password is required to use this
-username. Also be sure to change *SQL Server Agent* to start up automatically
-(by default it is manual). Click **Next**.
-
-The Microsoft documentation suggests on the Database Engine Configuration page
-that Windows authentication mode be checked. I have had some pretty nasty
-experiences with this in the past and selected **Mixed mode authentication**.
-Following this, you need to type in a password for the SQL SA user. Also, don't
-forget to add all of the users you want to have access to the DB engine. Once
-you're done with that, click **Next**.
-
-The next page is the Analysis Services Configuration page. Add any users you
-want to have access to the analysis services that your SQL instance will
-supply. Click **Next**.
-
-On the Reporting Services Configuration page, select **Install the native mode
-default configuration**. Click **Next**.
-
-Here's the obligatory Error and Usage Reporting screen. Check whether or not
-you want Microsoft to receive anonymous usage statistics regarding your SQL
-instance and click **Next**.
-
-Nearing the end, click *Next* on the Installation Rules screen.
-
-*Finally*
-
-Once the installation is complete, click *Next* and **Close**.
-
-
-[[installing-team-foundation-server-2010-beta-1]]
-Installing Team Foundation Server 2010 Beta 1
----------------------------------------------
-
-Here we are at the final software install. This part really isn't too bad
-(surprisingly enough).
-
-To begin, insert the installation media (once again, I mounted an ISO through
-Hyper-V). If autorun is enabled, a standard window should come up giving you
-the option to explore the newly inserted media. If this does not happen, just
-open up Computer and navigate to the disk.
-
-Inside the disk there are three folders. Depending on your processor
-architecture, choose either the TFS-x64 or TFS-x86 folders. From within that
-folder, run the Setup.exe file.
-
-Here we're at the first screen. Click *Next* to proceed.
-
-Once again, the ever-present Licensing Terms page. If you accept, check the box
-and hit **Next**.
-
-image:TFS_02_Features_to_Install.jpg[height=300]
-
-The Microsoft documentation suggests that only Team Foundation Server be
-checked. I actually need the build server to be on the Team Foundation Server
-as well so I checked all three. Either will work though. Click **Install**.
-
-image:TFS_04_MidInstall_Restart.jpg[height=300]
-
-During your installation, the server will need to be restarted. Click restart
-now. Upon restart, the configuration option will become available to you.
-
-Pat yourself on the back. You just installed TFS. This is a long enough blog
-post for now. I'll post here very shortly the configuration steps I took for
-TFS (still haven't taken all of the screenshots I need for it).
-
-Thanks for reading.
-
-Dirk
-
-
-Category:Microsoft
-
-Category:Team_Foundation_Server
-
-Category:MsSQL
-Category:IIS
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Installing_Visual_Studio_2008_Service_Pack_1.ascii b/src/Installing_Visual_Studio_2008_Service_Pack_1.ascii
deleted file mode 100644
index aad0156..0000000
--- a/src/Installing_Visual_Studio_2008_Service_Pack_1.ascii
+++ /dev/null
@@ -1,43 +0,0 @@
-Installing Visual Studio 2008 Service Pack 1
-============================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-Recently, I realized that a few of our development servers were running Visual
-Studio 2008 with the beta of SP1. One would assume that the installation of a
-service pack wouldn't be too difficult but since they already had the beta of
-service pack 1 installed, the installation of SP1 became a bit more
-complicated.
-
-If you download the service pack installation file from Microsoft and run it,
-you get an error saying that you need to run the Service Pack Preparation Tool
-before being able to install. Head to the Microsoft website and download the
-removal tool.
-
-http://www.microsoft.com/downloads/details.aspx?FamilyId=A494B0E0-EB07-4FF1-A21C-A4663E456D9D&amp;displaylang=en#AffinityDownloads
-
-In my case, I ran the SP prep tool and received yet another error. It said that
-it needed some files from the installation disk for **Visual Studio 2008 Shell
-(integrated mode) ENU**. The ticket here is that we don't have a disk for that
-and to my knowledge, there isn't one. Microsoft has a download for it but it's
-an executable that doesn't extract an iso. I searched around for a solution to
-the problem and found a site that said to simply uninstall *VS 2008 Shell*
-(listed in Programs and Features as Microsoft Visual Studio 2008 Shell...). I
-performed said action and the prep tool ran fine with no errors.
-
-After running the prep tool, I simply ran the installer for the service pack
-with yet again no errors.
-
-The install did, however, take about two hours (ugh).
-
-There you have it.
-
-
-Category:Microsoft
-Category:Visual_Studio
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Javadoc-style_Perl_Documentation_Generator.ascii b/src/Javadoc-style_Perl_Documentation_Generator.ascii
deleted file mode 100644
index 83d82c8..0000000
--- a/src/Javadoc-style_Perl_Documentation_Generator.ascii
+++ /dev/null
@@ -1,148 +0,0 @@
-Javadoc-style Perl Documentation Generator
-==========================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-I'm not a huge fan of Java, but I really do appreciate their standards for code
-comments. I use them in my PHP, C+\+, and Perl code. There is obviously some
-changing that needs to happen because those languages don't all comment the
-same, but for the most part it works really well.
-
-Today I needed to write up a document on how one of my larger scripts/programs
-worked. I wanted to include the script architecture, but didn't have a good way
-to do it. Then I remembered something one of my favorite open source projects
-does. MediaWiki is doing continuous integration and so they use (as I know
-other OSS projects do) http://jenkins-ci.org/[Jenkins] to do post-commit
-validation. Specifically relating to this post, they use the Jenkins scripts
-to verify that the comments for each function are in the right format and
-contain the right data types, etc. In application to my project at hand, in my
-Perl scripts this would look something like...
-
-----
-#
-# This subroutine does something cool
-#
-# @param $_[0] string This is a test parameter
-# @param $_[1] array This is an array reference of mic checks
-#
-# @return bool Success or failure of this function's awesomeness
-#
-----
-
-The commit validation scripts Jenkins uses would check if the subroutine
-definition did in fact require two parameters and that the function returned
-boolean. Granted, since Perl isn't strongly typed, this has to be a bit looser
-than it would for other languages (C+\+, C#, etc), but you get the idea. This
-documentation style is still awesome (at least, I think it is).
-
-What I needed today though was a script that parsed my other scripts, read in
-all the subroutines (Perl, remember?), parsed out the comments for each one,
-and returned HTML using inline styles so I could copy it into a Word (well,
-LibreOffice Writer) doc without losing formatting. That said, here's the quick
-and dirty.
-
-**Note**: Ironically, I just realized that this script isn't commented.
-
-----
-#!/usr/bin/env perl
-use warnings;
-use strict;
-
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-#
-
-if( scalar( @ARGV ) < 1 ) {
- print "\nPlease specify a file to parse.\n\n";
- exit( 0 );
-}
-
-main( @ARGV );
-
-sub main {
- my $path = $_[0];
- # Open our file and do some science!
- open FILE, $path or die $!;
- my @lines = <FILE>;
- close( FILE );
- my @subs;
- my $body = '';
- for( my $i = 0; $i < scalar( @lines ); $i++ ) {
- my $line = $lines[$i];
- # Remove leading spaces
- $line =~ s/^[\t\s]+//;
- # Collapse runs of inner whitespace down to single spaces
- $line =~ s/[\t\s]+/ /g;
- if( $line =~ /^sub ([\d\w_-]+)[\s{]+$/ ) {
- my $h2 = "<h2 style=\"margin:0px; padding:0px; display:inline; font-size:1.2em; color:#444;\">";
- $body .= '<br />' . $h2 . $1 . "()</h2>\n";
- # We've found one!
- my $comments = '';
- # Now we go backwards, nabbing the comments as we go
- for( my $n = $i - 1; $n > 0; $n-- ) {
- if( $lines[$n] =~ /#[\w\d\s\t]*/ ) {
- # Becase we're now reading backwards,
- # we need to prepend
- $comments = lineToHtml( $lines[$n] ) . $comments;
- } else {
- # Exit and continue
- $n = 0;
- }
- }
- my $pStyle = "<p style=\"display:block; background-color:#eee; margin:0px;";
- $pStyle .= "padding:5px; border:1px dashed #aaa; width:90%; font-size:9pt;\">";
- $comments = $pStyle . $comments . "</p>\n";
- $body .= $comments;
- }
- }
- $body .= "\n\n";
- print bodyToHtml( $body );
- exit( 0 );
-}
-
-sub bodyToHtml {
- my $body = $_[0];
- my $bodyHeader = '<!DOCTYPE html />';
- $bodyHeader .= '<html><head>';
- $bodyHeader .= '</head><body style="font-family:sans-serif;">';
-
- my $bodyFooter = '</body></html>';
- return $bodyHeader . $body . $bodyFooter;
-}
-
-sub lineToHtml {
- my $line = $_[0];
-
- my $formatted = $line;
- $formatted =~ s/^[#\s\t]+//;
- $formatted =~ s/\n+//;
- if( $formatted =~ /^\@param/ ) {
- $formatted =~ s/\@param/<strong>\@param<\/strong>/;
- $formatted = '<br /><span style="display:block; color:#499;">' . $formatted . '</span>';
- } elsif( $formatted =~ /^\@return/ ) {
- $formatted =~ s/\@return/<strong>\@return<\/strong>/;
- $formatted = '<br /><span style="display:block; color:#494; margin-top:10px;">' . $formatted . '</span>';
- }
- $formatted =~ s/ (int|hash|array|string|boolean|bool) / <span style="color:#949; font-style:italic;">$1<\/span> /i;
- $formatted .= "\n";
- return $formatted;
-}
-----
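-
-Usage is simply the script pointed at a target file, with the HTML redirected
-wherever you want it (both file names here are hypothetical):
-
-----
-perl perldoc-gen.pl MyScript.pl > MyScript.html
-----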
-
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Kill_All_Connections_to_SQL_Database.ascii b/src/Kill_All_Connections_to_SQL_Database.ascii
deleted file mode 100644
index 130147a..0000000
--- a/src/Kill_All_Connections_to_SQL_Database.ascii
+++ /dev/null
@@ -1,38 +0,0 @@
-Kill All Connections to SQL Database
-====================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-I recently had to help one of our testers test an application. One of the tests
-to perform was to see how the web application handled losing its connection to
-the database in the middle of various operations.
-
-My first thought on how to do this was to simply take the database offline
-during a session. Unfortunately, however, SQL Server Management Studio won't
-kill current connections when this operation is attempted; rather, it will
-error out.
-
-After searching around I found a query that in essence kills all connections to
-the database but one (single-user mode).
-
-*The query for this.*
-
-----
---- QUERY NUMERO UNO: kick everyone out and take exclusive access
-ALTER DATABASE [DATABASE-NAME] SET SINGLE_USER WITH ROLLBACK IMMEDIATE
---- And let's put this DB back in multi-user mode
-ALTER DATABASE [DATABASE-NAME] SET MULTI_USER
-----
-
-The first statement switches DATABASE-NAME into single-user mode, immediately
-rolling back any open transactions. The second statement sets the database
-back to multi-user mode.
-
-Ah such simplicity.
-
-Category:MsSQL
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Kubuntu_and_Bluetooth_Audio.ascii b/src/Kubuntu_and_Bluetooth_Audio.ascii
deleted file mode 100644
index 172f3d5..0000000
--- a/src/Kubuntu_and_Bluetooth_Audio.ascii
+++ /dev/null
@@ -1,65 +0,0 @@
-Kubuntu and Bluetooth Audio
-===========================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-I just recently made a switch to http://www.kubuntu.org/[Kubuntu] to test out
-their integration of KDE4.4. I must admit that I like this version. It's very
-visually appealing and generally works very well. Post-installation, I
-realized however that I had a very serious problem: my bluetooth headphones
-wouldn't pair with it.
-
-Now for those of you who don't know me, I'm a web developer and a systems admin
-for my company (and my house). If you know much about digital technology you
-also understand that tunes are an essential piece to forward motion on any
-project.
-
-All this being said, I scoured the interwebz for a solution to my problem.
-Thanks to a few arbitrary links, I discovered that the bluetooth manager in
-KDE 4.4 will not pair with audio devices (and a few other types, but those
-aren't important for the sake of this post); this includes bluetooth headsets
-to be used with software such as Skype.
-
-Sadly, with all of that searching I discovered that there seems to be only one
-way to fix this: install the gnome bluetooth manager.
-
-With that, let's get started!
-
-Crack open a terminal and type in:
-
-----
-sudo apt-get install gnome-bluetooth pulseaudio pavucontrol
-----
-
-Here's what those packages do.
-
-* **gnome-bluetooth**: If it isn't already obvious, this is the gnome
-bluetooth manager.
-* **pulseaudio**: This one is in case you don't use pulseaudio for
-your KDE instance (typically I believe KDE does not). This is
-required because, guess what, the KDE audio drivers don't support changing the
-output device from stereo to a bluetooth device.
-* **pavucontrol**: This is short for **P**ulse **A**udio **V**olume
-**Control**.
-
-From here, put your bluetooth device in discoverable mode, open up
-gnome-bluetooth (should be a rather misfit icon in the taskbar), and connect to
-your device.
-
-The second step in the process of connecting is telling your audio system to
-output to the headphones instead of the stereo. To do that, find the Pulse
-Audio Volume Control in your Kickoff menu and change your output to your
-bluetooth headset.
-
-The final step in this is to enjoy some high quality (hopefully) wireless music
-with a 33 foot tether. Enjoy!
-
-Category:Kubuntu
-Category:Ubuntu
-Category:Linux
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Let's_get_started..._again.ascii b/src/Let's_get_started..._again.ascii
deleted file mode 100644
index 605d866..0000000
--- a/src/Let's_get_started..._again.ascii
+++ /dev/null
@@ -1,35 +0,0 @@
-Let's Get Started... Again
-==========================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-Hi everyone! For those of you who don't know me, I'm the author of the
-http://bitnode.net/category/musings-of-a-systems-admin/[Musings of a Systems
-Admin] blog where I discussed many various areas relating to server security,
-troubleshooting, and the building and configuration of servers for Microsoft
-SharePoint, MSSQL, Microsoft Hyper-V, Windows Server 2008, Team Foundation
-Server 2010 and 2008, and various other server technologies out there.
-
-Judging from the title of this blog you have no doubt guessed that the
-aforementioned Microsoft ship has sailed and we're now boarding another
-exciting one for more adventures into the vast world of technology. It almost
-makes it sound like it'll be fun. Have no worries though, I will try to do my
-best to make these posts as painless but helpful as possible via the use of
-clever buzzwords (get out your buzzword bingo cards), an xkcd.com comic here
-and there, and vivid imagery and screenshots to depict the dull and grey world
-of programming...alright, it's not THAT boring, or is that just me?
-
-Here's to the hopefully painless, informational, and entertaining journey
-ahead.
-
-Additionally, here's to Ric who aptly dubbed me Timex.
-
-Cheers
-
-Timex
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Linux:At_the_Office.ascii b/src/Linux:At_the_Office.ascii
deleted file mode 100644
index b9d5c69..0000000
--- a/src/Linux:At_the_Office.ascii
+++ /dev/null
@@ -1,228 +0,0 @@
-Linux:At the Office
-===================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-I have been running Linux on my laptop at home for the last four-ish years now
-and it's given me very little trouble. Mostly it's just been the growing pains
-of each of the projects. I just recently started running Linux on my laptop at
-work as well (if you manage Linux servers, why not use Linux to do it).
-Inevitably, the question has been asked numerous times "what open source Linux
-software out there can do this thing I need to do?" Usually when I start
-researching that though, I find myself wishing to know what everyone else uses
-and there just doesn't seem to be a lot of blog posts on that. That said, here
-we go.
-
-The things I do in my day usually entail the following
-
-
-[[email]]
-== Email
-
-Awwww yeah. This one is everyone's favorite topic I'm pretty sure. I recently
-read an article about how one of the greatest deficiencies of Linux is its lack
-of really solid mail clients. This is true to a certain extent. While Linux has
-a couple of pretty solid mail clients,
-http://projects.gnome.org/evolution/[Evolution] and
-http://www.kde.org/applications/internet/kmail/[KMail], they both still lack
-reliable Exchange support. Evolution has an Exchange mapi plugin, but it was
-pretty buggy for me. It also has support for Exchange EWS, but your exchange
-web services need to be set up correctly for that to work.
-
-The solution I found here, after an unfortunate amount of time hunting around,
-is called http://davmail.sourceforge.net/[DavMail]. I have to say that this
-little piece of software is really great. Exchange basically provides three
-main pieces of functionality: email, calendar syncing, and Active Directory
-address book searching and syncing. All three of these pieces have open source
-equivalents: IMAP, CalDav, and CardDav. What DavMail does is connect to the
-Exchange server and provide a local server for each of these services. With
-this you need not make any wonky changes to your mail client or use any
-unstable plugins. You simply use what's already tried and true (and open source
-if that's important to you): IMAP, CalDav, and CardDav.
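-
-For reference, a minimal _davmail.properties_ might look something like the
-following. The hostname and local ports here are just assumptions for
-illustration; point the URL at your own Exchange/OWA server and pick whatever
-local ports you like.
-
-----
-# Your Exchange (OWA) URL
-davmail.url=https://mail.example.com/owa
-# Local listeners to point your mail client at
-davmail.imapPort=1143
-davmail.caldavPort=1080
-davmail.ldapPort=1389
-davmail.smtpPort=1025
-----
-
-With DavMail running, you then point your mail client at localhost on those
-ports using your Exchange credentials.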
-
-
-[[vpn]]
-== VPN
-
-My company uses two VPNs at present because we are [line-through]#stuck# in
-the middle of a transition from one to the other. That unfortunately means
-that I need two VPN clients.
-Thankfully though, the open source folks have come through on yet another
-awesome competitor to a proprietary alternative. The first VPN client I use is
-called http://www.unix-ag.uni-kl.de/~massar/vpnc/[vpnc]. This one is for
-Cisco's standard VPN server. The other client I use is called
-http://www.infradead.org/openconnect/[openconnect]. This one is for interfacing
-with Cisco's AnyConnect.
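-
-Both are simple to invoke from a terminal. As a quick sketch (the profile
-name and gateway below are made up):
-
-----
-# vpnc reads its profile from /etc/vpnc/; this uses /etc/vpnc/work.conf
-vpnc work
-
-# openconnect just needs the AnyConnect gateway
-openconnect https://vpn.example.com
-----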
-
-
-[[internet-browsing]]
-== Internet Browsing
-
-This one took me a little bit to get sorted out. Don't get me wrong - I like
-Firefox. It's just a really heavy browser. It takes a long time to come up
-from a cold boot and uses a lot of RAM while it's running. Understandably so
-though, since that browser does just about everything. To sum it up now so you
-don't have to read the rest of my ramblings on this particular topic, I ended
-up using https://mozilla.org[Firefox].
-
-Now, to cover the reason why... I really like the
-http://surf.suckless.org/[surf] browser (this browser is so tiny you can
-easily count its size in kilobytes) as well as
-http://midori-browser.org/[Midori] (a clean, small browser that [judging from
-the UI] appears to be a fork of Chromium), but they both lack something one
-really needs when working in a big corporation - Microsoft's NTLM
-authentication. If I try to log in to any SharePoint site, I am immediately
-sent to a 401 (not authorized) error page without even being presented with a
-login box. Firefox, however, has NTLM support built in, so that's the one I
-use now.
-
-
-[[programmingdevelopment-environment]]
-== Programming/Development Environment
-
-Almost every day I'm writing a script or program of some sort in Perl, C\+\+,
-PHP, bash, or ksh. All of this programming occurs in http://www.vim.org/[vim].
-I won't lie, I heart vim. There's not much more to say here.
-
-If you don't know vim but are interested in learning, I highly recommend it.
-If you think keyboard shortcuts aren't worth the time they can save you, just
-move along. If, however, you are in that group but are still interested in
-command line editing (it does have its perks after all),
-http://www.nano-editor.org/[Nano] is a good option for you. Otherwise, in the
-realm of GUIs, I'd say http://bluefish.openoffice.nl/index.html[Bluefish] is a
-solid choice, and http://tarot.freeshell.org/leafpad/[Leafpad] (albeit very
-basic) is a good one for you minimalist folks.
-
-
-[[general-office-authoring]]
-== General Office Authoring
-
-This means Word documents and Excel spreadsheets. I use
-http://www.libreoffice.org/[LibreOffice] for this. In this category, we've got
-some pros, but we definitely have some cons.
-
-The pros are all pretty obvious here. A mostly fully functional office
-authoring suite, nearly equivalent to a multi-hundred dollar suite of
-software, is a pretty big pro, especially since it works almost flawlessly
-with Microsoft's formats. However, on the side of the cons (Kaaahhhhhnnnn!!!),
-we've got a few. Some of the more advanced and less used features of MS Word
-are not yet implemented, or not implemented in the same way, in LibreOffice
-Writer. The biggest impact for me though is LibreOffice Calc. Its biggest
-deficiency in my experience is macros. It turns out that it uses a completely
-different macro language/syntax than MS Excel. This means that chances are,
-those drop-down cells that change your spreadsheet won't work at all. This is
-very problematic when your company publishes metrics using fancy Excel
-spreadsheets with hundreds of kilobytes of macros.
-
-
-[[documentation]]
-== Documentation
-
-I use two products: one out of superiority (in my opinion), and one out of
-necessity. The necessity is LibreOffice Writer, which is required because
-every big company seems to use SharePoint shared documents for documentation,
-despite its poor design, hungry indexer, and a versioning system that's less
-functional than adding the modification date to the document filename.
-
-Out of superiority though (again, my opinion), I use a wiki for documentation.
-Specifically http://www.mediawiki.org/wiki/MediaWiki[MediaWiki], though there
-are many other solutions out there. This enables my team to work
-collaboratively on their documentation. It's easily indexed and searched as it
-is stored in plain text. The markup is easy, and you don't have to fight with a
-http://en.wikipedia.org/wiki/WYSIWYG[wysiwyg] editor wrongly auto-formatting
-much of what you do. For a bigger compare and contrast of SharePoint and
-MediaWiki, I wrote link:MediaWiki_vs_SharePoint[a post] about this a ways back.
-
-
-[[versioning-work]]
-== Versioning Work
-
-This one isn't really something that's super applicable to most people, I
-suspect. For versioning my files though, I have lots and lots of git repos. I
-have one for versioning all the documents I write/modify [because SharePoint's
-versioning is awful], and I have one repo per script that I write, with all of
-my remotes pointing to bare repos sitting on one of our backed up servers. I
-readily admit this isn't the easiest way to do it for most folks, but for me,
-a git fanboy and engineer, git is by far the best [that I know of] and most
-fun way to do this. If I didn't have to do Word documents for documentation
-though, I would happily rely on MediaWiki's versioning functionality for all
-of my documentation needs (sounds a little like a commercial).
-
-
-[[bmc-remedy]]
-== BMC Remedy
-
-Nope, not going to link to it - it's not worth that much dignity. However, if
-you are unfortunate enough to have to deal with this software, it installs
-nicely in wine and in fact runs better on Linux than on Windows (oddly).
-
-Going back to the insult I just threw BMC's way - don't get me wrong, this
-software is neat. It does a good job tracking piles of metadata for ticket
-tracking. However, I have several reasons for disliking it so much. It's a
-super huge bandwidth sucker (go ahead, turn on tcpdump and watch what it does
-when you perform any action). It's also unbelievably slow (here's the
-bandwidth thing again) and is completely dependent on Internet Explorer 6 or
-greater, rather than being its own piece of independent software.
-Additionally, it's buggy and it's missing all kinds of interface conveniences
-that one would expect in something so robust and expensive. Here's to Service
-Now being a better product than its predecessor (I hope).
-
-
-[[connecting-to-windowssmb-shares]]
-== Connecting to Windows/SMB Shares
-
-I've had problems with this in the past in Linux land. For whatever reason, SMB
-share integration into file managers (thunar, nautilus, etc) has been pretty
-slow and buggy. However, if you have root access to your laptop, you can use
-http://www.samba.org/samba/docs/man/manpages-3/mount.cifs.8.html[mount.cifs] to
-mount the SMB share locally and that has yet to fail me. It's fast and stable,
-AND you can add it to your system's fstab. If you want to try SMB shares in
-your file manager though, install your distro's _gvfs_ and _gvfs-smb_
-packages and close all of your file managers to reload things.
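-
-As a quick sketch of the mount.cifs route (the server, share, and account
-names here are all hypothetical):
-
-----
-# One-off mount
-mount -t cifs //fileserver/share /mnt/share \
-  -o username=myuser,domain=MYDOMAIN,uid=$(id -u)
-
-# Or the rough /etc/fstab equivalent, with the password kept in a
-# root-readable credentials file
-//fileserver/share  /mnt/share  cifs  credentials=/root/.smbcreds,uid=1000  0 0
-----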
-
-
-[[transferring-files-from-nix-to-nix]]
-== Transferring Files From Nix to Nix
-
-This one is one of my favorites. The people surrounding the openssh project
-are truly geniuses in my mind. A lot of people transfer files from one Linux
-system to another by using scp to download the file to their local machine,
-and then using scp again to transfer that file from their local machine to the
-destination server. Depending on how things are set up, you may be able to scp
-files straight from server to server.
-
-There's this really neat thing out there called
-http://fuse.sourceforge.net/sshfs.html[sshfs]. Sshfs allows you to mount a
-remote filesystem locally...over SSH. There is no additional software
-installation or configuration required on your server other than having ssh
-installed and running. You can mount these filesystems and drag and drop files
-all over the place. It's a pretty great piece of software if I do say so
-myself, and very stable too.
-
-Now, I typically use scp to transfer my files anyway. Where sshfs really comes
-in handy is when I need to work on remote files such as Word documents or Excel
-spreadsheets that are stored on the remote system. With sshfs I can mount the
-remote share locally and work "directly" on the files without having to scp
-them locally, work on it, save changes, and scp it back to the server.
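-
-A minimal sketch (the hostname and paths are made up):
-
-----
-# Mount the remote home directory at ~/remote
-mkdir -p ~/remote
-sshfs user@server.example.com:/home/user ~/remote
-
-# ...open and save files directly...
-
-# Unmount when done
-fusermount -u ~/remote
-----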
-
-
-[[microsoft-office-communicator]]
-== Microsoft Office Communicator
-
-This one is a sensitive topic for a lot of people. Most of the people I know
-don't like MOC. Granted, most of the time that's because it's not set up right,
-not because the product itself is bad.
-
-To connect to a MOC server from Linux land, we need
-http://www.pidgin.im/[Pidgin] and a plugin for it called
-http://sipe.sourceforge.net/[Sipe]. With these two, you should be able to
-connect to the communicator server, send and receive messages, send and receive
-files, share desktops, and search Active Directory for users. It's a pleasantly
-functional plugin.
-
-
-Category:Linux
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Linux:Checking_CPU_Core_Usage.ascii b/src/Linux:Checking_CPU_Core_Usage.ascii
deleted file mode 100644
index edb06e8..0000000
--- a/src/Linux:Checking_CPU_Core_Usage.ascii
+++ /dev/null
@@ -1,55 +0,0 @@
-Linux:Checking CPU Core Usage
-=============================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-This is mostly for my own future reference. Today I needed to check the
-resource consumption of an application on a currently bare metal system so I
-could get a good idea how to spec out its new virtual home. Now, in Linux,
-checking cpu consumption is easy, but I wanted to check the _per core_ usage.
-The reason in this case was that no one knew if this application was
-multithreaded (likely not, if the application's stability is indicative of its
-code quality), and if it was, how well. Giving a machine multiple threads to
-run a single threaded application is a bit pointless. That said, I found two
-ways to check per core usage that didn't involve installing additional
-packages on the system (http://hisham.hm/htop/[htop], I'm looking at you).
-
-[[mpstat]]
-mpstat
-~~~~~~
-
-Mpstat is a really cool program I happened upon today in my searches. It
-basically reports on every live stat you could ever want on a CPU.
-
-----
-mpstat -P ALL 2 10
-----
-That will report _all_ stats on all cpus every _2_ seconds, _10_ times.
-
-
-[[top]]
-top
-~~~
-
-I'd prefer not to use something interactive, so I can more easily use the
-data with other programs (like tr, cut, grep, etc), which is why I included
-this one second. With top, if you press the *1* key while it's running, it
-will print per-core cpu stats.
-
-----
-Tasks: 188 total, 1 running, 187 sleeping, 0 stopped, 0 zombie
-Cpu0 : 0.3%us, 0.0%sy, 0.0%ni, 99.7%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st
-Cpu1 : 0.0%us, 0.0%sy, 0.0%ni, 97.0%id, 3.0%wa, 0.0%hi, 0.0%si, 0.0%st
-Cpu2 : 0.0%us, 0.0%sy, 0.0%ni,100.0%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st
-Cpu3 : 0.0%us, 0.0%sy, 0.0%ni,100.0%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st
-Mem:  4086584k total, 3951260k used, 135324k free, 24532k buffers
-Swap: 8388600k total, 4203824k used, 4184776k free, 103416k cached
-----
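-
-If you'd rather have that output in a pipeline than on screen, top also has a
-batch mode, which plays nicely with the tools mentioned above. One caveat:
-top only prints the per-core lines if the *1* toggle has been saved to your
-toprc (press *1*, then *W*, while top is running interactively).
-
-----
-# One non-interactive iteration, filtered down to the cpu lines
-top -b -n 1 | grep '^Cpu'
-----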
-
-Category:Linux
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Linux:Comparing_Remote_with_Local.ascii b/src/Linux:Comparing_Remote_with_Local.ascii
deleted file mode 100644
index c681a3a..0000000
--- a/src/Linux:Comparing_Remote_with_Local.ascii
+++ /dev/null
@@ -1,118 +0,0 @@
-Linux:Comparing Remote with Local
-=================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-Today I ran into quite the conundrum. I needed to compare the contents of a
-file on a remote server with the contents of a file on my local server over
-ssh. My original solution was to copy the file from one server to the other
-over sftp and compare their contents with diff.
-
-However, when you need to compare more than just the contents of a file,
-things can get really messy. Take, for instance, another situation where I
-needed to compare the contents of a directory. First, I had to output the
-contents of the directory on BOTH servers into two files. Then I had to sftp
-into one server, copy the file over to the other server, and run diff to
-compare their contents. Talk about complicated, yeah?
-
-All this copying and sftp-ing around frustrated me to the point where I wanted
-to find another solution. Sadly, my solution is a bit complicated for someone
-who doesn't know the linux command line super well, but at least it works and
-it works fast. I'll go slowly because if I don't, I won't get it myself either.
-
-----
-ssh username@ServerIP 'ls -1 /tmp' | diff - <(ls -1 /tmp)
-----
-
-Here's our example. The end result of this example is to get a comparison of
-the contents of a remote /tmp directory and the contents of our local /tmp
-directory.
-
-First things first, we have to run a command remotely to get the contents of
-said remote directory. To do this, we simply run
-
-----
-ssh username@ServerIP 'ls -1 /tmp'
-----
-
-That gets a list of the files and folders in the /tmp directory. Specifically,
-the '-1' switch gives us one file or folder per line.
-
-Next up we pipe that into the diff command.
-
-For those of you who may not know about this functionality, piping basically
-takes the output of one command, and feeds it to another. In this case, we are
-taking the listed contents of a remote directory and feeding it to the diff
-command.  Now, we do this by using the following.
-
-----
-... | diff - ...
-----
-
-Basically, the diff command works by finding the difference between the first
-thing it is given and the second thing it is given. Generally speaking, diff
-works like the following.
-
-----
-diff <file1> <file2>
-----
-
-In this case, we give diff a dash (-) as its first file, which tells it to
-substitute the piped-in input for that file.
-
-Up to this point, we have the contents of our remote directory and we have
-run the diff command. All we need now is to give it the second input to
-compare the first to. This brings us to our final step: getting the contents
-of a local directory.
-
-Listing a directory is about the most common linux command line operation
-there is. However, because we want to compare the listing against another
-command's output rather than against a file, things sadly get a bit more
-complicated. To accomplish this, we need to run a nested command.
-
-Ordinarily, running ls -1 /tmp after a diff command would result in an error
-rather than giving us what we want. To substitute a command's output for a
-file, we need to encase the command in <() (bash calls this process
-substitution). Our final piece of the command should look like this.
-
-----
-<(ls -1 /tmp)
-----
-
-This completes our command. If you try to run the entire thing, you should be
-asked for your password to the remote server. Upon entering your password, the
-command should run as expected, comparing the files and folders in the two
-directories.
-
-The final command again looks like this...
-
-----
-ssh username@ServerIP 'ls -1 /tmp' | diff - <(ls -1 /tmp)
-----
-
-If you want to get really tricky, you can compare the contents of a
-remote file and the contents of a local file. We'll take httpd.conf for
-instance.
-
-----
-ssh username@ServerIP 'cat /etc/httpd/conf/httpd.conf' | diff - <(cat /etc/httpd/conf/httpd.conf)
-----
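-
-Incidentally, the same trick extends to comparing files on two remote servers
-without either file ever touching your local disk - just use two process
-substitutions (hostnames here are placeholders):
-
-----
-diff <(ssh user@server1 'cat /etc/httpd/conf/httpd.conf') \
-     <(ssh user@server2 'cat /etc/httpd/conf/httpd.conf')
-----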
-
-Hopefully that description wasn't too confusing. It's a complicated command
-to run (probably the worst I have ever used, actually), but with some
-practice it should become pretty easy once you understand how it works.
-
-Let me know if I didn't describe anything well enough and I will do my best to
-help out and update the post so it is more user friendly.
-
-Thanks for reading!
-
-
-Category:Linux
-Category:SSH
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Linux:Desktop_Sharing.ascii b/src/Linux:Desktop_Sharing.ascii
deleted file mode 100644
index 521a16d..0000000
--- a/src/Linux:Desktop_Sharing.ascii
+++ /dev/null
@@ -1,73 +0,0 @@
-Linux:Desktop Sharing
-=====================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-For the last several weeks, I and several others running Linux on my team have
-been unable to use the third party desktop sharing service our company has
-purchased. This is due to the fact that several weeks ago, we all received
-updates to our system versions of Java (openjdk and icedtea), which broke their
-"web" client. We still need to share desktops though on occasion for meetings,
-so a solution needs to be found. Thankfully there is a pretty great solution
-out there for this that handles surprisingly well:
-https://en.wikipedia.org/wiki/Virtual_Network_Computing[VNC].
-
-[[enter-vnc]]
-== Enter VNC
-
-I'm not VNC's biggest fan. It's a really neat protocol, but it is often
-misused. In nearly every deployment of it that I have seen, the end user
-didn't tunnel through ssh, didn't enable ssl, and/or used their actual
-account password as the vnc session password. If someone were particularly
-clever, they could record the packets, effectively replay the vnc session,
-and possibly get the user's password, amongst a list of other potential
-things.
-
-Now, given that we're doing desktop sharing, we can't tunnel over ssh because
-that requires a user account (unless you set up an anonymous account, which is
-another good option). We can however do vnc over ssl.
-
-To get going, we need one piece of software -
-**http://www.karlrunge.com/x11vnc/[x11vnc]**. X11vnc differs from other vnc
-servers in that it allows you to share display :0 rather than creating a new
-virtual display (typically starting at :1). This allows you to physically be
-using the display while other people watch it. Let's look at the
-command/script to get this started...
-
-----
-#!/usr/bin/env bash
-echo "Sharing desktop on 5900" x11vnc -viewonly -ssl -sslonly -passwd <password> -forever
-----
-
-What we have here is...
-
-[cols=",,,,,",options="header",]
-|===============================================================
-|x11vnc |-viewonly |-ssl |-sslonly |-passwd <password> |-forever
-|
-|Prevents users from taking control of your display
-|Makes ssl connections available
-|Forces SSL to be used by all connecting clients
-|Set the session password
-|Don't shut the server down when a user disconnects
-|===============================================================
-
-One thing worth noting here is that with this, you can do
-clipboard sharing if the clients all support it. All the sharer has to do is
-copy something and all of the clients should be able to paste it on their
-computers. I've used this for several meetings now and it works great. The
-biggest difficulty I've had up to this point is to get people to install VNC
-clients for the first time. Once they've got that going, they typically comment
-shortly after the meeting about how much faster and easier vnc is than the
-service the company pays for.
-
-
-Category:VNC
-Category:Linux
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Linux:Formatting_a_Hard_Drive.ascii b/src/Linux:Formatting_a_Hard_Drive.ascii
deleted file mode 100644
index 51ef09b..0000000
--- a/src/Linux:Formatting_a_Hard_Drive.ascii
+++ /dev/null
@@ -1,108 +0,0 @@
-Linux:Formatting a Hard Drive
-=============================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-Good afternoon everyone or good evening/morning, depending on which time zone
-you're reading this from...
-
-*Ahem*
-
-Good afternoon from GMT -7 everyone (much better),
-
-If you've done anything with Linux-based servers, you have most likely at one
-time or another had to format a hard drive, which unfortunately can be quite
-the feat in Linux if you're not too comfortable with the command line (and if
-you're a linux sys admin, you should be). In this post, I will describe how
-to format an ENTIRE drive (doing just a portion is a bit more
-complicated...post in the comments section if you want to see a post on how
-to do a partial format).
-
-[[finding-which-drive-to-format]]
-== Finding which drive to format
-
-To start off, we need to find the disk that needs to be formatted. Do this by
-typing
-
-----
-sudo fdisk -l
-----
-
-If the disk has not been formatted you should
-see
-
-----
-Disk /dev/ doesn't contain a valid partition table.
-----
-
-If the drive has already been formatted, you can identify it by the amount of
-space (the blocks column...it's in kilobytes; for example, 249023502 is
-roughly 250 gigabytes). Another method is to use
-
-----
-mount -l
-----
-
-The drive should show up as **/dev/ on /media/**.
-
-
-[[formatting-the-drive]]
-== Formatting the drive
-
-To start up the format process, let's type
-
-----
-fdisk /dev/sdc
-----
-
-(sdc is our example drive. The drive you want to format was found in the
-previous step).
-
-If your drive already has a partition table, you need to delete that. Do this
-by typing the letter *"d"* and pressing enter.
-
-If the drive is NOT formatted yet, all you need to do here is press the letter
-**"n"**.
-
-Fdisk will now prompt you to give it a start and end block for the partition
-(this is essentially how much of the drive the partition will span). If you
-want to use the entire drive, just hit enter twice to select the defaults
-(the first and the last blocks...the entire drive).
-
-Now that we've selected which parts of the drive to partition, press *"w"* to
-write the changes to the disk (up to this point, no changes have been made,
-so if you want to back out, now is the time).
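-
-One more step before mounting: the new partition still needs a filesystem.
-Assuming the new partition came up as /dev/sdc1 and you want ext4 (both
-assumptions - substitute your own device and filesystem):
-
-----
-mkfs.ext4 /dev/sdc1
-----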
-
-Now that we've formatted the drive and created the partition table, we can
-mount the drive. To mount the drive, there are two options.
-
-First, the drive can be removed and plugged back in. This will cause an
-auto-mount (if that's enabled on your machine). The other way is to use the
-mount command. To do this, we need a mount point. This can simply be a folder
-where your drive will show up (without getting too complicated). For this
-example, I'll put a folder at *.
-
-Now, earlier when we formatted the hard drive, we formatted the drive located
-at* (drive sdc partition 1). Now, with that out of the way, let's mount
-partition one.
-
-Type *
-
-What that does is mount partition one (/dev/sdc1) at *.
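-
-Concretely, that looks something like this (the mount point name is
-arbitrary):
-
-----
-mkdir -p /media/mydrive
-mount /dev/sdc1 /media/mydrive
-----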
-
-Many people say practice makes perfect. With that, go practice formatting on
-all of your hard drives and usb sticks.   :)
-
-Once again...
-
-*Disclaimer: I am not responsible for any loss of data or damage to
-personal property due to attempting the contents of this article.*
-
-
-Category:Linux
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Linux:Luks_Password_Changing.ascii b/src/Linux:Luks_Password_Changing.ascii
deleted file mode 100644
index 96e3790..0000000
--- a/src/Linux:Luks_Password_Changing.ascii
+++ /dev/null
@@ -1,43 +0,0 @@
-Linux:Luks Password Changing
-============================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-Given my most recent posts about Linux:System_Encryption[system encryption]
-and Linux:dm-crypt_Encrypted_Home_Directories[encrypted home directories], I
-think this post is a good followup, since account passwords should be changed
-routinely.
-
-I use http://code.google.com/p/cryptsetup/wiki/DMCrypt[dm-crypt] with a
-http://code.google.com/p/cryptsetup/[LUKS header] for my work computer's
-encryption. It uses my Active Directory password for the luks password. While
-my Windows password is a very safe one, Windows NTLM
-https://securityledger.com/2012/12/new-25-gpu-monster-devours-passwords-in-seconds/[is
-not the most secure hashing algorithm] on the planet. But I digress.
-
-I just changed my password at work after 3 months of use, which means I've
-got to add the new key to my LUKS header and remove the old one (the old one
-still works fine, I just want one password for my logins). Yes, this is in
-the man page, but I thought I'd post it here for anyone too lazy (like
-myself) to hunt through the man page. It turns out cryptsetup has a change
-key feature.
-
-----
-cryptsetup luksChangeKey <device>
-----
-
-If you run that command, it will ask you for the old password. Type that in and
-if it matches, you will be prompted to enter the new password twice. Once
-that's done, there's no need to umount and remount. The next time the volume is
-remounted though, it will require the new password.
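-
-For example, assuming the encrypted partition is /dev/sda2 (check yours with
-lsblk or fdisk -l):
-
-----
-# See which key slots are currently in use
-cryptsetup luksDump /dev/sda2
-
-# Change the key; add -S <slot> to target a specific key slot
-cryptsetup luksChangeKey /dev/sda2
-----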
-
-
-Category:Linux
-Category:Security
-Category:Encryption
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Linux:RAID_Setup.ascii b/src/Linux:RAID_Setup.ascii
deleted file mode 100644
index e9455cc..0000000
--- a/src/Linux:RAID_Setup.ascii
+++ /dev/null
@@ -1,253 +0,0 @@
-After fighting with the problem detailed in my Btrfs:RAID_Setup[ last
-post] about this, I decided to go hunting for information about the RAID 5
-implementation in btrfs. It turns out that it hasn't been completely
-implemented yet. Given the status verbiage on their wiki page, I'm
-surprised it works at all. I suspect the wiki isn't entirely up to date
-though, since it does seem to work to a certain extent. I still need to
-do more research to hunt this down.
-
-You can find that wiki post
-https://btrfs.wiki.kernel.org/index.php/Project_ideas#Raid5.2F6[here].
-
-[[the-new-new-solution]]
-== The NEW New Solution
-
-Since RAID 5/6 is not yet completely implemented in Btrfs, I need to find
-another solution. Given that I still want redundancy, the only other obvious
-option I thought I had here was a
-http://en.wikipedia.org/wiki/Standard_RAID_levels#RAID_1[RAID 1] configuration.
-However, as many Google searches do, searching for one thing leads to
-something else very interesting. In this case, my search for Linux RAID
-setups sent me over to the official kernel.org
-https://raid.wiki.kernel.org/index.php/Linux_Raid[RAID page], which details
-how to use http://en.wikipedia.org/wiki/Mdadm[mdadm]. This might be a better
-option for any RAID level, despite the Btrfs support, since it detaches the
-dependency on the filesystem for such support. Everyone loves a layer of
-abstraction.
-
-[[setup---raid-5]]
-=== Setup - RAID 5
-
-Let's get the RAID array set up.
-
-----
-mdadm -C /dev/md0 -l raid5 -n 3 /dev/sdb1 /dev/sdc1 /dev/sdd1
-# Or the long version so that makes a little more sense...
-mdadm --create /dev/md0 --level raid5 --raid-devices 3 /dev/sdb1 /dev/sdc1 /dev/sdd1
-----
-
-
-[[setup---raid-1]]
-=== Setup - RAID 1
-
-----
-mdadm -C /dev/md0 -l raid1 -n 3 /dev/sdb1 /dev/sdc1 /dev/sdd1
-# Or the long version so that makes a little more sense...
-mdadm --create /dev/md0 --level raid1 --raid-devices 3 /dev/sdb1 /dev/sdc1 /dev/sdd1
-----
-
-
-[[what-just-happened]]
-=== What Just Happened?
-
-[cols=",,,",options="header",]
-|=======================================================================
-|mdadm |-C,--create /dev/md0 |-l,--level raid5 |-n,--raid-devices 3 /dev/sdb1 /dev/sdc1 /dev/sdd1
-|
-|Create a virtual block device at /dev/md0
-|Set the raid level to RAID 5 for our new device
-|The number of RAID devices is 3 - /dev/sdb1, /dev/sdc1, and /dev/sdd1.
-|=======================================================================
-
-
-[[the-rest]]
-=== The Rest
-
-We did just create a RAID array and a virtual device to map to it, but that's
-all. We still need a filesystem. Given that this whole series of posts has been
-about using Btrfs, we'll create one of those. You can still use whatever
-filesystem you want though.
-
-----
-mkfs.btrfs /dev/md0
-mount /dev/md0 /mnt/home/
-----
-
-
-[[mounting-at-boot]]
-=== Mounting at Boot
-
-Mounting at boot with mdadm is a tad more complicated than mounting a typical
-block device. Since an array is just that, an array, it must be assembled on
-each boot. Thankfully, this isn't hard to do. Simply run the following command
-and it will be assembled automatically
-
-----
-mdadm -D --scan >> /etc/mdadm.conf
-----
-
-That will append your current mdadm setup to the mdadm config file in /etc/.
-Once that's done, you can just add /dev/md0 (or your selected md device) to
-/etc/fstab like you normally would.
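-
-For reference, following the btrfs example above, the fstab entry might look
-something like this:
-
-----
-/dev/md0  /mnt/home  btrfs  defaults  0 0
-----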
-
-
-[[simple-benchmarks]]
-== Simple Benchmarks
-
-Here are some simple benchmarks on my RAID setup. For these I have three
-1TB Western Digital Green drives with 64MB cache each.
-
-
-[[single-drive-baseline]]
-=== Single Drive Baseline
-
-[[ext4]]
-==== Ext4
-
-1GB Block Size 1M (1000 blocks)
-
-----
-[root@zion home]# dd if=/dev/zero of=./test.img bs=1M count=1000
-1000+0 records in
-1000+0 records out
-1048576000 bytes (1.0 GB) copied, 4.26806 s, 246 MB/s
-----
-
-1GB Block Size 1K (1000000 blocks)
-
-----
-[root@zion home]# dd if=/dev/zero of=./test2.img bs=1K count=1000000
-1000000+0 records in
-1000000+0 records out
-1024000000 bytes (1.0 GB) copied, 6.93657 s, 148 MB/s
-----
-
-
-[[raid-5]]
-=== RAID 5
-
-[[btrfs]]
-==== Btrfs
-
-1GB Block Size 1M (1000 blocks)
-
-----
-[root@zion home]# dd if=/dev/zero of=./test.img bs=1M count=1000
-1000+0 records in
-1000+0 records out
-1048576000 bytes (1.0 GB) copied, 3.33709 s, 314 MB/s
-----
-
-
-1GB Block Size 1K (1000000 blocks)
-
-----
-[root@zion home]# dd if=/dev/zero of=./test2.img bs=1K count=1000000
-1000000+0 records in
-1000000+0 records out
-1024000000 bytes (1.0 GB) copied, 7.99295 s, 128 MB/s
-----
-
-[[ext4-1]]
-==== Ext4
-
-1GB Block Size 1M (1000 blocks)
-
-----
-[root@zion home]# dd if=/dev/zero of=./test.img bs=1M count=1000
-1000+0 records in
-1000+0 records out
-1048576000 bytes (1.0 GB) copied, 12.4808 s, 84.0 MB/s
-----
-
-1GB Block Size 1K (1000000 blocks)
-
-----
-[root@zion home]# dd if=/dev/zero of=./test2.img bs=1K count=1000000
-1000000+0 records in
-1000000+0 records out
-1024000000 bytes (1.0 GB) copied, 13.767 s, 74.4 MB/s
-----
-
-[[raid-1]]
-=== RAID 1
-
-[[btrfs-1]]
-==== Btrfs
-
-1GB Block Size 1M (1000 blocks)
-
-----
-[root@zion home]# dd if=/dev/zero of=./test.img bs=1M count=1000
-1000+0 records in
-1000+0 records out
-1048576000 bytes (1.0 GB) copied, 3.61043 s, 290 MB/s
-----
-
-1GB Block Size 1K (1000000 blocks)
-
-----
-[root@zion home]# dd if=/dev/zero of=./test2.img bs=1K count=1000000
-1000000+0 records in
-1000000+0 records out
-1024000000 bytes (1.0 GB) copied, 9.35171 s, 109 MB/s
-----
-
-
-[[ext4-2]]
-==== Ext4
-
-1GB Block Size 1M (1000 blocks)
-
-----
-[root@zion home]# dd if=/dev/zero of=./test.img bs=1M count=1000
-1000+0 records in
-1000+0 records out
-1048576000 bytes (1.0 GB) copied, 8.00056 s, 131 MB/s
-----
-
-1GB Block Size 1K (1000000 blocks)
-
-----
-[root@zion home]# dd if=/dev/zero of=./test2.img bs=1K count=1000000
-1000000+0 records in
-1000000+0 records out
-1024000000 bytes (1.0 GB) copied, 9.3704 s, 109 MB/s
-----
-
-
-Those aren't exactly dazzling write speeds, but they're also not too bad,
-given what's happening in the background and that I'm using three standard
-7200 rpm desktop drives with 64MB of cache apiece. Later down the line I
-might test this with a RAID 0 to see what the max speed of these drives is
-(though it should predictably be three times the single drive speed).
-
-
-[[final-thoughts]]
-== Final Thoughts
-
-My favorite thing about this at this point is the layer of abstraction that
-doing RAID through mdadm provides (we all know how much Linux folk love
-modularity). Using the RAID functionality in Btrfs means I am tied to using
-that filesystem. If I ever want to use anything else, I'm stuck unless what I
-want to move to has its own implementation of RAID. With mdadm, however, I
-can use any filesystem I want, whether it supports RAID or not. Additionally,
-the setup wasn't too difficult either. Overall, I think (like anyone cares
-what I think though) that they've done a pretty great job with this.
-
-Many thanks to the folks who contributed to mdadm and the Linux kernel that
-runs it all (all 20,000-ish of you). I and many many other people really
-appreciate the great work you do.
-
-With that, I'm going to sign off and continue watching my cat play with/attack
-the little foil ball I just gave her.
-
-
-
-Category:Linux
-Category:Btrfs
-Category:Ext4
-Category:Storage
-Category:RAID
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Linux:Secure_Authentication.ascii b/src/Linux:Secure_Authentication.ascii
deleted file mode 100644
index 9b21934..0000000
--- a/src/Linux:Secure_Authentication.ascii
+++ /dev/null
@@ -1,264 +0,0 @@
-Linux:Secure Authentication
-===========================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-:github: https://github.com/nullspoon/
-
-
-== {doctitle}
-
-**Edit**: I wrote the script for automating this finally. It can be found on my
-link:{github}/keymanage[GitHub].
-
-In my experience, Linux authentication seems to be one of those problems with
-so many answers. It's hard to define even a range of methodologies that could
-be considered right, let alone narrowing it down to one or two. I've been
-dealing with this quite a bit at work recently and would like to post here an
-idea I had. Just to be warned, this idea was not accepted for our solution,
-despite no one being able to give me more than one reason not to use it,
-which I will detail at the end of this post along with any other exploits I
-can imagine for this authentication methodology.
-
-[[in-a-perfect-world...]]
-== In a perfect world...
-
-In a perfect world, chroot environments would work securely and our app
-developers and third party vendors would write code on par with apache or
-openssh which could be started as root and spawn child processes in user space
-for security. All application files would fit nicely into the defined standards
-for Linux filesystem organization so we could package everything up nicely and
-deploy using repo servers. To top it all off, all applications would roll their
-own logs instead of filling up /var/log or somewhere on / since they rarely
-follow standards. However, this is rarely if ever the case (I've never seen it
-at least).
-
-What I've seen up to this point is third party applications that install
-themselves exclusively in /opt; applications that are hard coded to not start
-unless running as uid 0 (root); binary startup scripts that situate themselves
-in /etc/rc.d/init.d/ (wtf guys?); and just general stubbornness as to where
-the program is located.
-
-[[securing-an-application-server]]
-== Securing an Application Server
-
-The first step I typically take to securing applications is to run them in user
-space as a service account with access only to its directory in the /apps mount
-point. I put that one to use on my own servers and it has served me very well.
-However, with this we have a few problems.
-
-[[accessing-service-accounts]]
-== Accessing Service Accounts
-
-While security does tend to introduce complications and interruptions into
-workflow, it shouldn't be catastrophic. If your security measures are so
-strict, your users can't do what they need to, you're doing it wrong. Simply
-running in userspace introduces several problems. A few for example...
-
-1. How do your users get to their service accounts in a secure way (no shared
- passwords or keys)?
-
-2. How do your users transfer files to and from their servers since they can't
- directly access the service accounts?
-
-3. How do you manage this web of shared account access without it consuming
- much of your time?
-
-Specifically, a solution is needed for the users to access their service
-accounts in an accountable and auditable way without hindering their ability to
-do their jobs [too much].
-
-This has been a problem myself and some fellow engineers have struggled with
-for a while now. Here's a few common service account authentication mechanisms
-that I'm sure we've all seen that aren't necessarily the greatest.
-
-
-[[service-account-passwords]]
-=== Service Account Passwords
-
-1. They need to be shared for multiple users to have access
-
-2. They can be shared without the admins knowing (no accountability)
-
-3. They have to be routinely changed which causes a huge headache for everyone
- involved, os and app admins alike
-
-
-[[service-account-keys]]
-=== Service Account Keys
-
-1. They need to be shared for multiple users to have access
-
-2. They can be shared without the admins knowing (no accountability)
-
-3. They have to be routinely changed which causes a slightly lesser headache
- than passwords for everyone involved, os and app admins alike
-
-
-[[sudo]]
-=== Sudo
-
-Sudo provides a pretty clean solution to the problem. It allows you to
-limit who has access to the service account, as well as log who uses it and
-when. Just put your application admins into their own group and give
-that group explicit access to run ONE command...
-
-[[sudo-su---service_account]]
-==== sudo su - service_account
-
-This one is tremendously popular for very obvious reasons. However, despite
-using sudo, this one still has problems
-
-1. Your end users can't perform file transfers between their boxes since they
-   can't directly access their service accounts without a key or password
-
-2. We still lack accountability. Once the user is in a sudo'd shell, their
- commands are no longer logged.
-
-3. Managing this across an environment can be a very time consuming thing
-   unless you have a source on a server that you propagate out, but then you
-   have to deal with server compliance.
-
-Granted, there is a pretty obvious _Unixy_ solution to this, but it involves
-your users all being in the same group as your service account, mucking
-around with umasks that unset themselves on reboot unless explicitly set, and
-making sure your sticky bit sticks.
-
-There is another way though.
-
-[[my-poorly-formed-idea]]
-== My Poorly Formed Idea
-
-My idea uses a combination of the crontab, jump hosts, ssh keys, and segregated
-networks.
-
-Start with two (or more) segregated networks: one for administration, and
-several for operations. You will probably want three for operations:
-production, QA, and dev.
-
-From there, you put your servers in your operations networks and set up
-firewall or routing rules to only allow ssh (port 22 or whatever port you
-prefer) traffic between the administration network and the operations networks.
-Your operations networks should now only be accessible for users using the
-applications and admins coming in from the administration network using ssh.
-
-Next, build out a jump box on your administration network. One per
-application would be ideal for separation of concerns, but one for all apps
-should work well also. For sake of simplicity, we'll assume a single jump
-host.
-
-Next, put all of your service accounts on that jump host with their own home
-directories in /apps. This assumes you have defined and reserved UIDs and GIDs
-for each of your service accounts so they can be on one system without
-conflicts. Provide sudo access to each user group to _sudo su -
-<service_account>_ into their respective service accounts on the jump host.
-
-At this point, the application admins/owners still don't have access to their
-service accounts on the operations servers. Here's where they get that
-access, using rotating ssh keys. Write a script to generate ssh keys (I'll
-post the source for mine later; a rough sketch follows below), ssh out to a
-box using the key to be replaced, push the new key, and remove the old key
-and any others while using the new key. This allows you to schedule key
-changes automatically using cron. With that in place, just have the script
-swap out each service account's key every x minutes (15 or 30 is what I have
-in mind). Once you've got the key exchange working, modify the sshd_config
-files throughout your environment to disallow password login over ssh for all
-users; that way, if your users do set a password to try to circumvent your
-security, it won't be accepted anyway. You can also just disable password
-changing.
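-
-A minimal sketch of one rotation, assuming RSA keys in the default id_rsa
-location (the account and host names are placeholders, and this is not the
-full script referenced above):
-
-----
-#!/usr/bin/env bash
-acct=appsvc
-host=app01.example.com
-key=$HOME/.ssh/id_rsa
-
-# Generate the replacement keypair (no passphrase; it only lives x minutes)
-ssh-keygen -q -t rsa -b 4096 -N '' -f "${key}.new"
-
-# Authenticate with the current key to push the new public key
-ssh -i "$key" "$acct@$host" 'cat >> ~/.ssh/authorized_keys' < "${key}.new.pub"
-
-# Authenticate with the new key to remove every other key
-ssh -i "${key}.new" "$acct@$host" \
-  "grep -F '$(cat ${key}.new.pub)' ~/.ssh/authorized_keys > ~/.ssh/ak.tmp \
-   && mv ~/.ssh/ak.tmp ~/.ssh/authorized_keys"
-
-# Swap the new key into place locally
-mv "${key}.new" "$key" && mv "${key}.new.pub" "${key}.pub"
-----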
-
-[[pros]]
-== Pros
-
-[[operations-networks-become-a-black-box]]
-=== Operations Networks Become a Black Box
-
-With this method, there is only one way in to every single operations
-box. That one way in is in a secured subnet, presumably accessible only
-through a vpn or when on site.
-
-[[file-transfers-are-seamless]]
-=== File Transfers are Seamless
-
-Users can use scp or sftp to transfer files seamlessly using the jump host as
-the medium. If the keys are always regenerated as id_rsa, or the ssh config
-file is set up for each account, key regeneration won't affect anyone because
-it takes milliseconds to overwrite the old key with the new one, so any new
-connections out will use the new key. End users shouldn't even see an effect.
-
-[[safety-despite-turnover]]
-=== Safety Despite Turnover
-
-If your company has any measure of turnover, you've undoubtedly gone through
-the password and key change process after an employee leaves the team. With
-this method, you're automatically changing the key every X minutes, so even if
-they do get the key, it'll only be valid for a very short while.
-
-[[lower-licensing-costs]]
-=== Lower Licensing Costs
-
-Many companies, through the use of additional software such as OpenLDAP,
-Samba, or some other third party product, put their Linux/Unix servers on
-their Windows domain. A perk of this is that it provides Linux access to your
-AD users without having to manage a few hundred or thousand passwd, group,
-and shadow files. The downside is that if a third party product is used, it
-costs a lot of money in licenses. With the jump host rotating key model, you
-can put just the jump host(s) on the domain and leave all operations servers
-off of it. That saves on licensing costs, maintenance time, and software
-installs. It also removes yet one more service running on your operations
-boxes, which removes one more access point for exploitation. Additionally,
-the fewer pieces of software running on a server, the less chance an update
-will break the applications it's hosting.
-
-
-[[clean-home-directories]]
-=== Clean Home Directories
-
-Next up, clean home directories. If you have an entire team of developers
-and/or application admins logging into every operations system, /home is
-going to be very large on lots of systems, costing money for backups (if you
-back home directories up, that is), wasting storage space (which is fairly
-cheap these days though), and spreading your users' files around, making them
-cumbersome for everyone to manage, including non system admins. With the jump
-host rotating key method, all of your home directories are on one host, so
-file management for the support staff is much easier.
-
-
-[[cons]]
-== Cons
-
-
-[[single-point-of-failure]]
-=== Single Point of Failure
-
-This is the one objection I heard from people at work. It can be mitigated in
-at least two ways. One is by having one jump host per application. That still
-beats putting hundreds or thousands of systems in AD, with all the management
-and licensing costs that go with that. Another way to mitigate this is to
-have a secondary jump host and set up rsync to synchronize the primary jump
-host with the backup, using the backup as a hot standby.
-
-
-[[single-point-of-global-access]]
-=== Single Point of Global Access
-
-This is the one problem with this idea that I think is most relevant and
-potentially exploitable. However, if your administration boxes are on a
-network that is not reachable from anywhere but controlled locations, this
-shouldn't be too big of a deal. If a mistake is made in the network security
-or routing and a malicious user gets to a jump host, they still have to get
-into the service accounts, which are inaccessible except through sudo, which
-means the malicious user has to exploit an existing account. Without that
-account's password though, they can't sudo, so they would only have access to
-that one user's files. Even if they could sudo, they would still only have
-access to the service accounts that user works with, so their impact would be
-minimal unless that user works on very high profile applications. To sum it
-up, there are three very solid security measures in place (network
-segregation, user accounts, limited sudo access requiring passwords) that the
-malicious user has to get through before having any really impacting access.
-
-
-Category:Linux
-Category:Security
-Category:Authentication
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Linux:Symantec_VIP_Access.ascii b/src/Linux:Symantec_VIP_Access.ascii
deleted file mode 100644
index 99808a3..0000000
--- a/src/Linux:Symantec_VIP_Access.ascii
+++ /dev/null
@@ -1,32 +0,0 @@
-Linux:Symantec Vip Access
-=========================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-If your company has a vpn and likes to have the illusion of security, they
-may use two-factor authentication to gain access to the vpn (as if certs
-weren't good enough, we've got to use proprietary algorithms with who knows
-how many backdoors).
-
-You may be experiencing issues with this if you're running linux. You may
-also be experiencing issues if you don't want to sacrifice 40G of hard drive
-space to a Windows virtual machine. If you fit into either or both of these
-categories, this is the post for you. It turns out that we can finally get
-Symantec VIP Access to run on Linux through wine.
-
-The trick... (because I don't have time to write this full post)
-
-----
-winetricks wsh57
-----
-
-THEN run the installer.
-
-To be continued. It still won't generate a key or even open after this.
-Installation works fine though.
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Linux:System_Encryption.ascii b/src/Linux:System_Encryption.ascii
deleted file mode 100644
index e9ff71b..0000000
--- a/src/Linux:System_Encryption.ascii
+++ /dev/null
@@ -1,155 +0,0 @@
-Linux:System Encryption
-=======================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-As mentioned in a Linux:dm-crypt_Encrypted_Home_Directories[previous post], I
-use dm-crypt with a luks header and the pam-mount module to encrypt and mount
-the home directories on my laptop and server. While this works fantastically,
-it does have a potentially fatal flaw, which is that my operating system is
-readily available to a would-be attacker. For instance, if they were skilled
-enough (which I am not), they could modify any number of applications on my
-system to quietly dump or send my encryption password the next time I mount
-my home directory, thus defeating my security. Further, my system is readily
-available to any linux user good with mounting and chroot (which is probably
-most of us), and thus one could do all kinds of mischief on the unencrypted
-system partition of my computer.
-
-I'm sure this is a bit tin-foil hatted of me. I have nothing to hide (though
-it's not about that, it's a matter of principle). Further, there is no one
-[_that I know of_] who would be *that* interested in me or my data. Despite
-that, this is a very cool thing that I am doing purely because it can be done
-(in slang, I believe the term is "the cool factor").
-
-[[a-preliminary-note]]
-== A Preliminary Note
-
-I would not recommend this be done for servers or multi-user laptops or
-desktops. This process requires that a password be typed or a key be
-available every time the system is booted, which requires physical presence.
-Since most servers are administered and used remotely over a network, a
-reboot would mean a service outage until someone was able to open a local
-terminal to type the password (to say nothing of having to share the password
-with multiple people).
-
-[[overview]]
-== Overview
-
-Due to the scope of this post, and because I don't want to focus on
-documenting other tasks that are more generic and less related to the actual
-encryption of the system, I will not be covering how to back up your system
-or how to partition your drive.
-During the installation process we will...
-
-. Set up encryption
-. Modify the grub defaults so it properly sets up the loop device on boot
-. Modify the Initramfs Configuration (this one is Arch Linux specific)
-
-[[setting-up-encryption]]
-Setting Up Encryption
-~~~~~~~~~~~~~~~~~~~~~
-
-We're going to assume here that the system partition will be installed
-on sda2. With that, let's "format" that with luks/dm-crypt.
-
-WARNING: Again, back up your data if you haven't already. This will irrevocably
- destroy any data on the partition [unless you are good with data
- recovery tools].
-
-----
-cryptsetup luksFormat /dev/sda2
-----
-
-And so our installation can continue, the loop device needs to be set up and a
-filesystem created
-
-----
-# Open the encrypted container to the system map device (though you can name it whatever you want)
-cryptsetup luksOpen /dev/sda2 system
-# ...Type the password
-# Create the filesystem here - I use btrfs
-mkfs.your_choice /dev/mapper/system
-# Mount the filesystem
-mount /dev/mapper/system /mnt/ # Or wherever your distro's installation mount point is
-----
-
-Now that this is done, it's time to re-install or copy from backups your system
-to the new encrypted container.
-
-[[modifying-the-grub-defaults]]
-Modifying the Grub Defaults
-~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Now that the system partition is set up and our system re-installation is
-complete, it's time to configure Grub so it knows the system partition is
-encrypted. Without this step, you won't get past the initramfs, since an
-encrypted system partition without a password is effectively useless. Here I
-will again assume your system partition is on /dev/sda2.
-
-Change...
-
-./etc/default/grub
-----
-...
-GRUB_CMDLINE_LINUX_DEFAULT="quiet"
-...
-----
-
-...to ...
-
-./etc/default/grub
-----
-...
-
-GRUB_CMDLINE_LINUX_DEFAULT="cryptdevice=/dev/sda2:system quiet"
-...
-----
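-
-After changing the defaults file, grub's actual configuration needs to be
-regenerated for the change to take effect. On Arch (and most other grub2
-distributions), that looks like
-
-----
-grub-mkconfig -o /boot/grub/grub.cfg
-----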
-
-
-[[modifying-the-initramfs-configuration]]
-Modifying the Initramfs Configuration
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-This part is oriented towards https://archlinux.org[Arch Linux]. Modifying the
-initramfs generation configuration is something that varies from distribution
-to distribution. I run Arch, so Arch it is! (let me know though if you want to
-know how to do it on another distribution and I'll figure it out and update the
-post).
-
-This is actually very simple on Arch. Simply open _/etc/mkinitcpio.conf_
-and edit the *HOOKS* line. What matters here is that the *encrypt* hook
-occurs _before_ the *filesystems* hook.
-
-./etc/mkinitcpio.conf
-----
-...
-HOOKS="base udev autodetect modconf block encrypt filesystems keyboard fsck"
-...
-----
-
-Once you've done that, save and close the config file and run
-
-----
-mkinitcpio -p linux
-----
-
-You should be able to now reboot your system and it will prompt you for a
-password immediately after grub. If you were successful, you should be brought
-to a screen that looks something like...
-
-[role="terminal"]
-----
-A password is required to access the sda volume:
-
-Enter passphrase for /dev/sda2:_
-----
-
-
-Category:Encryption
-Category:Security
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Linux:Using_Bash_to_Generate_a_Wordlist.ascii b/src/Linux:Using_Bash_to_Generate_a_Wordlist.ascii
deleted file mode 100644
index 5aa30fe..0000000
--- a/src/Linux:Using_Bash_to_Generate_a_Wordlist.ascii
+++ /dev/null
@@ -1,84 +0,0 @@
-Linux:Using Bash to Generate a Wordlist
-=======================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-A few weeks ago my wife forgot her KeePass password. She can remember most of
-it, but there are certain portions of it she can't quite get (looks like the
-muscle memory didn't stick too well). With that, she asked me if there was a
-way for me to recover the password to her KeePass database. While the chances
-are slim, I figured if I could generate a pertinent wordlist, I could save a
-lot of time over having http://www.openwall.com/john/[John] incrementally try
-every possible password all the way up to 22 characters (Which happens to be
-_3,807,783,932,766,699,862,493,193,563,344,470,016_ possibilities totalling
-about _120,744 septillion years_ of crack time at 1000 hashes per second).
-
-
-[[inline-array-expansion]]
-Inline Array Expansion
-~~~~~~~~~~~~~~~~~~~~~~
-
-To do this, we're going to use one of bash's lesser-known features:
-*inline array expansion* (officially, the bash manual calls this _brace
-expansion_).
-
-If you've ever looked up how to manually create a maildir directory, you've
-likely seen something like this
-
-----
-mkdir -p ./your_dir/{cur,new,tmp}
-----
-
-At runtime, bash will expand that command to three separate commands
-
-* mkdir -p ./your_dir/cur
-* mkdir -p ./your_dir/new
-* mkdir -p ./your_dir/tmp
-
-Another good example of this functionality would be creating a new home
-directory.
-
-----
-mkdir -p /home/username/{Desktop,Documents,Downloads,Music,Pictures,Videos}
-----
-
-[[generating-the-wordlist]]
-Generating the Wordlist
-~~~~~~~~~~~~~~~~~~~~~~~
-
-Applying this to generating a wordlist is very similar to creating
-"arrays" of nested directories in a single command. To generate a
-wordlist, we'll use the _echo_ command instead of mkdir (of course).
-We'll also use varying combinations of arrays in a single line.
-
-
-[[example]]
-Example
-^^^^^^^
-
-Suppose the password you want to work on is something like __password1234__.
-However, what we don't know is the order of the _1234_ at the end. We also
-don't know if the first letter is capitalized or not, or if the actual password
-uses 0's in lieu of o's, 4's in lieu of a's, or 5's in lieu of s's. Let's see
-what we can do about this.
-
-----
-echo {p,P}{4,a,A}{5,s,S}w{0,o,O}rd{1,2,3,4}{1,2,3,4}{1,2,3,4}{1,2,3,4} > possible_passwords
-----
-
-That should produce a file containing 13,824 words (2 x 3 x 3 x 3 x 4^4^).
-However, due to the way the expansion is processed, the words are separated
-by spaces rather than newlines. To remedy this, just do a quick sed
-expression (or awk if you like)
-
-----
-sed -i 's/ /\n/g' ./possible_passwords
-----
-
-With that, you now have a wordlist primed and ready for use with john.
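-
-For completeness, a sketch of putting the list to use. This assumes the jumbo
-build of John the Ripper, which ships the keepass2john helper; the file names
-are made up.
-
-----
-# Extract a crackable hash from the KeePass database
-keepass2john database.kdbx > keepass.hash
-
-# Try every word in the generated list
-john --wordlist=possible_passwords keepass.hash
-----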
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Linux:Vpnc_Restart_Script.ascii b/src/Linux:Vpnc_Restart_Script.ascii
deleted file mode 100644
index d7f7d0a..0000000
--- a/src/Linux:Vpnc_Restart_Script.ascii
+++ /dev/null
@@ -1,47 +0,0 @@
-Linux: VPNC Restart Script
-==========================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-The VPN at my company is very... spotty... at best. When working from home,
-it used to boot you about once every hour. For whatever reason though, it has
-recently started booting sessions every five minutes. Now, the solution is of
-course to speak with our networking folks rather than to write a script to
-fix the issue on a client by client basis. Unfortunately, due to the culture
-and various political situations, the networking folks will not fix this
-because they don't believe it's an issue. All opinions aside, this sounds
-like an opportunity for a nice shell script.
-
-To start things off, on my Linux box I use vpnc from the command line as I
-don't want to install network manager due to additional resource consumption
-(albeit a very small amount). That said, throw the following script in
-+~/bin/vpnconnect+ and include +~/bin+ in your PATH variable (+export
-PATH=~/bin:$\{PATH}+).
-
-[[source]]
-== Source
-
-_Edit_: Found a pretty sizeable flaw in my script. Pulled the source until I
-can sort it out.
-
-
-[[order-of-operations]]
-== Order of Operations
-
-. Check if vpnc is already running
- * Start if it is not running
-. Start an infinite loop
- . Sleep 5 to keep from using too many resources
- . Check cpu time on pid - if it is greater than 1 minute
- * Kill pid and restart vpnc
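-
-Until the fixed source goes back up, here is a minimal sketch of that flow.
-This is not the original script - just the order of operations above, and it
-assumes vpnc daemonizes itself and that pgrep is available.
-
-----
-#!/usr/bin/env bash
-# Start vpnc if it isn't already running (-x matches the name exactly,
-# so this script itself isn't counted)
-pgrep -x vpnc > /dev/null || vpnc
-
-while true; do
-  # Sleep to keep from using too many resources
-  sleep 5
-  pid=$(pgrep -x vpnc | head -n 1)
-  # cputime is formatted [dd-]hh:mm:ss; the second-to-last field is minutes
-  mins=$(ps -o cputime= -p "$pid" | awk -F: '{print $(NF-1)}')
-  # More than a minute of cpu time: kill and restart
-  if [ "${mins:-0}" -gt 0 ]; then
-    kill "$pid"
-    vpnc
-  fi
-done
-----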
-
-
-
-Category:Linux
-Category:Drafts
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Linux:dm-crypt_Encrypted_Home_Directories.ascii b/src/Linux:dm-crypt_Encrypted_Home_Directories.ascii
deleted file mode 100644
index 8ee0e94..0000000
--- a/src/Linux:dm-crypt_Encrypted_Home_Directories.ascii
+++ /dev/null
@@ -1,213 +0,0 @@
-Linux:dm-crypt Encrypted Home Directories
-=========================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-There are three primary methods for encrypting one's home directory seamlessly
-in Linux: http://en.wikipedia.org/wiki/Dm-crypt[dm-crypt],
-http://ecryptfs.org/[eCryptFS], and http://www.arg0.net/encfs[EncFS]. All
-differences aside, this post will cover dm-crypt (as indicated by the title of
-course). A few things to note before going forwards though. First, this method
-is by no means the standard. I'm not even sure if there is a standard way to do
-this. This is just the way I've done it, and it has worked out swimmingly thus
-far on more than one computer. Secondly, my method detailed here will use
-something called http://code.google.com/p/cryptsetup/[LUKS]. I highly recommend
-this, if not just for convenience. While it does have its pitfalls, they
-shouldn't be too bad if you keep a backup of your data. Really though, when
-encrypting, you should _always_ keep more than one copy of your data in case
-something goes awry.
-
-Before proceeding, here is a list of what this will give you once completed, so
-you can decide if this is what you want before reading this monolithic post.
-
-. Users will each have their own encrypted home directory.
- * Each home directory will be unlocked using the user's own password.
-  * Users have complete storage anonymity. Even root can't tell how many
- files they are storing, filenames, or even how much data they have unless
- the user is logged in at the time of inspection.
-. User's home directories will be seamlessly decrypted and mounted at login.
-. Users will have their own virtual device, so they will have a storage
- "quota". To expand it, the virtual device needs to be extended on its own
- (some might consider this cumbersome).
-
-
-[[setup]]
-== Setup
-
-This should be relatively simple. Install a package likely called *cryptsetup*
-(most of the mainstream distros should have it). This is the utility we will be
-using to manage dm-crypt volumes. Note also that cryptsetup can be used for
-managing more than just dm-crypt and luks. It also works with Truecrypt (much
-to my excitement a few months ago when I needed to extract some data from a
-Truecrypt volume, but didn't want to install it because of all the suspicion
-surrounding it lately).
-
-[[modifying-pam]]
-=== Modifying PAM
-
-[[etcpam.dsystem-auth]]
-==== /etc/pam.d/system-auth
-
-This piece assumes your distribution puts this file here and that it is named
-this. Unfortunately, I can't really write this part to be distribution-agnostic
-as most of them do this differently to an extent. The contents of the file
-will likely look similar, despite its name. For anyone wondering though, this
-section is written from an Arch Linux instance.
-
-Open /etc/pam.d/system-auth in your favorite editor. Be sure to do this either
-with sudo or as root or you won't be able to save your changes.
-
-Here we need to put in calls to a module called pam_mount.so so it will be
-called at the right time to pass the user's password to the mount command,
-allowing for seamless encrypted home directory mounting. Pay attention to where
-the calls to pam_mount.so are. Order is very important in this file.
-
-NOTE: Many distributions use eCryptFS as their default encryption for home
- directories. They do it this way as well, but using pam_ecryptfs.so
- instead of pam_mount.so.
-
-./etc/pam.d/system-auth
-----
-#%PAM-1.0
-
-auth required pam_unix.so try_first_pass nullok
-auth optional pam_mount.so
-auth optional pam_permit.so
-auth required pam_env.so
-
-account required pam_unix.so
-account optional pam_permit.so
-account required pam_time.so
-
-password optional pam_mount.so
-password required pam_unix.so try_first_pass nullok sha512 shadow
-password optional pam_permit.so
-
-session optional pam_mount.so
-session required pam_limits.so
-session required pam_unix.so
-
-session optional pam_permit.so
-----
-
-
-[[etcsecuritypam_mount.conf.xml]]
-==== /etc/security/pam_mount.conf.xml
-
-This is the configuration file used by pam_mount when the user logs in.
-Depending on your distribution, it may or may not already be set up the way we
-need for this.
-
-Just before the +</pam_mount>+ at the end of the xml file, insert the following
-lines.
-
-./etc/security/pam_mount.conf.xml
-----
-...
-
-<volume fstype="crypt" path="/home/.%(USER)" mountpoint="/home/%(USER)" options="space_cache,autodefrag,compress=lzo" />
-<mkmountpoint enable="1" remove="true" />
-
-</pam_mount>
-----
-
-Before proceeding, there are a couple of assumptions that I need to mention
-about the way I do this here.
-
-. My home directories are all formatted with btrfs. If you're not using that,
- then remove the *autodefrag,compress=lzo* piece in the options section.
-
-. The encrypted block device files are located at */home/.USERNAME* (note the
- dot).
-
-
-[[creating-an-encrypted-home-per-user]]
-=== Creating an Encrypted Home Per User
-
-The creation of each user's home directory has a few fairly simple steps [if
-you've been using the Linux command line for a bit]. For the sake of more
-succinct directions, here we will assume a username of __kevin__.
-
-. Allocate user's encrypted home space (assuming 15 gigs)
- * +dd if=/dev/zero of=/home/.kevin bs=1G count=15+
- * This command writes 15 gigabytes of zeros to one file, /home/.kevin
-
-. Encrypt the user's home device
- * +cryptsetup luksFormat /home/.kevin+
- * This command will require the user to enter _their_ password when
- prompted after running the command, as that will be what is passed to
- the file container on login.
-
-. Open the user's new home device (you'll need the user to enter their password
- again)
- * +cryptsetup luksOpen /home/.kevin kevin+
- * This will only be needed the first time around. Kevin can't use this
-    yet because it doesn't have a filesystem and it can't be mounted for
-    the same reason.
-
-. Format the opened dm-crypt device
- * +mkfs.btrfs /dev/mapper/kevin+
- * This is assuming you want to use btrfs. Otherwise you'd use mkfs.ext4
- or some other filesystem of choice.
-
-. Cleanup
- * +cryptsetup luksClose kevin+
- * In this case, _kevin_ can be the alias given to the opened device on
- luksOpen. You can also provide its path at /dev/mapper/kevin.
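-
-For convenience, those five steps wrapped into a throwaway helper might look
-something like this (a rough sketch; the 15 gig size and the script name are
-assumptions from the example above):
-
-----
-#!/usr/bin/env bash
-# mkcrypthome: sketch of the per-user setup steps above (run as root)
-user=${1:?Usage: mkcrypthome <username>}
-
-dd if=/dev/zero of=/home/.${user} bs=1G count=15
-cryptsetup luksFormat /home/.${user}          # user enters their password
-cryptsetup luksOpen /home/.${user} ${user}    # and again to open the device
-mkfs.btrfs /dev/mapper/${user}
-cryptsetup luksClose ${user}
-----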
-
-
-[[how-it-works]]
-== How it Works
-
-When a user logs in, they type their username and password. Those are passed to
-pam, which verifies the user's identity using the _pam_unix.so_ module. If the
-credentials provided by the user are correct, the next step is to pass that
-username and password to the _pam_mount.so_ module. This module runs the
-commands dictated in the pam_mount.conf.xml. The commands pam mount runs (as
-per our earlier configuration) are effectively
-
-----
-cryptsetup luksOpen /home/.${username} _home_${username}
-mount /dev/mapper/_home_${username} /home/${username}
-----
-
-Those commands open the dot file (/home/.username) for the given user with the
-recently provided password. It then mounts that user's decrypted dot file at
-the user's home directory (/home/username).
-
-
-[[backups]]
-== Backups
-
-This kind of encryption makes backups a bit difficult to pull off as the
-administrator. Because you don't have each user's password, you can't back up
-their data. This leaves you with one option - back up the encrypted block
-devices themselves. Depending on how much space each user is given, this can
-take a long time (though rsync helps significantly with that) and a lot of
-space. This is the downside to
-https://wiki.archlinux.org/index.php/Encryption#Block_device_encryption[block
-device encryption].
-https://wiki.archlinux.org/index.php/Encryption#Stacked_filesystem_encryption[Stacked
-encryption] though, while rumored to be less secure for various reasons, allows
-administrators access to encrypted versions of each user's data. With stacked
-encryption, each individual file's contents are encrypted, but the user's
-filenames, paths, and file sizes are still accessible to the administrator(s)
-(hence the rumored security flaw).
-
-As a user though (if you're using this on your laptop for instance), backups
-are simple because the data itself is available to you (you have the password
-after all). This however assumes you have user rights on a remote server to
-rsync your data to. Even if the remote server has the same dm-crypt setup,
-rsync still authenticates with your credentials, so your data can go from an encrypted
-laptop/desktop to an encrypted server.
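-
-For example, a user-side backup can be as simple as the following (the remote
-host and path are hypothetical):
-
-----
-rsync -a ~/ backups@backup.example.com:home-backup/
-----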
-
-
-
-Category:Storage
-Category:Security
-Category:Linux
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Linux_Storage_Devices,_Partitions,_and_Mount_Points_Explained.ascii b/src/Linux_Storage_Devices,_Partitions,_and_Mount_Points_Explained.ascii
deleted file mode 100644
index 58d6a70..0000000
--- a/src/Linux_Storage_Devices,_Partitions,_and_Mount_Points_Explained.ascii
+++ /dev/null
@@ -1,131 +0,0 @@
-Linux Storage Devices, Partitions, and Mount Points Explained
-=============================================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-Earlier today I was talking with a guy in the Ubuntu IRC channel (shout out to
-codemagician!) who was asking how to format a USB stick via the command line.
-Through explaining how it worked, I realized that to someone who isn't already
-very familiar with Linux, the way Linux handles drives can be very confusing,
-especially since you can control almost every step of the mounting process,
-unlike with Windows, which is why Windows is so easy (albeit less functional).
-
-What do you say I do a post on how Linux handles storage devices? Yes? No?
-Great!
-
-[[the-quick-overview]]
-The Quick Overview
-------------------
-
-When you plug in a USB stick (for example) to your linux machine, it is
-assigned a device location (/dev/sd<something>). From there, that new device is
-assigned a mount point (assuming we are using Ubuntu here many Linux distros
-won't auto mount a storage device, even if it is internal). This mount point
-can be located anywhere, but typically is located in /media/. From the folder
-created in /media (or wherever the mountpoint is located), you can indirectly
-read and write data.
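-
-As a concrete example, the manual version of what Ubuntu does for you would
-look roughly like this (device and mount point assumed):
-
-----
-dmesg | tail                  # kernel log shows the new device, e.g. sdb
-mkdir /media/usb              # create a mount point (as root)
-mount /dev/sdb1 /media/usb    # attach the device's first partition to it
-----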
-
-[[the-dev-directory]]
-The /dev/ Directory
--------------------
-
-The /dev/ directory is an interesting one to explain. I probably won't do it
-right, but I'll give it a shot either way. Dev is short for devices. If you
-run ls from within /dev/ you will likely see things like sda, sdb, hda, and
-many more devices.
-
-What do these mean? Basically, each of the files listed in /dev/ is a direct
-pointer to either a physical or a virtual device. This part is actually super
-cool I think. Basically, when you transfer say, a picture, to your usb stick,
-the operating system literally writes the instructions for writing the file (in
-binary) to the device location/file (/dev/sdb for instance), which in turn
-writes it to the USB stick. You may say that's not that neat, but consider your
-audio device. When your music player (amarok, rhythmbox, etc) plays music, it
-literally streams the music file's uncompressed binary audio to the audio
-device file and that is in turn translated by the hardware driver and converted
-into speaker vibrations.
-
-You can actually try this by running a quickie command in the command line. The
-audio device is typically located at /dev/dsp. Pick a file on your hard drive
-that you want to "listen" to (it is likely going to sound like static), and run
-the following command. For this example, I'm going to use a jpeg image.
-
-----
-cat /home/username/Desktop/background.jpeg > /dev/dsp
-----
-
-What we just did there was to redirect the file contents of background.jpeg
-into the device pointed to by /dev/dsp. Mine sounds like static for some time
-(it's a really high resolution jpeg).
-
-If THAT isn't cool, I don't know what is.
-
-
-[[mount-points]]
-Mount Points
-------------
-
-Once your storage device is assigned a device location (IE: /dev/sdb), it then
-needs a mount point that interfaces with the device location. In a less
-complicated fashion, you need a folder that represents the drive. For
-instance, say you plug in your usb stick named Maverick (that's one I formatted
-last night). Ubuntu creates a temporary folder located at /media/Maverick/.
-That became the mount point for my usb stick. All a mount point is, generally
-speaking (I'll get into the technicalities of it in the next paragraph), is
-simply a folder that points to a device location. Ubuntu, Mint, as well as
-Debian all default to creating folders in /media/.
-
-
-[[so-what-do-mount-points-and-device-locations-have-anything-to-do-with-each-other]]
-So what do mount points and device locations have anything to do with each other?
----------------------------------------------------------------------------------
-
-Here's where it gets pretty technical (so much so that I don't fully know how
-this works). Succinctly, a mount point provides an interface that your
-operating system uses to convert data into binary for writing directly to the
-device location. That means that when you copy a picture file to your usb stick
-(IE: /media/Maverick), your operating system converts it to binary and streams
-said binary to the device location associated (IE: /dev/sdb1) with that mount
-point.
-
-[[why-sdabcdef...]]
-Why sda,b,c,d,e,f...?
----------------------
-
-The sd part of that stands for SCSI disk (though these days it covers SATA
-and USB drives too). The a, b, c, etc. is simply an
-incrementing value assigned to your drive. If you plug in a usb drive, it will
-be assigned sdb. If you plug in a second, it will be assigned sdc. If you plug
-in a third, it will be assigned sdd, and so on.
-
-[[how-do-you-explain-the-number-at-the-end-of-my-device-locations-ie-devsdb1]]
-How do you explain the number at the end of my device locations (IE: /dev/sdb1)?
---------------------------------------------------------------------------------
-
-That number represents the partition. For instance, your local hard drive is
-device sda (presumably it was the first drive to be plugged in since your
-computer is running off of it). Your hard drive has partitions (these are like
-virtual sections in your hard drive; with them you can divide your hard drive
-into one or more pieces; mine is divided into 8, actually). Typically usb sticks
-only have one partition.
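-
-If you want to see your own drive's partitions, fdisk will happily list them
-(device name assumed, run as root):
-
-----
-fdisk -l /dev/sda
-----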
-
-That's all for now. I think I covered just about everything. If I missed
-anything, please let me know in the comments section and I'll add it on as soon
-as I get the chance.
-
-Now if you will all excuse me, I was at work at 2:00 this morning and
-desperately need sleep. Don't break too many things on your computer by
-redirecting file output to random devices now. I'm watching you...really.
-http://xkcd.com/838/[So is Santa].
-
-:)
-
-
-
-Category:Linux
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Lucid_Lynx_Release_Date.ascii b/src/Lucid_Lynx_Release_Date.ascii
deleted file mode 100644
index 53da0f4..0000000
--- a/src/Lucid_Lynx_Release_Date.ascii
+++ /dev/null
@@ -1,32 +0,0 @@
-Lucid Lynx Release Date
-=======================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-I would like to take a few seconds to mention that in one day the latest
-version of Ubuntu, 10.04 Lucid Lynx, will be released.
-
-In light of this event, I would like to point out a little known fact about the
-versioning system for Ubuntu.
-
-Recently I discovered the reason for the seemingly massive jumps in numbers
-from version to version on a friend's blog. Canonical releases a new version
-every six months. The last version was Karmic Koala (v9.10), released in
-October of 2009. The version before that, Jaunty Jackalope (v9.04), was
-released in April of 2009. In other words, the version number is the
-[year].[month] of the release. A bit clever, despite the lack of subversions.
-
-My worry though is still what we will do when we get to a version starting with X
-(Xciting Xray???) or Y (Yippidy Yanky???).
-
-Enjoy the new and shiny distribution version everyone!
-
-
-Category:Ubuntu
-Category:Linux
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/MPlayer:Recursively_Play_All_Files.ascii b/src/MPlayer:Recursively_Play_All_Files.ascii
deleted file mode 100644
index 2252416..0000000
--- a/src/MPlayer:Recursively_Play_All_Files.ascii
+++ /dev/null
@@ -1,75 +0,0 @@
-MPlayer:Recursively Play All Files
-==================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-I've researched this one before and there doesn't seem to be a real standard
-for how to do this (such as a -r switch for recursive play). Granted, when in
-Linux is there a standard for something that doesn't really need to be
-standardized? In Linux land, there's usually a minimum of several ways to do
-something right. Figuring out newer and more efficient ways of doing things is
-fun! That said, I'm going to contribute http://xkcd.com/927/[my way of doing
-this] to the mix.
-
-To do this, we are going to need a magical (ooo, shiny) bash one liner that
-involves a little http://tldp.org/LDP/abs/html/process-sub.html[process
-substitution] (ksh, sh, and csh users, sorry. Those shells don't support
-process substitution).
-
-----
-mplayer -playlist <(find /path/to/music -type f -name \*.ogg)
-----
-
-[[what-just-happened]]
-== What just happened?!
-
-What we just did there was perform process substitution. When you run the
-**find /path/to/music -type...**, a process is started up. What the *<()*
-around the command does is create a link to the output of the pid at
-/dev/fd/63. A quick _ls -l_ will show us this.
-
-----
-[nullspoon@null music]$ ls -l <(find /path/to/music/ -name \*.ogg)
-lr-x------ 1 nullspoon nullspoon 64 Jun 14 10:00 /dev/fd/63 -> pipe:[59723]
-----
-
-If you want to see the contents of that file, you can simply just run the find
-command without anything else. If you want to see it in vim like you're editing
-it, replace _mplayer -playlist_ with __vim__. This will be like running +vim
-/dev/fd/63+.
-
-----
-vim <(find /path/to/music -type f -name \*.ogg)
-----
-
-Now, if you really wanted to get crazy, you could append to the
-find command a bit to listen only to music with names that have a 7 in
-them.
-
-----
-mplayer -playlist <(find /path/to/music/ -name \*.ogg | grep 7)
-----
-
-... Or sort our music backwards?
-
-----
-mplayer -playlist <(find /path/to/music/ -name \*.ogg | sort -r)
-----
-
-... Or a random sort?!
-
-----
-mplayer -playlist <(find /path/to/music/ -name \*.ogg | sort -R)
-----
-
-The last one is kind of pointless since mplayer has a *-shuffle* switch. I
-guess you could combine the two and get _doubly_ shuffled music! I think Chef
-Elzar would have something to say about that. "BAM!!!"
-
-
-Category:Linux
-
-// vim: set syntax=asciidoc:
diff --git a/src/Managing_Linux_with_Linux.ascii b/src/Managing_Linux_with_Linux.ascii
deleted file mode 100644
index 7984ffd..0000000
--- a/src/Managing_Linux_with_Linux.ascii
+++ /dev/null
@@ -1,56 +0,0 @@
-Managing Linux with Linux
-=========================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-It seems that many companies that run Unix/Linux servers don't let their admins
-run Unix or Linux. I'm not going to speculate about the preferences of other
-admins out there, but for myself, Windows XP, or even Windows 7 is nothing in
-comparison to desktop Linux. For me, the two most frustrating things I miss
-about desktop Linux while at work are transparent windows and a real Linux
-terminal (sorry PuTTY and KiTTY, I just have too many issues while using you).
-The transparent windows I miss mostly because I write scripts just about all
-day while continuing to monitor our environment. It'd just be nicer having a
-full screen terminal that was semi-transparent so I could see our dashboards
-without having to change windows. Sure hot keys are good, but transparency is
-better.
-
-Anyways, I recently decided to try an experiment. I had a spare desktop laying
-around at work, so I installed Linux. My team uses private keys to log in to
-everything (trust me on this, there is a lot of everything). We have several
-passworded private keys that we use to get in to different boxes. One upside to
-PuTTY and KiTTY is that they come with Pagent. Pagent basically keeps your
-passworded private keys loaded in memory and tries to use them with each new
-ssh session. This is nice, but how do we do this in Linux?
-
-The answer: ssh-agent.
-
-Like Pagent, the ssh-agent is a daemon that runs in the background and keeps
-the keys you have added in memory. I ran into one small issue with using it
-though. An ssh-agent instance is tied to a bash session. If for instance, you
-try to run ssh-add on a bash session without an ssh-agent running in it, you
-will receive the error
-
-----
-Could not open a connection to your authentication agent.
-----
-
-The way to fix this is to put the following line in your .bash_profile:
-
-----
-eval $(ssh-agent)
-----
-
-If you really want to get crazy, you can even put ssh-add into your .bashrc
-file. The major downside to this though is that every new bash instance will
-ask for your private key passphrases if you have any set.
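-
-Put together, the relevant lines look something like this (the key path is
-just an example):
-
-----
-# ~/.bash_profile
-eval $(ssh-agent)
-ssh-add ~/.ssh/id_rsa   # prompts for the key's passphrase once per login
-----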
-
-Category:Unix
-Category:Linux
-Category:SSH
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/MediaWiki_vs_SharePoint.ascii b/src/MediaWiki_vs_SharePoint.ascii
deleted file mode 100644
index 878eafa..0000000
--- a/src/MediaWiki_vs_SharePoint.ascii
+++ /dev/null
@@ -1,100 +0,0 @@
-Mediawiki vs SharePoint
-=======================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-A ways back I began toying with MediaWiki as a proof of concept/research
-mission. As I slowly learned its capabilities, I started to realize that it had
-really great potential as a replacement for Microsoft Office SharePoint. I'm
-not saying that for religious reasons either. A few reasons I think it
-supersedes SharePoint are...
-
-
-[[mediawiki-pros]]
-== MediaWiki Pros
-
-* Its markup makes writing documentation fast and easy (wow that felt
- like an infomercial)
-
-* It doesn't require any particular browser to be fully functional (or
- even partially functional)
-
-* Document editing is done in browser without the need of external
- software
-
-* Check-out and check-in/save are done in two steps unlike with
- SharePoint where you must download a document, check it out so no one can
- make changes while you are working on it, make your changes in MS Word, save
- changes in MS Word, upload new version to SharePoint, fill out changelog
- information, and delete the local copy on your computer to avoid clutter and
-  having multiple copies of one document. That might have been a bit
-  exaggerated, but certainly not by much.
-
-* MediaWiki tracks content. SharePoint tracks documents. They both
- provide versioning but because of MediaWiki's content tracking, it can
- perform letter-by-letter comparisons on different article versions easily
- in-browser and without extra plugins (ActiveX, I'm looking at you!)
-
-* It has user pages which notify users if a change was made, making them
- ideal for assigning tasks to members of a team.
-
-* Permissions are rarely a concern (when should you be putting super
-  sensitive information in unencrypted docs on a document repository anyway?),
-  whereas in most SharePoint setups, permissions are often fought with. However,
- Mediawiki's permissions structure is simpler and less robust so this isn't
- necessarily a pro or a con.
-
-* MediaWiki is cheaper and uses fewer resources as a LAMP or WAMP stack
- requires a far less powerful machine and far less money in licensing fees
- than an IIS server.
-
-* Mediawiki is very fluid with its templating system and various popular
- extensions (one of my favorites is
- http://www.mediawiki.org/wiki/Extension:ParserFunctions[ParserFunctions])
- which allow it to be tailored to almost any project need without the
-  need of an expensive development team
-
-* MediaWiki is the software used by
- http://www.wikipedia.org/[Wikipedia], so support and development for it won't
- be going away any time soon and backwards compatibility will be a must for a
- very long time because one of the biggest and most popular sites on the
- internet has a vested interest in it working well with their current setup
-
-* MediaWiki is secure, again because it is used by
- http://www.wikipedia.org/[Wikipedia]. It can be assumed that such a high
- profile site is under constant attack and investigation. How many times
- have you seen Wikipedia go down because of a hack? How many times have
- you seen a SharePoint site go down just because of daily use?
-
-* It also supports a standardized wiki markup language so it can be
- ported to other products much easier than a SharePoint shared docs site can
-
-
-[[sharepoint-pros]]
-== SharePoint Pros
-
-* As mentioned, SharePoint's permissions structure is more robust than
- MediaWiki's but again, this isn't really a pro or a con, just a difference.
-
-* A SharePoint Shared Docs library can be mounted as a Windows share
- allowing _seemingly_ local editing of documents.
-
-* SharePoint integrates into Active Directory. MediaWiki does too, but
- not by default.
-
-* Windows admins should feel more comfortable administering SharePoint
-  (administering, not using; MediaWiki is still unquestionably easier to use)
-
-* SharePoint supports browser-based calendars with a backend in Exchange
- offering mostly seamless integration of team calendars between Outlook and
- the team site
-
-That's all for now. If I think up more pros for either, I'll update the
-list here.
-
-
-Category:Open_Source
-Category:MediaWiki
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Migrating_SQL_Data.ascii b/src/Migrating_SQL_Data.ascii
deleted file mode 100644
index 7af806c..0000000
--- a/src/Migrating_SQL_Data.ascii
+++ /dev/null
@@ -1,110 +0,0 @@
-Migrating SQL Data
-==================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-In my current project I have found a need to migrate data from one SQL server
-to another. For those of you SQL gurus out there, you know that there are many
-ways to migrate data from point A to point B in SQL, even when different
-versions are involved. Here's my setup:
-
-*Server 1:*
-
-* SQL Server 2005 Standard (x86)
-* Windows Server 2008 (x86)
-
-*Server 2:*
-
-* SQL Server 2008 Standard (x64)
-* Windows Server 2008 (x64)
-
-As you can tell, I'm upgrading versions *and* processor architectures.
-Surprisingly enough, this didn't seem to cause any issues for me.
-
-Here are a few options one has to migrate SQL data between servers for those
-who don't find this post too useful.
-
-. SQL Copy Database Wizard
-. Detach, copy to new server, and reattach
-. Backup database, copy backup to the new server, convert backup to a
-  database, attach the converted database
-. Create a database mirror
-. Duplicate the database structure on server two and import the data
-  from server 1
-
-For my environment, only 1, 3, and 5 would work since the others leave more
-possibility for data integrity issues during the transfer or require that the
-SQL server be temporarily taken offline. I tried out a few of my options and
-decided that 1, the SQL Copy Database Wizard, was the best option. It's
-relatively straightforward and very efficient.
-
-For the last three days I have been struggling with it because of what looks
-like permissions issues, though I can't be sure since all the error says is
-that step 1 was the last step to run and that the job failed (give me ambiguity
-or give me something else!). All that being said, I decided I needed to find a
-new way to transfer the data.
-
-Through all of my troubleshooting I found quite a few SQL facets that I pieced
-together to get what I needed. Here's how I chose to migrate my 12 databases
-without too much trouble.
-
-image:files/01_SQL_Migration_ScriptDatabaseAs.png[height=300]
-
-To start, I used SQL Server's remarkable "**Script Database as**" functionality
-to write out the query that creates the database, all of the columns, and all
-of their constraints. For mine I just copied the script to the clipboard to
-make compiling all of the scripts together much faster. To sum it up, I had SQL
-generate the queries for each database I wanted to migrate and I pasted them
-all into notepad so I could run them all at the same time.
-
-image:files/02_SQL_Select_Import_Data.png[height=300]
-
-After running all of the aforementioned queries to create all of the
-database structures on your destination server, we're ready to start importing
-data. Right-click the database to import data to. Go to *Tasks* -> **Import Data**...
-
-If you haven't done this before, you should receive a "Welcome to the SQL
-Server Import and Export Wizard" screen. **Next**.
-
-image:files/03_SQL_Import_Choose_DataSource.png[height=300]
-
-Here we are at the "Choose a Data Source" screen. For Server name type the
-hostname of the server you need to migrate from (the source server). After
-that, select the database you want to copy to the new server. Once done with
-that, click **Next**.
-
-On the "Choose a Destination" screen, type in the name of the server to migrate
-the data to. Next, select the Database you want to copy the data to (this
-should correspond to the DB name on the source server since we ran the create
-queries generated by SQL). In my case, I was running the Import and Export
-Wizard from the server I was importing the data to so SQL server already had
-the Server name and Database filled out. **Next**.
-
-In my case, I wanted to copy the entire database to the destination server, so
-for the "Specify Table Copy or Query" screen, I elected to "Copy data from one
-or more tables or views". **Next**. On the following screen, check all the
-database tables you want to be copied (or just check the box at the top left
-for all of them).
-
-Nearing the end, the "Save and Run Package" screen comes up. If you don't need
-to save the package to be run again later, just leave Run
-immediately checked and click **Next**. Finally we review our settings and what
-will be copied. If everything looks right, click Finish. Once the transfers
-have completed, click **Close**. If any transfers failed or threw a warning,
-you can click the "Messages" text next to the table that did not succeed to see
-the log entries about it.
-
-Repeat the aforementioned steps until you have migrated every database you
-need.
-
-
-
-Category:Microsoft
-
-Category:MsSQL
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Migrating_from_Drupal_7_to_Habari_.8.ascii b/src/Migrating_from_Drupal_7_to_Habari_.8.ascii
deleted file mode 100644
index 1d2e5cf..0000000
--- a/src/Migrating_from_Drupal_7_to_Habari_.8.ascii
+++ /dev/null
@@ -1,91 +0,0 @@
-Migrating from Drupal 7 to Habari .8
-====================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-Lately I've been trying out the latest release of
-http://habariproject.org/[Habari] and I really like it. They have created a
-very simple, yet functional and very clean interface with which to blog (not to
-mention its code implements the newest of just about everything). With that,
-bitnode used to be run on Drupal, so converting from eleventy billion 'articles'
-(that's the technical number) to posts in Habari was not looking too easy.
-After some searching, I found that the best way to convert without having to
-write some sql statements would be to migrate from Drupal 7 to Drupal 6, then
-from Drupal 6 to Wordpress 2.3; then from Wordpress 2.3 to Habari .8.
-
-What?
-
-So it seemed that manually copying the data from column to column with sql
-statements would be my best route. After some time (and so so many browser
-tabs), I finally came up with some queries that would migrate you from Drupal 7
-to Habari .8. Please keep in mind that these will not migrate all of your data.
-These are only for migrating your posts and their related comments.
-
-Assumptions:
-
-* Habari instance table prefix is habari_
-* Drupal instance table prefix is drupal_
-* Our author user id is 2
-
-
-----
--- Move our posts over using the drupal ids so we can relate our comments later
-insert into `habari_posts` (id, title, slug, content, user_id, status, pubdate, updated) select nid,title,title,body_value, 2, status, created, changed from drupal_node join drupal_field_data_body on drupal_node.nid=drupal_field_data_body.entity_id;
-----
-
-Here we are doing a simple insert into habari_posts from another table.
-However, due to Drupal's robust database structure (not sure if it's 3NF), we
-have to query another table for our remaining post data as the meta-data (post
-subject, various dates, status, etc) is stored in the drupal_node table and the
-actual post is stored in the drupal_field_data_body table.
-
-Once again, in this query I have statically defined user id 2. You will need to
-change this to your user's ID in Habari who you want to show up as posting
-everything. If you need to import multiple user's posts, you will need to query
-for the Drupal user IDs and change the Habari user IDs to match the posts
-(that's the easiest way).
-
-----
--- update our drupal published status to the habari version
-update habari_posts set status=2 where status=1;
--- update our drupal draft status to the habari version
-update habari_posts set status=1 where status=0;
-----
-
-Here we are just converting our post statuses from
-Drupal values to Habari values. In Habari, status 1 is published and
-status 0 is draft (as of 2011.12.30).
-
-----
--- Now we migrate our comments
-insert into habari_comments (post_id, name, email, url, ip, content, status, date) select nid, name, mail, homepage, hostname, comment_body_value, status, created from drupal_comment join drupal_field_data_comment_body on drupal_comment.cid=drupal_field_data_comment_body.entity_id;
-----
-
-Here we are grabbing the comments for each of the posts. Since we pulled in all
-the post IDs from the Drupal tables in our first query, we can do the same here
-and everything should line up perfectly. Once again, like with the posts,
-Drupal stores comment data in more than one table. In Drupal, the comment
-meta-data is stored in the drupal_comment table and the actual comment data is
-stored in the drupal_field_data_comment_body table.
-
-And that should be it. You've just migrated all of your post and comment data
-to Habari .8. If you have any images used in your posts, you will also need to
-copy Drupal's *sites/default/files/* directory to the root directory of your
-Habari instance.
-
-If anyone tries this out, please let me know how it worked for you. It worked
-fine for me (evidenced by the fact that bitnode is still viewable), but I'd
-like some input on how to better write these queries in case there are any
-additional fields I may have missed that people would be interested in. Thanks
-for reading!
-
-
-Category:Drupal
-Category:Habari
-Category:Blogging
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Mounting_Drives_in_Linux_Without_Root.ascii b/src/Mounting_Drives_in_Linux_Without_Root.ascii
deleted file mode 100644
index 9aa2d9a..0000000
--- a/src/Mounting_Drives_in_Linux_Without_Root.ascii
+++ /dev/null
@@ -1,50 +0,0 @@
-Mounting Drives in Linux without Root
-=====================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-I recently took an interest in building my own Debian box that was roughly
-equivalent to Linux Mint or Ubuntu. More specifically, I wanted a system built
-from a bare bones Debian base, but had all the functionality that was offered
-by the more major "end user" distributions. I wanted features such as media
-capabilities (video, DVDs, and music), surfing the internet, editing photos,
-etc.
-
-As I used my system, I took note of what else I needed to add to it to make it
-more laptop friendly. One of the things it was missing was the ability to mount
-external storage devices (usb sticks for the most part) without being root.
-After many days of frustration, I finally caved and started making custom
-adjustments outside of simply installing software. Here is how you mount an
-external storage device in Debian Linux without needing root permissions.
-
-It is really quite simple...
-
-First, you need to configure the permissions on your mount location. In Debian
-Linux, this location is /media. The permissions on that directory are set by
-default to 744. When we mount an external device, we want a directory to be
-created in there to give mount a location to mount to. In light of this, we
-need the permissions on the /media directory to be 777. To achieve this, open
-your root cli and run the following command:
-
-----
-chmod -R 777 /media
-----
-
-And you should now be done!
-
-For me however, I experienced a small problem with this. In my /etc/fstab file,
-there was an entry for a cdrom located at sdb1. Since that is not where
-my cdrom is located, I just commented out that line, and all worked as
-expected.
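-
-For reference, the entry I commented out looked something like this
-(reconstructed from the Debian defaults, so treat it as an approximation):
-
-----
-#/dev/sdb1   /media/cdrom0   udf,iso9660   user,noauto   0   0
-----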
-
-Just one more step towards the perfect Linux system.
-
-
-
-Category:Linux
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Mutt:Email_Notifications.ascii b/src/Mutt:Email_Notifications.ascii
deleted file mode 100644
index 3ec09bf..0000000
--- a/src/Mutt:Email_Notifications.ascii
+++ /dev/null
@@ -1,72 +0,0 @@
-Mutt:Email Notifications
-========================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-I use the http://www.mutt.org/[mutt] email client for all of my email accounts
-(in combination with mbsync for offline mail). One of the things I've heard
-quite a few people complain about is its lack of notification integration, be
-it through libnotify, system sounds, or any other means. Rightfully so, it is
-a bit annoying to have to keep checking your terminal for new mail. With that,
-I wrote a simple script to remedy the issue.
-
-This script uses the inotify-tools to watch the given directory for new files
-without having to loop and execute commands every x seconds, consuming many
-system resources. Inotify is very small and will not bog down your machine (it
-uses the Linux kernel inotify subsystem to sleep until triggered by the
-specified filesystem event).
-
-----
-#!/usr/bin/env bash
-
-# Command to be executed when a change occurs
-cmd="mplayer -really-quiet ${HOME}/.sounds/notify.ogg"
-
-# Require user to specify directory to be watched
-if [[ -z ${1} ]]; then
- echo "Please specify a directory to be monitored."
- exit 1
-fi
-
-monpath=${1}
-
-# Verify directory to be monitored exists
-if [[ ! -d ${monpath} ]]; then
- echo "Path ${monpath} does not exist. Please check the path and try again."
- exit 1
-fi
-
-echo "Monitoring ${monpath}"
-
-while true; do
-  # Wait for a file creation event anywhere under the monitored path
-  inotifywait -e create -r "${monpath}"
-  # Play the notification sound (really quiet, because mplayer is chatty)
-  ${cmd}
-done
-----
-
-
-[[usage]]
-== Usage
-
-To use this script, save its source to a path where it can be called easily. I
-put mine at __~/bin/dir_notify.sh__.
-
-Once you've tweaked the cmd variable to your liking, simply execute the script
-and background it by following the command with an & if you like. Send yourself a
-few emails to test it.
-
-----
-dir_notify.sh ~/Mail
-----
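-
-To background it as mentioned above, just append an ampersand:
-
-----
-dir_notify.sh ~/Mail &
-----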
-
-Category:Mutt
-Category:Linux
-Category:Scripts
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Mutt:Sorting_Mail_Like_a_Boss.ascii b/src/Mutt:Sorting_Mail_Like_a_Boss.ascii
deleted file mode 100644
index ecf31f7..0000000
--- a/src/Mutt:Sorting_Mail_Like_a_Boss.ascii
+++ /dev/null
@@ -1,61 +0,0 @@
-Mutt:Sorting Mail Like a Boss
-=============================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-Mutt is a relatively difficult mail client to learn. However, as most if not
-all mutt users will tell you, once you've got the hang of it, no other mail
-client can come close to matching the efficiency and speed of command line mail
-(or anything else in the command line, for the most part). I recently started
-using mutt myself and just can't get over how efficient it is once you've got
-your configuration sorted out. Yesterday I easily cleaned out 800 emails in
-about five minutes using some very simple search terms (and no I didn't just
-delete randomly 800 emails). Unlike the title of this post implies though, I
-am not amazing with mutt, but what I do know, however, can get me around very
-quickly. Here's what I use nearly every day.
-
-
-[[tags]]
-=== Tags
-
-Mutt supports this neat thing called tagging. It's basically the command line
-equivalent of multiselect (ctrl+click or shift+click).
-
-**To tag a message**, move the cursor to it and hit the _t_ key.
-
-**To tag a group of emails based on a pattern**, for example "Change Requests",
-hit capital __T__. You will see at the bottom of your mutt window __Tag
-messages matching:__. Type your tag term, hit enter, and you should see several
-highlighted messages now.
-
-Finally, **to perform an action on all tagged messages**, precede the command
-letter (s for save/move, d for delete, N for new, etc) with a semicolon ( ; ).
-For instance, to delete all tagged messages, type __;d__.
-
-
-[[limit-filter]]
-=== Limit (Filter)
-
-In mutt, you can do this really cool (though not novel in the slightest) thing
-wherein you filter the displayed messages by a regular expression. You don't
-have to use a regular expression of course, but nevertheless it can be done.
-
-**To limit/filter the currently displayed emails**, head over to a directory
-you want to get a better filtered look at and press the _l_ key. You will see
-at the bottom of your mutt window __Limit to messages matching:__. Type
-whatever you want to limit to.
-
-Note here though that limit by default only searches mail meta data unless
-otherwise specified. This makes searches go much faster since most of the time
-you're just cleaning up your inbox by subject, receivers, and date. If you do
-want to search the body of your emails, precede your search term with __~B__,
-and mutt will go the extra mile and search email bodies for you.
-
-
-Category:Linux
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Mutt:Useful_Muttrc_Color_Regular_Expressions.ascii b/src/Mutt:Useful_Muttrc_Color_Regular_Expressions.ascii
deleted file mode 100644
index fe9820e..0000000
--- a/src/Mutt:Useful_Muttrc_Color_Regular_Expressions.ascii
+++ /dev/null
@@ -1,40 +0,0 @@
-Mutt:Useful Muttrc Color Regular Expressions
-============================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-== {doctitle}
-
-I was working on my muttrc file this morning to make it highlight more stuff
-and thought I'd share a few of my regexes with the world (not that anyone using
-mutt wouldn't likely already know how to do this).
-
-
-[[phone-numbers]]
-== Phone Numbers
-
-I find it useful to have my mail client highlight phone numbers. When you're
-late for a meeting you need to dial in to, it's nice to have the phone number
-stand out from the rest of the text so you don't have to hunt around the email
-for it. Here's my regex for phone numbers (colors included).
-
-----
-color body brightcyan black "+\?[0-9]{0,2}[ \-]\?[\( ]{0,3}[0-9]{0,3}[\-\. \)]{0,3}[0-9]{3}[\-\. ][0-9]{4}"
-----
-
-In case you don't want to read through that to figure out what formats of phone
-numbers it supports, or don't know regular expressions, here are a few examples:
-
-* (123)456-7890
-* ( 123 ) 456-7890
-* 123.456.7890
-* 123 456 7890
-* +1 123-456-7890 (up to two digit international numbers)
-* +1 (123) 456-7890
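-
-Along the same lines, a similar rule (untested, so season to taste) can make
-URLs stand out as well:
-
-----
-color body brightblue black "(https?|ftp)://[^ ]+"
-----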
-
-Category:Mail
-Category:Mutt
-Category:Linux
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/MySql:Find_all_Required_Columns.ascii b/src/MySql:Find_all_Required_Columns.ascii
deleted file mode 100644
index 62ba323..0000000
--- a/src/MySql:Find_all_Required_Columns.ascii
+++ /dev/null
@@ -1,43 +0,0 @@
-MySql:Find all Required Columns
-===============================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-I am currently working on a functionality for an app that automatically
-enforces database null constraints in the client side code and in the server
-side code (if null='no' then print * next to title and require the value be set
-in the form postback). Basically, what I need to do is to query the database
-for all columns that are Null="NO". Initially, I looked into the show
-command...
-
-----
-SHOW Columns FROM dbname.tablename where `Null`='NO';
-----
-
-That does almost what I want. However, this unfortunately returns more data
-than I need, and I'd like to avoid parsing the data if I can get MySql to give
-me only the data I want. After searching around a bit more, I discovered that
-one of the default databases in MySql contains exactly what I needed:
-**information_schema**.
-
-The query to grab all fields that cannot be null is not actually too
-complicated thankfully.
-
-----
-SELECT column_name FROM information_schema.columns WHERE is_nullable='NO' && table_name='mytable' && table_schema='mydatabase';
-----
-
-So here, we're grabbing the column_name field from the columns table within the
-information_schema database, provided the is_nullable field is equal to 'NO'.
-The rest is simply filtering it all down so it only returns the column names
-for our particular table (the table_name field) inside of our particular
-database (the table_schema field).
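-
-If you'd rather run that from a shell script, the mysql client can execute it
-directly (the user and database names here are hypothetical):
-
-----
-mysql -u appuser -p -N -e "SELECT column_name FROM information_schema.columns \
-  WHERE is_nullable='NO' && table_name='mytable' && table_schema='mydatabase';"
-----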
-
-
-Category:MySQL
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/My_.bashrc.ascii b/src/My_.bashrc.ascii
deleted file mode 100644
index 938e44b..0000000
--- a/src/My_.bashrc.ascii
+++ /dev/null
@@ -1,40 +0,0 @@
-Not that any of you care that much, but I thought I might post my .bashrc file
-as it evolves in case anyone is looking for something I have in mine. I have
-made quite a few of them and ultimately end up cleaning them out entirely on
-occasion to start over so I can keep it clean with only the things I need.
-
-That said, here's what we've got so far. The initial contents at the top are
-from the Arch Linux skel file.
-
-I'll keep updating this as I make changes.
-
-----
-#
-# ~/.bashrc
-#
-
-# If not running interactively, don't do anything
-[[ $- != *i* ]] && return
-
-alias ls='ls --color=auto'
-PS1='[\u@\h \W]\$ '
-
-# My modifications
-export TERM=xterm-256color
-
-# This will spawn a new tmux instance as our "shell" if tmux isn't already
-# running
-if [[ -z $TMUX ]]; then
- exec tmux
-else
- echo
-fi
-----
-
-
-
-Category:SSH
-Category:Bash
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/My_Favorite_Blogs.ascii b/src/My_Favorite_Blogs.ascii
deleted file mode 100644
index 1f315bb..0000000
--- a/src/My_Favorite_Blogs.ascii
+++ /dev/null
@@ -1,22 +0,0 @@
-My Favorite Blogs
-=================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-I ride the train to and from work, which means I need some good quality reading.
-Recently though, I left one of my sites, slashdot.org, because of the people. I
-feel like the comments have become for the most part uninformed, malicious,
-and/or just generally not constructive. However, replacing such a good tech
-news site is no small task. It took me a good week of clicking through to go
-back to original posts from Ars Technica before I found the one. With that,
-here are my favorite blogs/aggregators thus far.
-
-* http://artofmanliness.com/[The Art of Manliness]
-* http://lxer.com[lxer.com]
-* http://arstechnica.com[Ars Technica]
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/My_Favorite_Open_Source_Projects.ascii b/src/My_Favorite_Open_Source_Projects.ascii
deleted file mode 100644
index e900965..0000000
--- a/src/My_Favorite_Open_Source_Projects.ascii
+++ /dev/null
@@ -1,104 +0,0 @@
-My Favorite Open Source Projects
-================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-I really enjoy open source software. It is somewhat lost on me why most
-enterprises are so heavily dependent on proprietary software when the open
-source equivalents are better and/or more widely used (Apache/nginx, Linux,
-bash, git, etc.), or the project the software was purchased for certainly
-doesn't merit the cost or need the support (e.g. an Oracle instance for a four
-column single table test database, really?).
-
-All that goes to say that I really like open source software. I try my
-best to use it simply on principle, overlooking the shortcomings of some
-of the projects just because the code is open and available. With that, here
-are some of my favorite projects right now (not necessarily in order of
-awesomeness).
-
-
-[[server-software]]
-== Server Software
-
-1. **http://www.linux.com[Linux]**: I'm a fan of
- http://www.debian.org/[Debian] (yeah yeah yeah, I don't want to hear
- about the political war going on around this one).
-
-2. **http://archlinux.org[Arch Linux]**: Severed limb style bleeding edge
- Linux distribution. Requires a medium to high level of Linux experience to
- even get it installed unless you're looking for a really great Saturday
- afternoon project (because what IT guy wakes up before 11:00 on a Saturday
- anyways).
-
-3. **http://git-scm.com[Git]**: Code Versioning System. Not easy for mere
- mortals at first, as someone once said. It sure is amazingly powerful.
-
-4. **http://www.mediawiki.org[Mediawiki]**: Standard wiki but it's really
- great for blogging, team documentation, project documentation tracking, etc.
- https://wikipedia.org[Wikipedia] runs on this.
-
-5. **http://www.php.net/[PHP]**: Server side script language. Fantastic for
- building websites. Also really amazing replacement for Perl for server side
- scripting in my opinion. Supports object oriented programming and is under
- constant development. I really can't say enough good things about this
- project.
-
-6. **https://mariadb.org[MariaDB]**: Forked from the MySql project by its
- founder, http://en.wikipedia.org/wiki/Monty_Widenius[Monty Widenius] when
-  the open source grim reaper...er...Oracle/Sun acquired it in 2008 (no, I'm
- not opinionated at all).
-
-7. **http://httpd.apache.org[Apache]/http://nginx.org[Nginx]**: Two really
- solid web servers that currently occupy the top two slots on the most widely
- used web servers in the world (yay for open source!)
-
-8. **http://www.ejabberd.im[ejabberd]**: Pretty popular XMPP server software
-  with a good development speed. Built with Erlang.
-
-
-[[desktop-software]]
-== Desktop Software
-
-1. **http://archlinux.org[Arch Linux]**: Hey look! I used this one twice! ...
- Severed limb style bleeding edge Linux distribution. Requires a medium to
- high level of Linux experience to even get it installed unless you're
- looking for a really great Saturday afternoon project (because what IT guy
- wakes up before 11:00 on a Saturday anyways).
-
-2. **http://www.xfce.org/[XFCE]**: Desktop interface for Linux.
-
-3. **http://www.pidgin.im[Pidgin]**: Instant messaging
-
-4. **http://www.libreoffice.org[Libre Office]**: Document authoring (MS Word,
- Excel, PowerPoint, etc)
-
-5. **http://www.gnu.org/software/coreutils[The GNU Core Utils]**: chmod,
- chown, ls, cut, tail, wc, su, w, etc. Full list of commands provided can be
- found http://en.wikipedia.org/wiki/GNU_Core_Utilities[here]. Basically, if
- you use Linux command line ever, these are the majority of the tools you use
- most frequently. Despite all the politics surrounding Mr. Stallman and his
- amazing beard, the GNU project has done a great job with the core utils, bin
- utils, diff utils, file utils, and
- http://www.gnu.org/software/#allgnupkgs[everything else] they've done.
-
-6. **http://www.blender.org[Blender]**: 3D modeling (not for the faint of
- heart)
-
-7. **http://inkscape.org[Inkscape]**: Vector imaging. Good replacement for
- Adobe Illustrator
-
-8. **http://www.keepassx.org[KeePassx]**: Encrypted password database for
-  Linux. Windows equivalent is http://keepass.info/[KeePass].
-
-9. **http://www.gimp.org[Gimp]**: Great replacement for Photoshop, especially
- given that most people purchase Photoshop [Elements] and use it to crop and
- autocolor images. I can't speak highly enough of this software. It may take
- a bit to learn, but it is tremendously powerful.
- http://www.flickr.com/photos/ninepan/galleries/72157626688146138/[Here] is a
- great list of images that people have made using Gimp.
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Net_Neutrality.ascii b/src/Net_Neutrality.ascii
deleted file mode 100644
index cafbfa5..0000000
--- a/src/Net_Neutrality.ascii
+++ /dev/null
@@ -1,138 +0,0 @@
-Net Neutrality
-==============
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-My house is wired for two internet service providers: *Comcast* and
-**CenturyLink**.
-
-*Comcast* provides what is called cable internet. They have a network created
-originally with the intent to deliver paid but limited commercial television.
-That clearly didn't work out as intended though, because cable television now
-has more commercials than free ad-supported air-wave television; but I digress.
-
-*CenturyLink* on the other hand, is a DSL provider. DSL uses the old phone
-network that they didn't build; they just use it. While the maximum speeds of
-DSL internet are slower than the maximum speeds of cable internet, they are
-usually cheaper, likely due to the smaller amount of infrastructure overhead.
-They also have a reputation for being unreliable, though that hasn't really
-been my experience.
-
-Herein lies the problem. My house is wired for *only* two internet service
-providers. In December of 2013, the FCC released
-http://transition.fcc.gov/Daily_Releases/Daily_Business/2013/db1224/DOC-324884A1.pdf[a
-report] in which was detailed the number of landline internet providers
-available per household. The statistics...
-
-* 33% have access to 3+ providers
-* 37% have access to 2 providers
-* 28% have access to 1 provider
-* 2% have access to 0 providers
-
-The survey shows that 67% of households have access to 2 or fewer internet
-service providers. Further, that number will likely not change much in the
-future because the only way to get a new provider into the mix is for that
-provider to use the phone network (DSL), or to have enough funding as a startup
-to build their own network, which is incredibly costly. In other words, the
-cost of entry in this market is so high that it is effectively a barrier to entry. That
-makes the few landline internet service providers
-http://en.wikipedia.org/wiki/Monopoly[monopolies], doesn't it?
-
-
-[[utilities]]
-== Utilities
-
-The idea has been discussed of declaring internet a utility in the United
-States. That is an interesting thought, full of complications. What are some
-utilities we can compare to get a better idea of what that would look like?
-
-* **Water**: Each house has its own water pipes put in when it's built.
-Laying a second set of pipes for a different water provider would be far too
-costly (probably impossible). The service of in-house water access is
-effectively a monopoly on a city-by-city basis, and thus is eligible for
-government regulation since its existence as a monopoly cannot be avoided.
-
-* **Electricity**: Again, when a house is built, its lines are connected
-to "the grid". That grid is owned usually by a very large company (like Xcel)
-who has the money to build out those lines, or by the city who also has the
-money to build out those lines. Either way, electricity can only be acquired
-from one provider for the given dwelling. Like water, the product of
-electricity is an unavoidable monopoly worthy of government regulation.
-
-* **Gas**: I'll just be quick on this one. Gas, pipes, one provider per
-house = unavoidable monopoly.
-
-The commonalities of the three aforementioned utilities are
-
-* Cost to market entry is prohibitively high by the nature of the
- product
-
-* Government intervention is required to keep sole providers from
- abusing their powers as sole providers
-
-However, if internet is to be a utility, it should [in theory] have similar
-characteristics to a utility, notably, limitations.
-
-Most folks want their unlimited data (I'm one of them). However, when you pay
-for your electricity bill, you may notice that they charge you (in the US) per
-kilowatt hour. With water, they charge for gallons used. With internet, it
-would presumably be charged on a per gigabyte basis. Regulation then would not
-be on how much you get access to, but how much you pay for increments of said
-access. Many companies have implemented a small, medium, large product set
-wherein you pay the company multiple hundreds of percents higher than the
-product is actually worth for a limited product which, if you exceed it, gets
-you charged exorbitant fees almost as if you breached a contract. This isn't how
-gas, electricity, or water work. An increment could not be "small, medium, or
-large", but "You used 15.9 gigabytes this month".
-
-
-[[government-regulationownership-stops-innovation]]
-== Government Regulation/Ownership Stops Innovation
-
-The subject of this section makes plain what it is about. If you disagree or
-dislike this, please read anyways as the entire topic of net neutrality should
-not be discussed without bringing this in (it's not a real discussion anyways
-if you dismiss the other's viewpoints without first hearing them out).
-
-The United States' capitalist-oriented economy and law have without a doubt
-gotten the nation where it is today (for better or for worse). Yes, we have
-some companies (I won't name any, but I'm sure you can think of some) who have
-abused their wealth to exploit people. On the flip side, the United States
-also has one of the most robust, thriving, and enduring economies in the
-world. Nearly every other nation, if not _every_ other nation, bases their
-currency on ours (I'm an American by the way).
-
-It's an easy-to-prove fact that most (always avoid absolutes) game-changing
-innovations have come out of the United States private sector. Some more
-notable ones are Walmart's best-in-world inventory tracking, Amazon's user
-preference algorithms, Google's search algorithms [originated here], computers
-in general (though now they are often manufactured in other countries), Pixar's
-RenderMan, the internet (though that was originally commissioned by the
-government supposedly), the cell networks. The list could go on.
-
-Now think of the last time you went into a government establishment, be it a
-court house, the DMV, or somewhere else. Did you notice that they're still
-running Windows XP with 4x3 monitors and very old desktops? The best innovation
-we've seen near the DMV as of late is the ability to renew one's driver's
-license on their website. However, as we've seen with the latest healthcare.gov
-screwups (let's face it, that's what they were), the government isn't good at
-doing much that the private sector excels at.
-
-
-[[a-balance]]
-== A Balance
-
-However, if the private sector were really as good at everything as I may seem
-to have just implied, why do we even have a government? I won't deny that a
-government is needed to intervene. We do need a governing body that is above
-all others so it can govern. That's why we have anti-monopoly laws that are
-actually enforceable (remember all the attempted purchases of T-Mobile as of
-late?) amongst other laws that protect citizens, and in this case, consumers of
-the internet.
-
-More thoughts more thoughts more thoughts...
-
-Category:Politics
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Non-Root_User_Connect_to_Wifi_in_Arch_Linux_with_SLiM.ascii b/src/Non-Root_User_Connect_to_Wifi_in_Arch_Linux_with_SLiM.ascii
deleted file mode 100644
index 346c86c..0000000
--- a/src/Non-Root_User_Connect_to_Wifi_in_Arch_Linux_with_SLiM.ascii
+++ /dev/null
@@ -1,44 +0,0 @@
-Non-Root User Connect to Wifi in Arch Linux with SLiM
-=====================================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-I'm a fan of Arch Linux. A big reason for that (for me at least) is that Arch
-doesn't do everything for you. For the most part, I'm a self-taught Linux user,
-so the more things that aren't done for me, the better. In that light, I of
-course have discovered yet another thing that has not been done for me like it
-was in Debian (not knocking Debian at all; that's still my server distro). That
-would be the .xinitrc file for my user. Consequently, since my .xinitrc file is
-custom made, it is obviously lacking certain handy tricks for increased
-functionality. In this case, non-root wifi connections.
-
-To be able to run [some additional] privileged commands as your non-root user
-without sudo, we need to have a package installed called
-**http://www.freedesktop.org/wiki/Software/ConsoleKit[consolekit]**. When we
-use this to launch our X interface, it gives our GUI applications permissions
-to do more stuff
-(http://theportalwiki.com/wiki/Cave_Johnson_voice_lines[science!]). To do this,
-we need to edit our .xinitrc file so our GUI is launched from within a
-consolekit session so it can make requests for elevated permissions without
-requiring root access.
-
-For our current example, we have xfce4 installed, so we will be editing the
-line that likely looks like +exec startxfce4+. We want our xfce4 session to
-launch from within consolekit, so we change the line to look like...
-
-----
-exec ck-launch-session startxfce4
-----
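-
-For context, a complete minimal .xinitrc might look something like this (a
-sketch; a real one will likely have more going on):
-
------
-#!/bin/sh
-exec ck-launch-session startxfce4
------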
-
-That should be it. Log yourself out of your X session (if you are logged in)
-and log back in and you should be able to connect to wifi networks without
-having to give your root password.
-
-
-Category:Linux
-
-// vim: set syntax=asciidoc:
diff --git a/src/Note-taking_with_Vim.ascii b/src/Note-taking_with_Vim.ascii
deleted file mode 100644
index 4aea378..0000000
--- a/src/Note-taking_with_Vim.ascii
+++ /dev/null
@@ -1,115 +0,0 @@
-Note-taking with Vim
-====================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-Two vim posts in one day!
-
-My task list at work has recently become so large (it's probably well over a
-year's worth of work now) that I now need to track my tasks somewhere other
-than in my head (documentation is always better than tribal knowledge anyways).
-I really don't like task tracking because most of the applications out there
-are just so heavy for what note-taking actually is. I use vim almost all day,
-every day though, so why not use that (plus it's command line!)?
-
-I spent about thirty minutes writing this up today. It's inspired a bit by the
-LifeHacker article,
-http://lifehacker.com/5592047/turn-your-command-line-into-a-fast-and-simple-note+taking-tool[Turn
-Your Command Line into a Fast and Simple Note Taking Tool] (thanks
-http://mottr.am/[Jack Mottram]).
-
-This will automagically give all of your notes a .wiki extension, telling vim
-to use the mediawiki text syntax highlighter (I use MediaWiki a lot too, so I
-figured I'd use that syntax for markup). The highlighter can be found
-http://en.wikipedia.org/wiki/Wikipedia:Text_editor_support#Vim[here]. If you
-want to use something else like markdown, just change the $noteExt variable at
-the top to the extension associated with the highlighter you want.
-
-This addition will give you seven new commands.
-
-* +**note** [NoteName]+: Opens a note for editing or creates
-a new note. If no note is specified, opens the most recent note.
-* +**mknote** NoteName "Note to append"+: Appends text to the
-requested note.
-* +**catnote** [NoteName]+: Prints the contents of the
-specified note (or the most recent note if none is specified).
-* +**lsnotes**+: Lists all notes by date modified
-* +**findnote** SearchTerm+: Searches all notes for the
-search term (case insensitive) and prints the results along with note
-title and line number on which the term was found.
-* +**mvnote** OldName NewName+: Renames a note
-* +**rmnote** NoteName+: Deletes the specified note.
-
-Add the following to your .bash_profile (or .profile if you're a ksh user)
-
-----
-export base=~/Documents/Notes
-export noteExt=wiki
-# This would be used for markdown
-# export noteExt=md
-note() {
- if [ ! -d $base ]; then
- mkdir -p $base
- fi
- # If note not specified, open most recent
- if [[ -z "$1" ]]; then
- vim $(ls -t $(find $base/ -type f) | head -n 1)
- else
- vim $base/$1.$noteExt
- fi
-}
-
-mknote() {
-  # Quote expansions so note text with spaces appends as a single line
-  echo "$2" >> "$base/$1.$noteExt"
-}
-
-catnote() {
- # If note not specified, cat most recent
- if [[ -z "$1" ]]; then
- cat $(ls -t $(find $base/ -type f) | head -n 1)
- else
- cat $base/$1.$noteExt
- fi
-}
-
-lsnotes() {
-  #ls -1 $base/ | sed "s/\(.*\).$noteExt/* \1/"
-  echo
-  echo -e "Last Modified\tName"
-  # Use $noteExt here rather than a hard-coded .wiki so other markups work too
-  ls -lt $base/ | tr -s ' ' | cut -d ' ' -f 6,7,8,9 | sed "s/^\(\w\+\) \(\w\w\) \(\w\w:\w\w\) \(.*\)\.$noteExt/\1 \2 \3\t\4/"
-  echo
-}
-
-findnote() {
- if [[ -n "$1" ]]; then
- contents="Note:Line:Text\n\n"
- contents=$contents$(find $base/ -type f | xargs grep -n -i "$1" | sed "s/.*\/\(.*\)\.$noteExt:\([0-9]\+\):\(.*\)/\1:\2:\3/")
- echo -e "$contents" | column -s ":" -t
- else
- echo "Please specify a search term."
- fi
-}
-
-mvnote() {
-  # Use $base for the destination too, rather than a hard-coded path
-  mv "$base/$1.$noteExt" "$base/$2.$noteExt"
-}
-
-rmnote() {
- if [[ -n "$1" ]]; then
- rm $base/$1.$noteExt
- else
- echo "Please specify a note."
- fi
-}
-----
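-
-For example, a quick session might look like this (the note name and text
-here are hypothetical):
-
------
-note ServerMigration                           # create or edit a note in vim
-mknote ServerMigration "Check DNS TTLs first"  # append a line to it
-findnote dns                                   # search all notes for a term
------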
-
-
-Category:Linux
-Category:Vim
-Category:Productivity
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Note_to_self:Connecting_ASP.Net_to_SQL.ascii b/src/Note_to_self:Connecting_ASP.Net_to_SQL.ascii
deleted file mode 100644
index 178a66f..0000000
--- a/src/Note_to_self:Connecting_ASP.Net_to_SQL.ascii
+++ /dev/null
@@ -1,18 +0,0 @@
-Note to Self:Connecting ASP.Net to SQL
-======================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-*Note to self:* When connecting your back-end code for ASP.Net to a SQL Server
-instance, always remember to include the SQL instance name in the connection
-string.
-
-HOSTNAME\SQLEXPRESS (or another instance name)
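-
-For instance, a minimal connection string sketch (the database name here is
-hypothetical):
-
------
-Server=HOSTNAME\SQLEXPRESS;Database=MyDatabase;Integrated Security=True;
------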
-
-
-Category:ASP.Net
-Category:MsSQL
-Category:Microsoft
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Open_Source_Living:Browsers.ascii b/src/Open_Source_Living:Browsers.ascii
deleted file mode 100644
index e71c62f..0000000
--- a/src/Open_Source_Living:Browsers.ascii
+++ /dev/null
@@ -1,41 +0,0 @@
-Open Source Living:Browsers
-===========================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-Open source software has got to be one of the coolest things to hit planet
-earth in a long time. Anyone who's asked me about open source software knows my
-opinions well. That or they have no idea because they tune out about five
-minutes into my giddy speech about it.
-
-Either way, it's no secret that I'm a big fan. With all that being said, I have
-spent a lot of time researching open source alternatives to my old day-to-day
-options. Working 100% in Linux all the time, open source is about all you have.
-With that, let's talk about browsers.
-
-For my day-to-day browser, I typically use Chromium. It is fast, installs fine
-on the two distros I use (Debian and Arch), has incredible developer tools
-built in, and has no odd dependencies on weird libraries that require me to
-install all kinds of stuff I don't want just to use it (ie: Gnome or KDE
-stuff). It's just plain and simple Chromium.
-
-Many of you have no doubt heard of Google Chrome and are wondering why not
-that. Google Chrome is a branch/rebrand of the Chromium project, which was in
-fact started by Google. Chromium is not Google branded and often contains
-functionality that is soon to come to Google Chrome.
-
-Other open source browsers...
-
-* http://mozilla.org[Mozilla Firefox]
-* http://en.wikipedia.org/wiki/Midori_(web_browser)[Midori]
-* http://wiki.debian.org/Iceweasel[Iceweasel] (a fork of Mozilla
-Firefox)
-
-
-Category:Open_Source
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Opening_CHM_Files_in_Vista.ascii b/src/Opening_CHM_Files_in_Vista.ascii
deleted file mode 100644
index 842d14d..0000000
--- a/src/Opening_CHM_Files_in_Vista.ascii
+++ /dev/null
@@ -1,43 +0,0 @@
-Opening CHM Files in Vista
-==========================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-If any of you have been following me on Twitter recently, you know I've been
-researching the installation of TFS. I had pretty good luck with a lot of
-websites regarding TFS 2008, but not so much with 2010 Beta 1 (naturally). The
-only source for detailed documentation was Microsoft, so I went with what I had
-and downloaded the document. If you hadn't guessed yet from the title of this
-post, the file had a .chm extension. After downloading the .chm file, I
-proceeded to open it. Much to my dismay, it opened but the pages would not
-load. I was getting 404 errors on every topic. After spending a few hours
-searching, I found a solution. Apparently the .chm extension is not secure and
-Vista removed the ability to read files like that unless they are in a
-specified 'safe' location. I don't particularly want to have to download any
-.chm file to an obscure location and then have to find that location again to
-open the file. Naturally, I searched for a simpler solution.
-
-The solution I found, simpler or no, worked. You guessed it...it's a registry
-hack.
-
-I have a theory that one can literally do anything with registry hacks...things
-like make pigs fly or make hell freeze over.
-
-Here's what to do:
-
-1. Right click your .chm file and select *Properties*
-2. Click *Unblock* and select *Apply*
-3. Open up *Regedit*
-4. Modify the DWORD *MaxAllowedZone* to equal *1* (if it doesn't exist, create
-   it; see the sketch below for where that key lives)
-5. Close all Internet Explorer Windows and start up your .chm file
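-
-For reference, the DWORD in question normally lives under the HTMLHelp
-restrictions key. Here's a .reg sketch (verify the path on your own system
-before importing anything):
-
------
-Windows Registry Editor Version 5.00
-
-[HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\HTMLHelp\1.x\ItssRestrictions]
-"MaxAllowedZone"=dword:00000001
------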
-
-That's all there is to it. I haven't had any problems with any .chm files
-since.
-
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/OwnCloud_Documents_on_Arch_Linux.ascii b/src/OwnCloud_Documents_on_Arch_Linux.ascii
deleted file mode 100644
index 891a1d4..0000000
--- a/src/OwnCloud_Documents_on_Arch_Linux.ascii
+++ /dev/null
@@ -1,38 +0,0 @@
-OwnCloud Documents on Arch Linux
-================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-This is just a quick note for those folks who are running Arch Linux on their
-servers (I am). I was having trouble getting MS Word documents to render on my
-instance, despite having installed LibreOffice (fresh). When I went to enable
-openoffice/libreoffice support, it errored out, telling me to check my logs,
-which gave me nothing more than an ambiguous 500 error (I checked the php,
-php-fpm, nginx, and owncloud logs). Finally, I su'd into the account that
-owncloud/php was running under and attempted to execute the libreoffice binary,
-which failed (Wooo! Useful output!). This is the error I received.
-
-----
-Failed to open display
-javaldx: Could not find a Java Runtime Environment!
-Warning: failed to read path from javaldx
-/usr/lib/libreoffice/program/soffice.bin: error while loading shared libraries: libcups.so.2: cannot open shared object file: No such file or directory
-----
-
-Most of that can be ignored as they are just warnings. There is one error,
-however, and that is the cups error (the last line).
-
-For LibreOffice to support document exporting, it needs cups (the daemon
-doesn't need to be running, we just need the library).
-
-Once you've got cups installed (__pacman -S cups__) and support enabled in
-ownCloud, you should be able to work on MS Word files in-browser.
-
-Category:ownCloud
-Category:Linux
-Category:Libreoffice
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/PHP-5.3:Class_Exception_Not_Found.ascii b/src/PHP-5.3:Class_Exception_Not_Found.ascii
deleted file mode 100644
index 97128fe..0000000
--- a/src/PHP-5.3:Class_Exception_Not_Found.ascii
+++ /dev/null
@@ -1,31 +0,0 @@
-PHP-5.3:Class Exception Not Found
-=================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-== {doctitle}
-
-If in PHP 5.3 (likely just PHP 5) you define a namespace for your current class
-and try to throw a new Exception, it will fail with the following error
-
-----
-Fatal error: Class 'your\current\class\namespace\Exception' not found in YourClass.php on line eleventy-billion.'
-----
-
-The problem is that when PHP fails to find a class in the current namespace
-(other than root of course), it doesn't automagically search the root
-namespace. The Exception object exists in the root namespace (unless you
-created your own) so PHP won't find it because it doesn't exist in your
-class's defined namespace.
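-
-For instance, here's a minimal sketch of the failing case (the namespace name
-is hypothetical):
-
------
-<?php
-namespace app\models;
-
-// Resolves to \app\models\Exception, which doesn't exist
-throw new Exception('This will fail');
------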
-
-The solution is to define the root namespace before your new Exception
-object.
-
-----
-throw new \Exception('This will do nicely');
-----
-
-Category:PHP
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Perfect_Server_Debian_Installation_-_Pureftpd_Won't_Start.ascii b/src/Perfect_Server_Debian_Installation_-_Pureftpd_Won't_Start.ascii
deleted file mode 100644
index 47eaaa5..0000000
--- a/src/Perfect_Server_Debian_Installation_-_Pureftpd_Won't_Start.ascii
+++ /dev/null
@@ -1,93 +0,0 @@
-Perfect Server Debian Installation - Pureftpd Won't Start
-=========================================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-I recently rebuilt the server hosting my website on a different vps host using
-falko's "The Perfect Server Debian Lenny (Debian 5.0) With MyDNS &amp; Courier
-[ISPConfig 3]" and ran into some pretty problematic setbacks. Specifically,
-pure-ftpd wouldn't start up. Even more specifically, when trying to start the
-service, it gives you this fantastically ambiguous error:
-
-----
-Starting ftp server: Running: /usr/sbin/pure-ftpd-mysql-virtualchroot -l
-mysql:/etc/pure-ftpd/db/mysql.conf -l pam -u 1000 -E -A -b -O
-clf:/var/log/pure-ftpd/transfer.log -D -H -B
-----
-
-Give me ambiguity or give me something else, yeah?
-
-Anyways, I've experienced this problem two other times. Sadly, both times I
-forgot where the solution was located on the interwebz. Finally, the third time
-around I've found it again and here we go.
-
-**The problem resides in the fact that the server is a vps hosted through
-either virtuozzo or openvz**. Apparently, unless configured otherwise, the
-virtual containers don't support something that pure-ftpd needs, which
-prevents it from starting up. Sorry for the ambiguity, but I'm really not sure
-what it is. *The solution is to build pure-ftpd from source, "without
-capabilities".* Let's get started. Run the command...
-
-----
-apt-get source pure-ftpd
-----
-
-Once that has been downloaded, change directories into the apt archives
-directory...
-
-----
-cd /var/cache/apt/archives/pure-ftpd*
-----
-
-Inside here you'll find a directory called debian. Run the following command
-
-----
-vi ./debian/rules
-----
-
-Add +--without-capabilities+ to the +optflags+ line in debian/rules.
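-
-If you'd rather do that edit as a one-liner, something like this should work
-(a sketch; double-check the rules file afterward since its layout can vary
-between package versions):
-
------
-sed -i '/optflags/ s/$/ --without-capabilities/' ./debian/rules
------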
-
-Once you've done that, run the following command.
-
-----
-apt-get build-dep pure-ftpd
-----
-
-Annnd another command
-
-----
-dpkg-buildpackage -uc -b
-----
-
-Congratulations! You just built pure-ftpd from source with a custom flag! Now
-it's time to install the package. To do this, we're going to run ANOTHER
-command (they don't call it command line for nothing).
-
-----
-dpkg -i ../pure-ftpd-common_1.0.1-8_i386.deb
-----
-
-Finally, run this one last command to install MySql for pure-ftpd so the
-service can connect to a MySql database where your users and their permissions
-will be stored.
-
-----
-dpkg -i ../pure-ftpd-mysql_1.0.1-8_i386.deb
-----
-
-And finally, you are done. Hopefully this helped some poor lost soul out there
-besides just me. If you find any problems or just want to tell the world
-something, please leave a comment in the comments section (spam bots, you
-aren't included in that offer. If you try I will http://akismet.com/[Akismet]
-you!).
-
-
-Category:Linux
-
-Category:Debian
-Category:FTP
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Performing_a_MySql_Backup_Via_Command_Line.ascii b/src/Performing_a_MySql_Backup_Via_Command_Line.ascii
deleted file mode 100644
index 91e049e..0000000
--- a/src/Performing_a_MySql_Backup_Via_Command_Line.ascii
+++ /dev/null
@@ -1,42 +0,0 @@
-Performing a MySql Backup Via Command Line
-==========================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-A few months back I was running backups of the ol' web server and realized that
-it was going to be a pain since I was starting to host more and more websites.
-Logging into phpMyAdmin and exporting every database can be a bit cumbersome
-when you have a lot of databases. With that, I wanted a good solution for
-dumping my entire database server. Thankfully, the solution is a really simple
-one (if you have console access). Many thanks to the MySql devs for creating
-this feature (as if they wouldn't; what good is a database you can't back up,
-after all?).
-
-As I mentioned, this is really simple. To export all of your databases
-that you have running, including create statements, run the following
-command...
-
-----
-mysqldump -u root -p --all-databases > /tmp/dumpfile.sql
-----
-
-So here's what we just did.
-
-* *-u* root specifies the user. In this case, root is who we are logging
-in as.
-* *-p* makes it ask for a password (it will try to log in without using
-a password if this isn't used)
-* *--all-databases* makes it export all databases (duh)
-
-Ordinarily, this command outputs to the console, which isn't terribly useful
-on its own. The > redirects that output to the specified file rather than to
-your console window.
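-
-To restore from that dump later, just feed it back in (this replays everything
-in the file, create statements included, against the server):
-
------
-mysql -u root -p < /tmp/dumpfile.sql
------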
-
-
-Category:MySQL
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Postback_Freezes_Animated_Gifs.ascii b/src/Postback_Freezes_Animated_Gifs.ascii
deleted file mode 100644
index 092d181..0000000
--- a/src/Postback_Freezes_Animated_Gifs.ascii
+++ /dev/null
@@ -1,60 +0,0 @@
-Postback Freezes Animated Gifs
-==============================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-Hello again all,
-
-<rant>In all my experiences in my life as a geek, I have found few things more
-frustrating than developing something for any version of Internet Explorer
-(please hold your shouts of agreement for the end). Internet Explorer 5 never
-really existed (did the internet exist then even?), Internet Explorer 6 was a
-complete atrocity, Internet Explorer 7 I am pretty sure caused the suicide rate
-amongst us geeks to go up significantly, and Internet Explorer 8, while better
-than its predecessors, only caused a few geeks to become severely dependent on
-mind-altering drugs to help them cope with the frustrations of life (or maybe
-just web development for IE).</rant>
-
-You may now cheer...
-
-Now, down to business. On the topic of Internet Explorer doing things
-differently from the rest of the world simply for the sake of it (hey look,
-they're taking after Apple), I have recently experienced a very frustrating
-problem with animated gifs. Referring to my previous post about the file
-uploader, the client I was developing that for wanted an animation icon for the
-upload so their customers didn't think the page had frozen. Sounds like a
-simple task, no?
-
-*The problem can be described as this:* When a postback event occurs (ie:
-clicking a link or submit button), Internet Explorer freezes all animated gifs
-on the page.
-
-*To explain how I fixed this,* I essentially placed an animated 'rotating
-circle' on the page which was hidden until the onSubmit() function was called.
-Here's the code for the image while it was hidden.
-
-----
-<img src="./images/loading.gif" id="loader" style="visibility:hidden;" />
-----
-
-Annnd here's the code for the animation problem fix as well as the code that
-changes the image visibility.
-
-----
-function showLoader(){
-  //*** Reload the image for IE (resetting src restarts the animation) ***
-  document.getElementById('loader').src='./images/loading.gif';
-  //*** Let's make the image visible ***
-  document.getElementById('loader').style.visibility = 'visible';
-}
-----
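-
-Wiring it up is then just a matter of calling showLoader() from whatever
-triggers the postback. A plain HTML sketch of that hook:
-
------
-<form onsubmit="showLoader(); return true;">
------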
-
-Category:HTML
-Category:Internet_Explorer
-Category:Microsoft
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/ProPresenter:Action_Hotkeys_Not_Working.ascii b/src/ProPresenter:Action_Hotkeys_Not_Working.ascii
deleted file mode 100644
index 26daa1f..0000000
--- a/src/ProPresenter:Action_Hotkeys_Not_Working.ascii
+++ /dev/null
@@ -1,36 +0,0 @@
-ProPresenter:Action Hotkeys not Working
-=======================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-My church recently got a new Mac mini for our media computer (we used to have a
-Windows 7 machine). During setting this new system up, I realized that the
-ProPresenter action hotkeys didn't work (eg: Clear all is F1, Clear foreground
-is F2, Clear backgrounds is F3, etc). I don't know about you, but having
-hotkeys is a pretty priceless piece of efficient computing, especially if
-you're running media in a fast-paced presentation environment. After a little
-research, I discovered that Apple, in their infinite wisdom (because that's not
-an inflammatory statement), in fact disabled the OS functionality for the
-function keys and by default they control hardware functionality (eg: F1 and F2
-control brightness, F3 does nothing, F4 does nothing, F5 does nothing, F6 does
-nothing...getting the idea?). Here's how you fix it.
-
-Head on over to the __System Preferences__. In there, select __Keyboard__.
-There are two tabs in there (Keyboard and Keyboard shortcuts). If _Keyboard_
-isn't selected, select it. On that page you should see a checkbox labeled
-"__Use all F1, F2, etc. keys as as standard function keys__". Check that box
-and all of your ProPresenter action hotkeys should work now (Yay!).
-
-If anyone wants to look up my source for this, I found the solution in the
-Apple knowledge base http://support.apple.com/kb/HT3399[here].
-
-
-Category:ProPresenter
-
-Category:Apple
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/ProPresenter:Automatically_Advancing_Slide_Loops.ascii b/src/ProPresenter:Automatically_Advancing_Slide_Loops.ascii
deleted file mode 100644
index cdf686b..0000000
--- a/src/ProPresenter:Automatically_Advancing_Slide_Loops.ascii
+++ /dev/null
@@ -1,56 +0,0 @@
-ProPresenter:Automatically Advancing Slide Loops
-================================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-This last weekend I ran video for my church and needed to create a slide loop
-for announcements at the beginning of service. Now, I'm a MediaShout guy still
-so I'm used to whole projects, Bible verse _references_ instead of playlists,
-cues, and the good ol' quad monitor (come on, who doesn't love that thing).
-As I have come to find out however, ProPresenter is not MediaShout in any way
-(not that that is a bad thing). To make matters a bit more difficult, Google
-didn't have much on how to do this...until now (cue dramatic music). Before
-we get going though, I want to mention that this could be done with a timeline
-and playlist. For this time around though we will just use timers. With that,
-let's get started!
-
-Firstly, create a new item in the Library
-
-image:files/ProPresenter_Slide_Loops00.png[height=350]
-
-You should now have an empty playlist.
-
-Add the images you want from your Video/Image bin. To do more than one at a
-time without launching the slide on the projector, use ctrl+click to select
-multiple items and ctrl+click to drag to the playlist window.
-
-image:files/ProPresenter_Slide_Loops01.png[height=400]
-
-Once you've added all the images you want for your loop, right click each one
-and select **Go to Next Timer...**.
-
-image:files/ProPresenter_Slide_Loops02.png[height=400]
-
-A new window will open with a field labeled _Seconds_ and a checkbox labeled
-__Loop to first slide__. Click the up arrow or manually enter the number of
-seconds you want each slide to display for before advancing to the next. Also,
-you typically don't want to check the _Loop to first slide_ unless you want
-your slide loop to end on that slide.
-
-image:files/ProPresenter_Slide_Loops03.png[height=200]
-
-Once you have applied timers to all the slides you want in your playlist,
-simply add the library playlist to the presentation playlist. Now if you
-launch any of the slides in the library playlist, it will auto-advance and loop
-through no matter which slide you launch first.
-
-image:files/ProPresenter_Slide_Loops04.png[height=400]
-
-
-Category:ProPresenter
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/ProPresenter:Edit_Mode.ascii b/src/ProPresenter:Edit_Mode.ascii
deleted file mode 100644
index 5c5dc43..0000000
--- a/src/ProPresenter:Edit_Mode.ascii
+++ /dev/null
@@ -1,49 +0,0 @@
-ProPresenter:Edit Mode
-======================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-This week one of our media folks contacted me to ask if I knew how to get
-ProPresenter to launch slides again. He said he was clicking them, but they
-wouldn't launch on the external or primary monitors. Instead, they were simply
-selecting.
-
-This one actually took me a while to figure out because like most things with
-ProPresenter, there is very little if any documentation on it. That said, let's
-get started with this document.
-
-If you look at the top left of your slide list, you'll see a little icon of a
-lock (it may be locked or unlocked). If you're having issues getting slides to
-launch, chances are this icon is showing "unlocked".
-
-image:ProPresenter-Slide_lock-unlocked.png[height=300]
-
-This effectively means you're in edit mode instead of presentation mode (I'm
-making these terms up - they're not official in the slightest).
-
-If you are showing unlocked, simply click it to lock the presentation so you
-can launch slides again.
-
-image:ProPresenter-Slide_lock-locked.png[height=300]
-
-[[uses-for-edit-mode]]
-Uses for Edit Mode
-~~~~~~~~~~~~~~~~~~
-
-Edit mode is great for building presentations. When in it, you can select
-multiple slides and drag them around to reorder them. If you aren't in edit
-mode though and you need to move slides around on the fly, you're in luck! Just
-press and hold the Ctrl key and you'll see the "locked" icon temporarily switch
-to "unlocked". While you have the key pressed, you can click and drag slides
-around all you want. You can even select multiple and really start to mess
-things up quick!
-
-image:ProPresenter-Multiselect_Move.png[height=300]
-
-Category:ProPresenter
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Puppet:Out_of_Range_for_Type_Integer.ascii b/src/Puppet:Out_of_Range_for_Type_Integer.ascii
deleted file mode 100644
index e92b002..0000000
--- a/src/Puppet:Out_of_Range_for_Type_Integer.ascii
+++ /dev/null
@@ -1,116 +0,0 @@
-Puppet:Out of Range for Type Integer
-====================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-== {doctitle}
-
-This week we ran into a rather small (or is it large) problem with our puppet
-instance. We logged into the puppet console and noticed that there were over
-37,000 pending tasks, and the list was growing fast. Checking the logs, we saw
-an "out of range" exception. An out of range exception for an enterprise
-product is never a good thing. It's almost as bad as a segmentation fault in an
-enterprise product, something you can do nothing about if you don't have access
-to the source code. In this case though, we actually can do something about
-this particular issue.
-
-Here's the exact error we were seeing...
-
-----
-2015-04-17T22:30:15+0000: [Worker(delayed_job.7 host:http://foosite.com pid:17446)] Class#create_from_yaml failed with ActiveRecord::StatementInvalid: PG::Error: ERROR: value "2147716789" is out of range for type integer: INSERT INTO "resource_events" ("audited", "desired_value", "historical_value", "message", "name", "previous_value", "property", "resource_status_id", "status", "time") VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10) RETURNING "id" - 2 failed attempts
-2015-04-17T22:30:15+0000: [Worker(delayed_job.7 host:http://foosite.com pid:17446)] PERMANENTLY removing Class#create_from_yaml because of 3 consecutive failures.
-2015-04-17T22:30:15+0000: [Worker(delayed_job.7 host:http://foosite.com pid:17446)] 1 jobs processed at 0.3037 j/s, 1 failed ...
-2015-04-17T22:30:15+0000: [Worker(delayed_job.2 host:http://foosite.com pid:17361)] Class#create_from_yaml failed with
-ActiveRecord::StatementInvalid: PG::Error: ERROR: value "2147716814" is out of range for type integer
-----
-
-
-[[solution]]
-=== Solution
-
-It turns out that the functionality that uses this is deprecated as of early
-2014, so this supposedly isn't an issue with newer puppet installs. However, if
-you're using an older puppet (3.0 or older), you might run into this problem.
-
-The problem lies in the database schema for the puppet console. Basically,
-every time a node checks in, it inserts a row into the database. The database
-has some tables with columns that auto-increment (0, 1, 2, 3, etc). If you have
-a lot of nodes reporting back frequently, this number will likely increase a
-lot over time. In our case, we have 333 nodes reporting every 30 minutes or
-more (we do development and thus we often manually run puppet agent with the -t
-switch). At that rate, hitting 37,000 would have taken a little over 2 days
-(333 * (24*60)/30 = 15,984 checkins per day).
-
-The columns that autoincrement use the int datatype. This datatype, as seen
-http://www.postgresql.org/docs/9.1/static/datatype-numeric.html[here], uses 4
-bytes. In case anyone doesn't remember, there are 8 bits in a byte, which means
-that +4 * 8 = 32+. Since the type is signed, the maximum number that will fit
-in any column with the int data type is +2^31^ - 1+, which equals
-2,147,483,647. That means roughly 2 billion puppet reports. It seems like a
-number not easy to achieve, but it is quite possible - we did it.
-
-The solution here is to change the data type on the columns in concern to be
-bigint rather than integer. Again, as documented by the postgres folks
-http://www.postgresql.org/docs/9.1/static/datatype-numeric.html[here], a bigint
-is 8 bytes, which is a 64 bit number. That means the largest it can hold is
-9,223,372,036,854,775,807 (about 9 quintillion). That said, let's get to it.
-
-
-[[executing-the-fix]]
-== Executing the Fix
-
-Before performing the fix, we should probably perform a backup of the database,
-unless you're the type who likes causing a fire you have to put out on a
-Friday, just like...
-
-image:files/Most_interesting_table_schemas.jpg[Compliments of Gimp,title="Compliments
-of Gimp"]
-
-To execute a backup (we'll assume your database name is **console**), run
-
-----
-pg_dump -U console -W -h localhost console > console.$(date '+%Y%d%m.%H%M').sql
-----
-
-Once that backup is complete (mine was 86 GB, so it took a while), shut down
-all of your puppet services to be safe. A list of services you might want to
-shut down can be found
-https://docs.puppetlabs.com/pe/latest/install_what_and_where.html#services[here].
-A general rule of thumb though is, anything in /etc/init.d/ that starts with
-_pe-_ is something that should be stopped, **excepting pe-postgresql**.
-
-Once that's done, execute this fun console trick.
-
-----
-$ psql -U console -W -h localhost
-
--- Change to the console database
-console=> \c console
-
--- These might take a *very* long time (mine took an hour)
-console=> alter table resource_statuses alter column id type bigint;
-console=> alter table resource_events alter column id type bigint;
-console=> alter table resource_events alter column resource_status_id type bigint;
-console=> \q
-----
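-
-If you're curious how close a table is to the 2 billion mark before (or after)
-the change, a quick sanity check looks like this (a sketch):
-
------
-console=> select max(id) from resource_statuses;
------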
-
-With that, restart the _pe-postgresql_ service for good measure. Once that's
-done restarting, start up the other pe-* services and everything should be
-working now.
-
-
-[[related-documentation]]
-== Related Documentation
-
-This is a bug that was reported about three years ago. They have since migrated
-ticket tracking systems, so the links can be found at...
-
-* (old ticket system) https://projects.puppetlabs.com/issues/9225
-* (new ticket system) https://tickets.puppetlabs.com/browse/PUP-1173
-
-
-Category:Linux
-Category:Automation
-Category:Puppet
-Category:Postgres
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Redirecting_a_WordPress_Site.ascii b/src/Redirecting_a_WordPress_Site.ascii
deleted file mode 100644
index 2f5c212..0000000
--- a/src/Redirecting_a_WordPress_Site.ascii
+++ /dev/null
@@ -1,48 +0,0 @@
-Redirecting a Wordpress Site
-============================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-Last week I decided to completely move my blog over to bitnode.net. I created a
-post in the old location with the details of the move and watched my viewer
-metrics slowly shift sites. However, Google is still indexing my old site and
-it is still showing up at the top of the results while this new blog is just
-hanging around on page four. This makes for a very sad day for all (or possibly
-just me).
-
-With that, I decided to do a redirect. There are several options for a
-redirect, but what I wanted was a bit different than the typical domain
-forward. I wanted to be able to keep my post locations, but have the domain
-name change. Since the paths to my posts are the same format between the old
-location and the new location, doing a simple append to the redirect url would
-suffice. Here's how I did it.
-
-At the top of my header.php file (yep, WordPress), I added the following
-lines...
-
-----
-<?php header("HTTP/1.1 301 Moved Permanently");
-header('Location: http://bitnode.net/'.$_SERVER["REQUEST_URI"]); exit();
-?>
-----
-
-Since this is in my header file, it loads on every page. What's nice about this
-solution is that it takes the path to the current page and appends it to the
-bitnode.net redirect location, so if your source and destination URI formats
-are the same, it will be a seamless redirect.
-
-This solution works well, but does anyone have any other solutions that would
-also work? It never hurts to know all the different ways after all.
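-
-One alternative, for what it's worth, is doing the redirect at the web server
-layer instead of in PHP. An Apache sketch (assuming mod_rewrite is enabled and
-this lives in the old site's .htaccess):
-
------
-RewriteEngine On
-RewriteRule ^(.*)$ http://bitnode.net/$1 [R=301,L]
------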
-
-*Edit:* As it turns out, you can do the same thing from WordPress to Drupal.
-They evidently support the same URI formats.
-
-
-Category:WordPress
-Category:PHP
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Remote_Mounting_File_Systems_Through_SSH.ascii b/src/Remote_Mounting_File_Systems_Through_SSH.ascii
deleted file mode 100644
index 958a377..0000000
--- a/src/Remote_Mounting_File_Systems_Through_SSH.ascii
+++ /dev/null
@@ -1,69 +0,0 @@
-Remote Mounting Filesystems Through SSH
-=======================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-Today I was wanting to edit a few image files on a remote machine. Now, when I
-typically need to transfer files across the internet, I will transfer them
-through sftp. I prefer this method simply because I already have an ssh server
-running on my target machine, so I don't need to install anything extra (such
-as ftp or samba).
-
-In light of this, I figured that since you can transfer files through an ssh
-tunnel, you must be able to remotely mount a file system through ssh.
-
-Enter sshfs
-
-I searched around a bit and the first thing I found was sshfs (ssh file
-system). It allows you to remotely mount filesystems through ssh/fuse (yay).
-
------
-apt-get install sshfs
------
-
-Before we get around to actually mounting the remote filesystem, we need to
-change permissions on one thing so we can use this as a non-root user since we
-don't run GUIs as root (at least I hope you all don't). Let's add execute
-permissions for all to the fusermount command.
-
-----
-chmod +x /usr/bin/fusermount
-----
-
-Now that we have done that, we can proceed with mounting. I create a
-mount location in my home directory for ease of access.
-
-----
-mkdir ~/mount
-----
-
-Now that we have a place to mount our remote location,
-let's actually perform the dastardly deed.
-
-----
-sshfs <username>@<RemoteServer>:<RemotePath> <LocalMountPoint>
-----
-
-A good example of this is
-
-----
-sshfs jimneycricket@nowhereissomewhere:/home/jimneycricket ~/mount
-----
-
-It will ask you for a password. Supply the password and all should be well.
-Open up your file manager and navigate to \~/mount and you should see the files
-on your remote server (in this case, the home directory for jimneycricket).
-
-To unmount, you need to log in as root/sudo and run umount \~/mount.
-
-Finally, if you change the ports that ssh listens to on all of your ssh
-servers, you need to add one extra bit to the sshfs string. To connect to a
-port other than the default 22, put
-
-----
--p <port>
-----
-
-just after sshfs and you'll be set.
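-
-For example, reusing the connection from earlier (the port number here is
-hypothetical):
-
------
-sshfs -p 2222 jimneycricket@nowhereissomewhere:/home/jimneycricket ~/mount
------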
-
-Yay for seamless mounting!
-
-Category:Linux
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Replacing_the_Glass_on_a_Samsung_Galaxy_S_iii.ascii b/src/Replacing_the_Glass_on_a_Samsung_Galaxy_S_iii.ascii
deleted file mode 100644
index c79fdd7..0000000
--- a/src/Replacing_the_Glass_on_a_Samsung_Galaxy_S_iii.ascii
+++ /dev/null
@@ -1,133 +0,0 @@
-Replacing the Glass on a Samsung Galaxy S III
-=============================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-I recently had the misfortune of shattering the glass on my phone's screen. I
-went to put it in my pocket, I felt one corner of the phone go in, but
-apparently that wasn't enough because my phone fell out of my pocket instead of
-in. There's this funny thing about gorilla glass. Apparently, if you strike it
-just right at one of its weak spots, the corners, it's enough to shatter [hopes
-and dreams] it into many, many pieces. Thankfully though, someone produces a
-replacement piece of gorilla glass to replace and hopefully restore said broken
-dreams...er...glass.
-
-
-[[finding-the-parts]]
-== Finding the Parts
-
-Really all you need for this is a blow drier or heat gun, a sharp and/or thin
-knife, a credit card or a guitar pick which you don't care about, and the
-replacement glass. If you have an infrared thermometer, you might want to use
-that to gauge the temperature as you go, but I doubt it's necessary. If you do
-have one though, I heated my phone's screen to about 200&deg; F.
-
-So with that, I went on the interwebs to find a replacement screen, which to my
-surprise, costs about $260 on Amazon. As it turns out, screens for the Galaxy S
-iii are incredibly costly because with this phone, screen means glass AND
-digitizer since they are glued together. What I needed was just the glass. I
-searched Amazon some more and stumbled upon
-http://www.amazon.com/Samsung-GT-i9300-Display-included-Replacement/dp/B0096TUA0G/ref=pd_ys_sf_s_2335752011_a1_5_p[this
-one]. When I bought that one week ago, it cost exactly $23.01. Today, just over
-a week later, it costs $34.99. It looks like this guy might be drop shipping
-because the price for
-http://www.ebay.com/itm/Replacement-Screen-Glass-Lens-for-Samsung-Galaxy-SIII-S-3-i9300-White-TOOLS-/180936781341[the
-screen on Ebay] also went up in the last week from about $20 to $28.99 WITH the
-tools. Either way though, buying just the glass is still $200 cheaper than
-buying the entire digitizer and glass assembly.
-
-Now, I'd like to note before going on that, yes, it is cheaper to replace just
-the glass on your phone, but it's also more dangerous. You could scratch up
-your digitizer pretty bad or shatter it in the process of getting the glued
-glass off. The way I look at it though, why not try? If you fail, you'll have
-spent $260 + $30, but if you succeed, you will have saved yourself over
-$200.
-
-
-[[some-precautions]]
-== Some Precautions
-
-Before you start, pull off your back cover and remove your battery, sim card
-(if you have a gsm phone), and micro sd card.
-
-
-[[the-instructions]]
-== The Instructions
-
-Now with that, the Youtube video I watched to give me an idea of what to expect
-can be found
-http://www.youtube.com/watch?annotation_id=annotation_515371&feature=iv&src_vid=aZuiCsAtThw&v=W4Gx5fLy0NQ[here].
-
-
-[[some-tips]]
-== Some Tips
-
-I used a blow drier to heat my phone up. I found that the glue was workable at
-about 180&deg;F. I never got my phone above about 205&deg;F.
-
-Start at the top of the phone if possible. The bottom has some wires that
-attach under the glass to the back and menu buttons and you don't want to break
-those.
-
-Once you get the glass up, use the credit card, guitar pick, or other flat soft
-plastic object to keep the glass up and work the glue out.
-
-I was surprised to find that the digitizer is actually a pretty durable layer.
-There were a few things I did that I thought for sure were going to shatter it,
-but it was completely fine. I ended up with only one real nick (thankfully).
-
-Finally, it took me about two hours and thirty minutes to complete because my
-glass was so shattered.
-
-
-[[the-gallery]]
-== The Gallery
-
-____
-image:files/Img_2335_gsiii-no-glass-sm.jpg[height=400,link="files/Img_2335_gsiii-no-glass-sm.jpg"]
-
-The phone with its glass pulled off in a pile next to it. I used the knife to
-jimmy the glass up on the sides so I could get a tool in.
-____
-
-____
-image:files/Img_2337_gsiii-no-glass-dirty-sm.jpg[height=400,link="files/Img_2337_gsiii-no-glass-dirty-sm.jpg"]
-
-A close up so you can see that tacky glue rolled up all over the place.
-____
-
-
-____
-image:files/Img_2338_gsiii-glass-pile-sm.jpg[height=400,link="files/Img_2338_gsiii-glass-pile-sm.jpg"]
-A pile of wondrous gorilla glass
-____
-
-
-____
-image:files/Img_2343_gsiii-no-glass-clean-sm.jpg[height=400,link="files/Img_2343_gsiii-no-glass-clean-sm.jpg"]
-
-A clean digitizer!
-____
-
-____
-image:files/Img_2344_gsiii-new-glass-sm.jpg[height=400,link="files/Img_2344_gsiii-new-glass-sm.jpg"]
-
-Finally got the glass on.
-____
-
-____
-image:files/Img_2348_gsiii-new-glass-and-case-sm.jpg[height=400,link="files/Img_2348_gsiii-new-glass-and-case-sm.jpg"]
-
-The new case so I don't shatter my NEW glass too.
-____
-
-
-Category:Samsung
-Category:Phones
-Category:Sprint
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Running_Load_Tests_with_a_Remote_VSTS_Controller_and_Associated_Agents.ascii b/src/Running_Load_Tests_with_a_Remote_VSTS_Controller_and_Associated_Agents.ascii
deleted file mode 100644
index b696e5b..0000000
--- a/src/Running_Load_Tests_with_a_Remote_VSTS_Controller_and_Associated_Agents.ascii
+++ /dev/null
@@ -1,51 +0,0 @@
-Running Load Tests with a Remote VSTS Controller and Associated Agent
-=====================================================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-Running a multi-agent load test isn't too complicated, compliments of Visual
-Studio Team Suite. Assuming you have a controller/agent environment set up,
-running the actual test requires a bit of modification to the test project so
-the local test machine doesn't run the test itself; rather it runs them on a
-remote machine (the controller and its agents). To set this up...
-
-[[load-in-your-test-project]]
-Load in your test project
-~~~~~~~~~~~~~~~~~~~~~~~~~
-
-image:files/01_Open_Test.jpg[height=300]
-
-* At the top of the window, select *Test -> Edit Test Run Configurations ->
- Local Test Run (localtestrun.testrunconfig)*
- +
- image:files/02_Edit_Test_Run_Configurations.jpg[height=250]
-
-* Select *Controller and Agent* at the top left. Select the *Remote* radio
- button. Select the controller.
- +
- image:files/03_Select_Controller.jpg[height=350]
-
-* Click **Apply**. Once you have selected Apply, you will receive a prompt
- saying
- +
- image:files/04_Answer_Prompt.jpg[height=140]
-
-* Click *Ok*
-
-* Click *Close*
-
-Once all of that is done, it's time to run your test. You'll notice that once
-your test has been run, at the bottom left side of the results you'll see a
-reference to the remote controller and that it controlled X many agents.
-
-Happy Testing!
-
-
-Category:Microsoft
-Category:Visual_Studio
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Running_Web_Services_on_Non-Standard_Ports.ascii b/src/Running_Web_Services_on_Non-Standard_Ports.ascii
deleted file mode 100644
index 5f9eaf0..0000000
--- a/src/Running_Web_Services_on_Non-Standard_Ports.ascii
+++ /dev/null
@@ -1,66 +0,0 @@
-Running Web Services on Non-standard Ports
-==========================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-Working in the world of systems administration has taught me a lot, especially
-in regards to security. One thing I hope to never take for granted is the
-seemingly endless pool of knowledge there is in IT departments. There's almost
-always something new to learn from someone.
-
-Since I have been learning so much from other people, I decided to rebuild my
-web server in an attempt to harden it a bit and to practice building stuff (who
-doesn't like building stuff, I mean come on...Legos anyone?). One of the things
-I changed in my process was building everything from source with non-privileged
-users rather than installing it from repos. One of the advantages to doing this
-is that each of your services will be running as users that have no access to
-the rest of the system if their accounts are set up right (ie: no sudo, ssh, or
-cross service access). The one disadvantage to this is that the services can't
-bind to ports below 1024. For web servers, this really only affects apache,
-nginx, lighttpd, or whatever web server you are using since most other
-software (ie: php, mysql, etc) runs on ports higher than 1024.
-
-With that, people don't visit our websites on some randomly selected port for a
-web server, do they?
-
-Nope
-
-So how do we allow them to visit our web server running on a different port
-other than 80?
-
-The answer is iptables using NAT. Basically what we need to do is take incoming
-traffic to port 80 and route it to our web server port (in my case, this is
-8080). This of course can work for other services as well, but for the purposes
-of this post, we'll simply translate port 80 traffic.
-
-The iptables commands you'll need for this are as follows:
-
-----
-iptables -t nat -A PREROUTING -p tcp -m tcp --dport 80 -j REDIRECT --to-ports 8080
-----
-
-What we've got here is not super hard. Basically, before we do anything else
-with our port 80 (--dport 80) tcp (-p tcp -m tcp) network traffic (the
-PREROUTING chain of the nat table, hence -t nat), we want to redirect (-j
-REDIRECT) the traffic to port 8080 (--to-ports 8080). You can of course do
-this with https traffic as well. Here's another example using that one.
-
-----
-iptables -t nat -A PREROUTING -p tcp -m tcp --dport 443 -j REDIRECT --to-ports 8443
-----
-
-Pretty handy, huh?
-
-One note on this before signing off. If you have your INPUT chain set to drop
-all, you need to add an accept rule for tcp port 80 and your web server port
-(8080 and 8443 in the examples).
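-
-For the examples above, those accept rules would look something like this (a
-sketch):
-
------
-iptables -A INPUT -p tcp --dport 80 -j ACCEPT
-iptables -A INPUT -p tcp --dport 8080 -j ACCEPT
------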
-
-
-Category:Linux
-Category:iptables
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/SQL_2008_Reinstall_Errors.ascii b/src/SQL_2008_Reinstall_Errors.ascii
deleted file mode 100644
index 2cb6715..0000000
--- a/src/SQL_2008_Reinstall_Errors.ascii
+++ /dev/null
@@ -1,91 +0,0 @@
-SQL 2008 Reinstall Errors
-=========================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-Hello again all. Recently, after the server build was 'finished', I discovered
-that the SQL install was not configured to use the proper authentication method
-or service accounts (oops), and without mixed mode authentication enabled,
-windows authentication could not be used to log in to sql to fix these things.
-That being said, I had to uninstall SQL 2008 (standard edition) and do a
-reinstall to correct these issues. Time to grab some popcorn and a drink and
-sit back to watch that entertaining progress bar as it slowly creeps across the
-tiny 800x600 virtual console window.
-
-I configured the SQL install and ran into an ambiguous error (how typical).
-
-----
-This access control list is not in canonical form and therefore cannot be modified.
-----
-
-How quaint. Thankfully, after searching for a few minutes with our friend
-Google, I stumbled upon a Microsoft feedback article that seemed to contain my
-answer.
-
-Here's what needs to be done.
-
-Navigate in an explorer window to
-
-C:\Program Files\Microsoft SQL Server\100\Setup Bootstrap\Log
-
-The link at the end of this post to the Microsoft feedback article says that
-from there you open up the "**details.txt**" file. I found that that folder
-actually contained 11 folders (from the current install and the previous
-install) and a file called "summary.txt". I found the right "**details.txt**"
-file in the most recently created folder.
-
-Once you've located the right "details.txt" file, open it up in notepad (or
-your editor of choice) and scroll to the end of the file (it's pretty big so
-use the scroller bar). Near the end, you should see some text that looks
-similar to...
-
-----
-2009-05-30 18:02:40 Slp: Sco: Attempting to set directory full path
-2009-05-30 18:02:40 Slp: Sco: Attempting to normalize directory path C:\Program Files\Microsoft SQL Server\100\COM
-2009-05-30 18:02:40 Slp: Sco: Attempting to check if directory C:\Program Files\Microsoft SQL Server\100\COM exists
-2009-05-30 18:02:40 Slp: Sco: Attempting to set security descriptor for directory C:\Program Files\Microsoft SQL Server\100\COM, security descriptor D:(A;OICI;FRFX;;;S-1-5-80-3263513310-3392720605-1798839546-683002060-3227631582)
-2009-05-30 18:02:40 Slp: Sco: Attempting to check if directory C:\Program Files\Microsoft SQL Server\100\COM exists
-2009-05-30 18:02:40 Slp: Sco: Attempting to normalize security descriptor D:(A;OICI;FRFX;;;S-1-5-80-3263513310-3392720605-1798839546-683002060-3227631582)
-2009-05-30 18:02:40 Slp: Sco: Attempting to replace account with sid in security descriptor D:(A;OICI;FRFX;;;S-1-5-80-3263513310-3392720605-1798839546-683002060-3227631582)
-2009-05-30 18:02:40 Slp: ReplaceAccountWithSidInSddl -SDDL to be processed: D:(A;OICI;FRFX;;;S-1-5-80-3263513310-3392720605-1798839546-683002060-3227631582)
-2009-05-30 18:02:40 Slp: ReplaceAccountWithSidInSddl -SDDL to be returned: D:(A;OICI;FRFX;;;S-1-5-80-3263513310-3392720605-1798839546-683002060-3227631582)
-2009-05-30 18:02:40 Slp: Prompting user if they want to retry this action
-----
-
-The text you're looking for is the directory path listed after the text
-
-----
-Attempting to normalize directory path
-----
-
-Open up another explorer window and navigate to (not inside) the directory that
-is specified after the previous quote. Right click the directory (in this case,
-the directory is COM within the directory 100), select "Properties", and open
-the "Security" tab. Windows should give you an error that says something along
-the lines of the permissions being out of order and might not be effective
-(sorry...I forgot to copy that error). Click "OK" on that error to let Windows
-sort the permissions.
-
-Click "OK" on the properties window to close it out as well.
-
-Go back to your installer now and click "Retry" on the error window.
-
-I had to fix two directories. The guy in the Microsoft feedback article said he
-had to fix five directories. That being said, this may need to be done more
-than once.
-
-That about sums this up. The article I found that helped me get started fixing
-this can be found here:
-
-http://connect.microsoft.com/SQLServer/feedback/ViewFeedback.aspx?FeedbackID=355216
-
-Dirk
-
-
-Category:Microsoft
-Category:MsSQL
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/SQL_Server_2008_Memory_Management.ascii b/src/SQL_Server_2008_Memory_Management.ascii
deleted file mode 100644
index 4125534..0000000
--- a/src/SQL_Server_2008_Memory_Management.ascii
+++ /dev/null
@@ -1,72 +0,0 @@
-SQL Server 2008 Memory Management
-=================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-Once again, hello all. Recently I had a problem with SQL server. I was sifting
-through the processes in Task Manager a few days ago ordered by memory
-consumption. At the top of the list for memory consumption was SQL Server
-(sqlserver.exe) weighing in at 200 megabytes of memory. I decided to look past
-that one since 200 megabytes isn't too unreasonable for SQL, especially when
-it's hosting the data for quite a few SharePoint web applications.
-
-Today, I checked again. After my server had been online for two and a half
-days, SQL server had grown to over 650 megabytes of memory (653,224 KB
-specifically). Seeing as how I have not made any changes to my local SharePoint
-environment in that time (I'm currently developing a non-SharePoint related
-project), I decided to look into putting a cap on the memory consumption of
-SQL. Originally I had 2 gigabytes of ram for my server. I added an extra
-gigabyte to that and SQL took up the additional space.
-
-As it turns out, one can put a maximum and a minimum limit on SQL. Here's how.
-
-Open up SQL Server Management Studio 2008
-
-Type in the information to connect to the server that has SQL server running on
-it and click connect.
-
-Right click the server name
-
-image:files/MgmtStudio1.jpg[height=400]
-
-Click Properties
-
-Select Memory on the left side of the window that comes up
-
-image:files/MgmtStudio2.jpg[height=400]
-
-Under Server Memory Options, adjust the minimum and maximum memory settings to
-what you need.
-
-Click OK
-
-Right Click the server name again
-
-Select Stop from the menu
-
-Click necessary buttons to get through the prompts
-
-Right Click the server name yet again
-
-Select Start from the menu
-
-Click the necessary buttons to get through the prompts
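-
-If you'd rather skip the GUI, the same cap can be set from a query window (a
-sketch; the 1024 MB value here is just an example):
-
------
-EXEC sp_configure 'show advanced options', 1;
-RECONFIGURE;
-EXEC sp_configure 'max server memory (MB)', 1024;
-RECONFIGURE;
------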
-
-And that's it. Mine (as in the screenshots) has yet to go over 300 megabytes of
-memory consumption.
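-
-If you'd rather not click through Management Studio, the same limits can be
-set with sp_configure through sqlcmd. This is a sketch rather than anything
-from the walkthrough above; it assumes a default local instance, and the
-1024 MB cap is just an example value:
-
------
-sqlcmd -S localhost -Q "EXEC sp_configure 'show advanced options', 1; RECONFIGURE;"
-sqlcmd -S localhost -Q "EXEC sp_configure 'max server memory (MB)', 1024; RECONFIGURE;"
------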
-
-Thanks for reading.
-
-
-Dirk
-
-
-
-Category:Microsoft
-Category:MsSQL
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/SSH_Tunnel_Forwarding.ascii b/src/SSH_Tunnel_Forwarding.ascii
deleted file mode 100644
index 171262d..0000000
--- a/src/SSH_Tunnel_Forwarding.ascii
+++ /dev/null
@@ -1,73 +0,0 @@
-SSH Tunnel Forwarding
-=====================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-== {doctitle}
-
-Yesterday, I had an idea that remarkably enough, actually worked (go figure,
-huh). I have a few friends who use Linux on their desktops but aren't quite
-Linux gurus (but who am I kidding, neither am I as evidenced by this post).
-Don't get me wrong of course, I'm super proud to have friends that aren't IT
-people but use Linux on their desktops. That speaks a lot to the quality of
-the work the Linux community has produced.
-
-Despite the whole Linux thing, they still occasionally have issues and call me
-for help. Most of the time, I just need GUI access to troubleshoot router
-issues on their side or something like that. Now, telling someone how to port
-forward and open up firewall ports on a router you don't know just so you can
-directly connect to their laptop/desktop through ssh can be really painful over
-the phone most of the time.
-
-
-[[enter-the-brick-that-hit-me-in-the-head-yesterday...]]
-== Enter the brick that hit me in the head yesterday...
-
-I was driving to lunch yesterday and began wondering if it would be possible to
-have two computers tunnel to a central server on the same port and in essence,
-forward traffic between the ports. As it turns out, this actually works (!!!),
-and it's really easy too.
-
-So, for our example we'll have three computers Me, Nexus, and Douglas (you know
-who you are). Nexus is our central server that's accepting ssh connections and
-Douglas is my friend that needs help. It doesn't matter which order these
-connections need to be made in. Additionally, we're going to assume that our
-friend's vnc server is set up and listening on 5901.
-
-First (not really), you need to connect to the central server
-(nexus.example.com for our example). To do this, open a terminal and type
-
-----
-ssh -L 5901:localhost:5901 me@nexus.example.com
-----
-
-Second (again, not really), our good friend Douglas needs to connect to the
-nexus as well. To do that, he needs to open a *reverse* tunnel to the nexus
-using the following command:
-
-----
-ssh -R 5901:localhost:5901 douglas@nexus.example.com
-----
-
-Open your VNC client and connect to localhost:5901 and you should be golden!
-
-Please take note of the differences in the two commands we just used. The only
-difference (aside from the usernames) is the switch used for the tunnel. The
-*-L* establishes a standard tunnel and the *-R* establishes a reverse tunnel,
-which allows the traffic to be forwarded to another tunnel connected on the
-same port.
-
-There is one security issue with this that could potentially cause you grief if
-you don't own the central server. If you don't own the box exclusively, other
-users on the box could also connect to the reverse tunnel. If you do own the
-box though, this shouldn't be an issue for you.
-
-_Insert clever post ending here_
-
-
-Category:SSH
-Category:VNC
-Category:Linux
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/SSH_VPN.ascii b/src/SSH_VPN.ascii
deleted file mode 100644
index f5eef2b..0000000
--- a/src/SSH_VPN.ascii
+++ /dev/null
@@ -1,89 +0,0 @@
-SSH VPN
-=======
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-== {doctitle}
-
-Nope, I didn't just yell at you using all caps in the subject. Just for the fun
-of it, let's expand that one out.
-
-"Secure Shell Virtual Private Network"
-
-That sure sounds like a phrase you'd hear in some bad hacker movie.
-
-All sarcasm aside, this is probably one of the coolest things you can do with
-SSH in my opinion. I wrote link:SSH_Tunnel_Forwarding[a post] about this a
-ways back, but it was limited only to forwarding and reverse forwarding SSH
-tunnels. I recently discovered though that SSH can open this cool thing called
-a http://en.wikipedia.org/wiki/SOCKS[SOCKS proxy] (short for Socket Secure
-Proxy) when using the *-D* switch. SOCKS proxies, unlike SSH tunnels, allow you
-to funnel all protocols/traffic through this one port, just like a VPN. The
-one downside is that to use this for everything, you either have to be
-masterful with
-iptables, have http://sourceforge.net/projects/tsocks/[tsocks] installed, or
-have the BSD version of netcat installed to work some magic.
-
-
-[[real-application]]
-== Real Application
-
-At work this comes in handy because of the way the networks are set up.
-Avoiding all bias about how right or wrong our networks are configured, I
-often need to connect to a particular remote system that sits in a subnet
-accessible only through two jump systems ( jump0 -> jump1 -> destination ).
-Jump box 1 is only accessible from jump box 0, and the remote system I need
-access to is only accessible from jump box 1. That means to get to my remote
-system, I need to ssh to jump box 0, from there ssh to jump box 1, and from
-there ssh to my destination system. This is really cumbersome when I need to
-work on multiple systems within this far off subnet.
-
-Using an SSH SOCKS proxy though, I can have everything set up so I don't have
-to keep opening three nested SSH sessions just to access a single box. Here's
-how it's done.
-
-
-[[how-its-done]]
-== How it's Done
-
-* SSH to jump box 0 using the following command
-** +ssh -L 1080:localhost:1080 jiminy@jump0+
-* Using the previously established session, ssh to jump box 1 using the
- following command
-** +ssh -D 1080 jiminy@jump1+
-
-We now have two nested ssh sessions. The first forwards remote port 1080 to
-localhost:1080. The second ssh command opens a SOCKS proxy on jump box 0
-through to jump box 1. Remember how port 1080 is forwarded to our local box
-with the first ssh session?
-
-Now, just open an ssh session to any system that is only accessible from jump
-box 1 and your traffic will be forwarded straight on through.
-
-----
-tsocks ssh jiminy@WayFarOut
-----
-
-Yay!
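-
-One note on tsocks: it finds the proxy by reading /etc/tsocks.conf. Here's a
-minimal sketch, assuming the SOCKS proxy from the commands above is listening
-on local port 1080 (the local line must cover the server's address):
-
------
-# Networks reachable directly, without the proxy (must cover the server)
-local = 127.0.0.0/255.0.0.0
-# The SOCKS proxy opened by the ssh -D session
-server = 127.0.0.1
-server_port = 1080
-server_type = 5
------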
-
-
-[[one-last-thing...]]
-== One Last Thing...
-
-There was one thing I discovered that was problematic for me on jump box
-0. It turns out that the default configuration for SSH won't allow
-forwarding of SSH traffic. If you're seeing an error like this
-
-----
-channel 0: open failed: administratively prohibited: open failed
-----
-
-...you need to set *AllowTcpForwarding* in /etc/ssh/sshd_config (or
-/etc/sshd_config on some systems) to _yes_ on any boxes forwarding the SOCKS
-proxies. Note that *PermitTunnel* governs tun device forwarding rather than
-port forwarding, so it isn't the setting at fault here.
-
-
-Category:SSH
-Category:Linux
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Samsung_Epic_4g_Syndicate_Rom_:_Freeze_on_Boot.ascii b/src/Samsung_Epic_4g_Syndicate_Rom_:_Freeze_on_Boot.ascii
deleted file mode 100644
index bdcffb8..0000000
--- a/src/Samsung_Epic_4g_Syndicate_Rom_:_Freeze_on_Boot.ascii
+++ /dev/null
@@ -1,40 +0,0 @@
-Samsung Epic 4g Syndicate Rom:Freeze on Boot
-============================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-== {doctitle}
-
-Recently my girlfriend (who might start blogging Epic 4G rom reviews here)
-asked me to root her phone due to the horrendous battery issues the Samsung
-Epic 4g/Sprint stock rom causes. After searching around a bit, I finally
-decided upon a one click root posted
-http://samsungepichacks.com/samsung-epic-hacks/how-to-root-samsung-epic-4g/[here].
-The root went with no snags until I tried to flash Syndicate Rom 1.1.1. The
-flashing process indicated there were no issues until I rebooted the phone. It
-got stuck in boot. This presented an unfortunate problem for me. As it turns
-out, USB support for version 2.5 of Clockworkmod on the Epic 4g is not
-functional. Through a very complicated process of pulling the sd card, putting
-it in my HTC Evo, copying the files through that, and putting the sd card back
-into her phone, I tried different downloads of Syndicate Rom with none of them
-working.
-
-Then it dawned on me...
-
-The Ext4 filesystem was introduced into Android at version 2.2, the version
-that Syndicate builds on. After some research, I discovered that Clockworkmod
-doesn't support Ext 4 until version 3. With that, I searched for Clockworkmod 3
-for the Epic 4g. I flashed version 3.1 and reflashed Syndicate Rom and all was
-well.
-
-There was much rejoicing
-
-On a related note, I also discovered that if you format the Epic 4g sd card
-from Clockworkmod 3, it runs much faster. I can only guess that this is because
-the sd card is originally formatted with Samsung's proprietary file system, RFS
-(robust file system... see last entry http://en.wikipedia.org/wiki/RFS[here]).
-
-
-Category:Android
-
-// vim: set syntax=asciidoc:
diff --git a/src/Scheduling_Jobs_in_Linux.ascii b/src/Scheduling_Jobs_in_Linux.ascii
deleted file mode 100644
index 2bfc61b..0000000
--- a/src/Scheduling_Jobs_in_Linux.ascii
+++ /dev/null
@@ -1,73 +0,0 @@
-Linux:Scheduling Jobs
-=====================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-I recently needed to schedule the reboot of a print server (Ubuntu), but was
-unsure how. After looking around a bit, I landed on the *at* utility, which
-can do all kinds of things. For a simple use case though, here is how to
-reboot a server with at...
-
-Since we're restarting the server, we need root privileges, so we have to run
-this with sudo this time around. Type:
-
-----
-sudo at 23:00
-----
-
-You should see...
-
-----
-warning: commands will be executed using /bin/sh
-at>
-----
-
-Type the command you want to occur at 23:00 (11:00 pm). In our case, we're
-going to restart the server.
-
-----
-shutdown -r now
-----
-
-Press **enter**. From here you press *ctrl+d* and that will save the job for
-later execution.
-
-The cool thing about at is how intuitive it is. For instance, we just used
-23:00 to schedule a job for 11:00 pm. Instead, we could have typed **at
-11:00 pm**. Furthermore, if we wanted to schedule something for tomorrow at
-11:00 pm, we could type **at 11:00 pm tomorrow**. It's a pity for those of us
-who are forgetful...the "at" utility unfortunately does not understand
-yesterday.
-
-That's how to do it.
-
-If you want to list all of the jobs for your user, use the command
-**atq**. If you need to remove a job, use the *atrm* command (this uses
-job numbers from the list atq produces).
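-
-A quick sketch of what that looks like; the job number and timestamp will of
-course differ on your system:
-
------
-$ atq
-3       Sat Jul  4 23:00:00 2015 a root
-$ atrm 3
------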
-
-Happy scheduling.
-
-... Really, be happy. At least you don't have to be there at 3:00 am to reboot
-the server.
-
-----
-sudo at 3:00 am
-at> shutdown -r now
-----
-
-*ctrl+d*
-
-...later at 11:30 pm
-
-----
-echo 'ZZZZzzzzzzzzzz...'
-----
-
-
-Category:Linux
-
-Category:Cron
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Screenshots_from_Command_Line.ascii b/src/Screenshots_from_Command_Line.ascii
deleted file mode 100644
index fdb8512..0000000
--- a/src/Screenshots_from_Command_Line.ascii
+++ /dev/null
@@ -1,40 +0,0 @@
-Screenshots from Command Line
-=============================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-== {doctitle}
-
-Today I wanted to screenshot some work I had done on a vector image inside of
-the window. Now, I have a pretty minimalistic install on my box. Due to this I
-didn't have a screenshot application aside from http://gimp.org[The Gimp]...
-or so I thought.
-
-Like almost everything else in Linux, it turns out you can take screenshots
-from the command line. To do this you use the import command, part of the
-ImageMagick suite.
-
-----
-import image.jpg
-----
-
-This will change your cursor to a plus symbol. Click the window you want to
-screenshot and it'll save it to the current directory.
-
-You may notice however that if your window isn't in the foreground, it may
-require two or more clicks to get the window you want up so you can screenshot
-it. To handle this, we simply need a delay.
-
-----
-import -pause 4 image.jpg
-----
-
-The -pause switch will delay the screenshot by the duration specified. In the
-example, we delay it for four seconds. Once the delay is up, again you will see
-the mouse cursor change to a plus symbol. Select the window you want to
-screenshot and it will save it to the current directory, unless you have
-specified a different one to save to.
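-
-Incidentally, if you want the whole screen rather than a single window,
-import can grab the root window directly, no clicking required:
-
------
-import -window root -pause 4 screen.jpg
------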
-
-Category:Linux
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Scripting_Wma_to_Ogg_Conversion_in_Linux.ascii b/src/Scripting_Wma_to_Ogg_Conversion_in_Linux.ascii
deleted file mode 100644
index 9353862..0000000
--- a/src/Scripting_Wma_to_Ogg_Conversion_in_Linux.ascii
+++ /dev/null
@@ -1,57 +0,0 @@
-Scripting Wma to Ogg Conversion in Linux
-========================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-It's hard for me to believe that I used to be a Microsoft/Windows fanboy. I
-used all their products and signed up for all their beta programs. Now, I'm a
-full time Linux user and open source fanboy (which happens to be a bit of an
-understatement). In my transition from Windows to Linux though, one thing I
-delayed doing was converting my music library away from a proprietary format
-(wma). A few months back though, I finally decided to make the jump. After
-investigating, I finally decided on using ogg as my final format. I went back
-and re-ripped all of my old CDs, but there were some I couldn't find, so I
-needed to convert the wma files to ogg. Now, there is the unfortunate downside
-of converting a compressed format to a compressed format, so I converted to a
-very high quality ogg format in my script to hopefully not lose too much (so
-far everything sounds pretty good).
-
-[[requirements]]
-== Requirements
-
-All you need for this is oggenc and mplayer (yay).
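-
-On a Debian-derived system, both are in the repositories; assuming the usual
-package names (oggenc ships in vorbis-tools), something like this should do:
-
------
-sudo apt-get install vorbis-tools mplayer
------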
-
-
-[[the-script]]
-== The script
-
-----
-#!/bin/bash
-for file in ./*.wma; do
-  # Derive the wav name and strip the leading "./"
-  wavname=${file%.wma}.wav
-  wavname=${wavname:2}
-  # Decode the wma to an intermediate wav
-  mplayer "$file" -ao pcm:file="$wavname"
-  # Re-encode the wav as ogg (-o names the output; -q 9 keeps quality high)
-  oggname=${wavname%.wav}.ogg
-  oggenc -q 9 "$wavname" -o "$oggname"
-  # Drop the ridiculously large intermediate wav
-  rm "$wavname"
-done
-----
-
-
-[[what-just-happened]]
-== What just happened?
-
-So what we just did was start up a for loop for each file in the working
-directory that ends with wma. For each one, mplayer decodes the wma to an
-intermediate wav file and oggenc re-encodes that wav as an ogg. Once we've
-done that, we remove the ridiculously large wav file, leaving us with only the
-original wma and the converted ogg for your (hopefully positive) comparison.
-
-
-Category:Linux
-Category:Music
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Searching_for_a_Command_in_Linux.ascii b/src/Searching_for_a_Command_in_Linux.ascii
deleted file mode 100644
index 51bb6b8..0000000
--- a/src/Searching_for_a_Command_in_Linux.ascii
+++ /dev/null
@@ -1,97 +0,0 @@
-Searching from Command Line in Linux
-====================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-For those of us learning command line for the first time, it can be a pretty
-intimidating thing. A terminal in the hands of a person with a black belt in
-shell commands can be far more powerful than a GUI (graphical user interface).
-However, becoming a so-called black belt takes a lot of time and practice.
-Unlike a GUI, you have to memorize the commands you need...mostly.
-
-I say mostly because there is a command in Linux that is probably one of the
-single most used commands out there. That command is known as the *man*
-command. In interviews in response to a question you don't know, "I would just
-read the man pages" is the equivalent of "Jesus is the answer to everything" in
-church. The great thing is both actually work (not to put an obvious religious
-statement in my blog here).
-
-Man is short for manual. It's like reading your car manual, but for a command
-in your shell. For instance, if you run
-
-----
-man mkdir
-----
-
-You'll see an explanation of what the command does, how to use the command,
-and the various advanced features it supports.
-
-But what if you don't know what command to use?
-
-Thankfully, there is a relatively simple solution to this. All you really need
-to know is how to describe what you want to do in a simplistic way. Ladies and
-gentlemen, that command is the man -k command. I hope you all didn't pay full
-price for your seats because you'll only be using the edge.
-
-
-[[man--k-command]]
-== man -k <command>
-
-The man -k command/switch searches all of the commands that have man
-pages for what you typed in to search for. It then returns the command
-name with a short explanation of what it does. Let's get some practice
-in.
-
-Say you want to search for how to create a directory. We're going to run
-
-----
-man -k "make directories"
-----
-
-And it will return
-
-----
-mkdir  (1)              make directories
-----
-
-Cool, huh? Now, there is a complication to this. If you want to search
-for something and the exact text you type isn't in the manual exactly as
-you typed it, it will not be returned. For instance...
-
-----
-man -k "create directory"
-----
-
-...will return nothing because the manual for mkdir has "make directories" in
-it, not "create directory". How do we get around this?
-
-Wild cards and very simple one word searches.
-
-
-[[wild-cards]]
-== Wild Cards
-
-Now, let's say you're not sure if the manual you're looking for has the word
-directories, directory, or just dir in it. We need a way to search for
-multiple forms of a word. We do this with what is called a wild card
-character. Run the following command:
-
------
-man -k dir*
------
-
-This will search the manuals for any words that start with dir and end with
-anything (including spaces or other words).
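-
-For what it's worth, man -k is the same search exposed by the apropos
-command, so the following does the same job (piping to head just keeps a long
-result list manageable):
-
------
-apropos dir | head
------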
-
-Once you've found the command you want, you can simply type +man <command>+
-and read the manual to your heart's content, or even until it stops!  :)
-
-On a similar note, to get out of reading a manual, press the letter **q**. I
-can't tell you how long it took me to figure that out when I first was learning
-about the man pages. I guess now I should be ashamed of myself.
-
-
-Category:Linux
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Securing_a_Postfix_Smtp_Server.ascii b/src/Securing_a_Postfix_Smtp_Server.ascii
deleted file mode 100644
index 79f20f5..0000000
--- a/src/Securing_a_Postfix_Smtp_Server.ascii
+++ /dev/null
@@ -1,226 +0,0 @@
-Securing a Postfix Smtp Server
-==============================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-I must start this post with the acknowledgement that I know only what I've
-experienced on this topic.
-
-I recently set up my own mail server for the fun of it. I figured it was
-something I'd never done, so why not, right?
-
-Well, one day later, spammers discovered my server and began using it to send
-out spam mail (curse you spammers!). I didn't notice this until I received a
-notification from my hosting provider that my network IO was over the threshold
-I had set. I promptly logged in, tailed the mail logs and discovered
-unbelievable amounts of mail being rejected by Google, Yahoo, Aol, and Hotmail.
-Why? Spam.
-
-With that, I spent the next day figuring out how to better secure my smtp
-server. I'd like to detail some of the exploits that the spammers used to get
-in to my server, how I failed in configuring my server properly, and how I
-fixed it.
-
-[[leaving-an-open-relay]]
-Leaving an Open Relay
-~~~~~~~~~~~~~~~~~~~~~
-
-An open relay is basically an smtp server that requires no authentication
-and/or allows connections from outside ip addresses, so anyone can send emails
-from anywhere to anywhere. The settings in question specific to this issue in
-my configuration were the following:
-
-----
-smtpd_recipient_restrictions = permit_mynetworks, check_relay_domains
-...
-mynetworks = 0.0.0.0/0 127.0.0.0/8 [::fff:127.0.0.0]/104 [::1]/128
-----
-
-Basically that is an open relay. Here's why.
-
-* Firstly, *smtpd_recipient_restrictions = permit_mynetworks* allows any
- email to be sent without any restrictions as long as the email originated
- from a box in the IP ranges specified in the mynetworks variable.
-
-* Secondly, *mynetworks = 0.0.0.0/0* allows emails to be sent through my
- smtp server from any client within the ip range of 0.0.0.0-255.255.255.255.
- This is bad because any computer can try to send emails through my smtp
- server and succeed because of the permit_mynetworks restriction (or lack
-  thereof).
-
-[[specifying-incorrect-configuration-parameters]]
-Specifying Incorrect Configuration Parameters
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-One of my first mistakes when configuring Postfix was misspelling some smtpd
-parameters using smtp_ instead of smtpd_ to prefix them. As it turns out, if
-you do this, Postfix ignores your attempted configuration without a peep. This
-one went on for a long time before I noticed that two of my smtpd_ fields were
-missing the 'd'. As soon as I put those in there, everything started working as
-it should, albeit still insecure, but at least it was following the
-specifications of my config file.
-
-
-[[not-specifying-a-correct-smtpd_sasl_path]]
-Not Specifying a Correct smtpd_sasl_path
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-This one took me a while. The *smtpd_sasl_path* is a path to the socket file
-for your SASL server. In my case, this is Dovecot.
-
-As it turns out, Postfix defaults to running in chroot mode which makes its
-root directory /var/spool/postfix/. This was my first mistake. I was specifying
-
-----
-smtpd_sasl_path = /var/spool/postfix/private/auth-client
-----
-
-and it was not starting up because it couldn't find the socket file. This was
-because it was looking for the file at
-/var/spool/postfix/var/spool/postfix/private/auth-client a path which clearly
-does not exist. The solution to this is to simply specify a relative path.
-
-----
-smtpd_sasl_path = private/auth-client
-----
-
-I decided that I would get smart though and shave off some text from the field
-value by configuring Dovecot to place the socket file at
-/var/spool/postfix/auth-client rather than at
-/var/spool/postfix/private/auth-client (speaking in absolute terms despite
-running in chroot mode). This returned the following error
-
-----
-warning: when SASL type is "dovecot", SASL path "auth-client" should be a socket pathname
-----
-
-As it turns out, postfix won't operate with the SASL socket file path outside
-of the private directory. So with that, I placed my auth-client file back in
-the private directory and Postfix started up fine.
-
-
-[[not-specifying-the-allowed-senders-file]]
-Not Specifying the Allowed Senders File
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Even if you do have authentication required, you still need to specify which
-users can send email with what addresses. This was a bit of a surprise to me
-initially because I was under the impression that a password is associated with
-an email address, not an email address(s) associated with a username and
-password. To keep users from being able to send email as addresses that are not
-theirs (specifically randomly generated addresses in my case), you need to
-create a mapping file that maps usernames to the addresses they are authorized
-to send mail as. In my case, this is a one to one relationship (one address per
-username). Before my example I'd like to note that the filename is not
-required to be the one I use (though my filename is the one used in the Postfix
-setup documentation).
-
-Okay. Let's create the map file. To do this, open up and edit
-/etc/postfix/controlled_envelope_senders (this file likely doesn't exist yet)
-
-----
-vim /etc/postfix/controlled_envelope_senders
-----
-
-Once you've got that open, you simply need to put the maps in there.
-
-----
-# envelope sender owners
-jcricket@example0.com jimminey
-----
-
-Now that we've done that, we need to turn it into a binary. Run the following
-command and it will generate a <filename>.db binary map file in the same
-directory as the original file.
-
-----
-postmap /etc/postfix/controlled_envelope_senders
-----
-
-Presto! Now the user jimminey can send email as jcricket@example0.com. However,
-so can everyone else...still.
-
-Now that we have our controlled envelope senders file, we need to reference it
-in our postfix main.cf and set postfix up to restrict access to the maps
-specified in that file. Crack er open in your favorite editor and put the
-following line in somewhere after *smtpd_sasl_auth_enable*
-
-----
-smtpd_sasl_auth_enable = yes
-...
-# This line specifies our map file for use by postfix
-# Note that this does NOT reference controlled_envelope_senders.db
-smtpd_sender_login_maps = hash:/etc/postfix/controlled_envelope_senders
-# This line sets postfix to reject anyone who authenticates but tries to send email as an address they aren't permitted to use
-smtpd_recipient_restrictions = reject_sender_login_mismatch, permit_sasl_authenticated, reject_unauth_destination
-----
-
-So what we've just done is tell Postfix where our map file is
-(smtpd_sender_login_maps). After that, we tell Postfix to reject any users that
-have been authenticated but are trying to send with an address they aren't
-authorized to send with in our map file (smtpd_recipient_restrictions). Please
-note that *reject_sender_login_mismatch* comes at the beginning of the
-smtpd_recipient_restrictions field. This is key. It is so key in fact, that I
-missed it (I only miss the key stuff of course, thanks Murphy). This was the
-fourth exploit attempt that got me.
-
-
-[[misordering-smtpd_recipient_restrictions]]
-Misordering smtpd_recipient_restrictions
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-This one is the final bit that let the spammers in (so far at least).
-
-The smtpd_recipient_restrictions are restrictions that you can place on
-the users and their emails based on various things. In my case, I had
-the following restrictions string
-
-----
-smtpd_recipient_restrictions = permit_mynetworks, permit_sasl_authenticated, reject_sender_login_mismatch, reject_unauth_destination
-----
-
-Postfix applies these restrictions in the order in which they are specified.
-As the documentation puts it, "Restrictions are applied in the order as
-specified; the first restriction that matches wins." As soon as one
-restriction matches, the ones that follow don't get applied. This was very
-problematic because in my case permit_mynetworks is first. So that I can log
-in from my cell phone which has an IP address that changes, I set
-
-----
-mynetworks = 0.0.0.0/0 127.0.0.0/8 [::fff:127.0.0.0]/104 [::1]/128
-----
-
-which allows any IP address to connect to my SMTP server. Since Postfix takes
-the first match and goes no further and any IP address is in 0.0.0.0/0, anyone
-can send mail through my SMTP server. This = bad.
-
-What you should do is start your restrictions with the most strict
-restrictions followed by the less strict. In my case, that looks like
-
-----
-smtpd_recipient_restrictions = reject_sender_login_mismatch, permit_sasl_authenticated, reject_unauth_destination
-----
-
-In the event someone tries to send an email, first they must log in. If they
-don't log in, they are rejected due to reject_sender_login_mismatch (we can't
-do a match if we don't have a sender username). Secondly, once logged in, the
-user must be authorized to use the address they are trying to send as, as
-specified in the smtpd_sender_login_maps line. Finally, once the user has been
-authenticated and they have permissions to use the address they are trying to
-send as, their email is not rejected. It follows that they are then filtered
-through permit_sasl_authenticated. This basically runs a check to see if they
-are authenticated (which we know they already are because of the previous
-filter) and since they are, they are permitted and Postfix stops looking for
-more matches because it's found one that permits the user to perform their
-requested action.
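-
-Whenever changing these restrictions, it's worth double checking what Postfix
-actually loaded and then reloading. This isn't from the original writeup, but
-postconf -n (which prints every non-default setting) makes it easy:
-
------
-postconf -n | grep smtpd_recipient_restrictions
-postfix reload
------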
-
-As chef Elzar says, "Bam!"
-
-
-Category:Linux
-Category:Postfix
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Server_Administration:Firewalls.ascii b/src/Server_Administration:Firewalls.ascii
deleted file mode 100644
index 9c7450f..0000000
--- a/src/Server_Administration:Firewalls.ascii
+++ /dev/null
@@ -1,41 +0,0 @@
-Server Administration:Firewalls
-===============================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-Hello again all. The majority of the servers that I manage have to communicate
-with many other servers I manage for various reasons. Inevitably, I have many
-firewall blocking issues. Despite my age, I still have trouble remembering all
-of those commonly used ports and what they are for. That being said, this post
-lists the default ports that the various services I deal with need opened in
-the Windows firewall. Yes, these are all readily available at other sites.
-This is simply a central collection.
-
-I will update this post when I have more blocks come up.
-
-[cols=",",width="50%"]
-|===================================================
-|FTP |21
-|HTTP |80
-|HTTPS |443
-|POP3 |110
-|SMTP |25
-|SQL Server Management Studio (remote connect) |1433
-|Terminal Services |3389
-|VMWare Server Administration |8222
-|VMWare Server Administration (Secure) |8333
-|===================================================
-
-Let me know in the comments section if there are any ports you would like added
-to this list.
-
-
-Category:Networking
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Sidebar.ascii b/src/Sidebar.ascii
deleted file mode 100644
index f05a6be..0000000
--- a/src/Sidebar.ascii
+++ /dev/null
@@ -1,5 +0,0 @@
-- Navigation
- - link:index.html[Main]
- - link:about.html[About]
-
-// vim: set syntax=asciidoc:
diff --git a/src/Startup_Sounds_with_SLiM.ascii b/src/Startup_Sounds_with_SLiM.ascii
deleted file mode 100644
index de65a2f..0000000
--- a/src/Startup_Sounds_with_SLiM.ascii
+++ /dev/null
@@ -1,74 +0,0 @@
-Startup Sounds with SLiM
-========================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-For a pretty long time I've been running Xfce for my desktop and SLiM as my
-login manager to save on boot time. This weekend though, I decided that a
-second or two added to my boot time wouldn't bother me too much if it made my
-system a little less utilitarian.
-
-Inevitably, the first places to look are (completely unnecessary, yes I know) a
-fancy theme for Xfce (sprinkle a little transparency in there to taste), new
-icons, cool theme for my login manager, and a startup sound.
-
-Most of that was really easy. The startup sound on the other hand is something
-not so well documented (especially with SLiM). I dabbled around a bit and had
-an idea that, believe it or not, worked on the first try.
-
-First off, I hit up good 'ol gnome-look.org for some system sounds to try out
-and settled finally on the
-http://gnome-look.org/content/show.php/%22Borealis%22+sound+theme?content=12584[Borealis
-sound scheme].
-
-Once you've got the sound file you want (I used Exit1_1.wav for mine), getting
-it to play on startup is actually really simple. All you need to do is add a
-line to your rc.local file that has mplayer play the sound you want. For
-example...
-
-----
-mplayer /home/username/.sounds/Exit1_1.wav
-----
-
-One minor thing here for you SysV users out there: you may not have an
-rc.local file. To do this on a system that uses System V for initialization
-(Debian,
-most forks of Debian I believe, Fedora, etc.), you need to create a script at
-*/etc/init.d/local* (you can call it whatever, but for the purposes of this,
-we'll call it local). Once the script is created, add the following lines
-(referencing my example above)...
-
-----
-#!/bin/sh
-mplayer /home/username/.sounds/Exit1_1.wav
-----
-
-Now that we've added those, we need to make the script executable and
-register it (update-rc.d takes the script's name, not its full path):
-
-----
-chmod +x /etc/init.d/local
-update-rc.d local defaults 80
-----
-
-and you should be good to go.
-
-Now, there is one concern here that could potentially cause some issues.
-If you shut your computer down with the volume turned up, when your
-computer boots back up again, you'll get a nice loud system startup
-sound. To remedy this, we simply need to reset our volume before we call
-mplayer in our script. To do this, we just add the following line before
-the mplayer line:
-
-----
-amixer set Master 30%
-----
-
-That sets our volume to 30%. If you want to set it higher or lower, you can
-just change that percentage value.
-
-Category:Linux
-Category:SLiM
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Streaming_Audio_Over_SSH.ascii b/src/Streaming_Audio_Over_SSH.ascii
deleted file mode 100644
index aab6d05..0000000
--- a/src/Streaming_Audio_Over_SSH.ascii
+++ /dev/null
@@ -1,71 +0,0 @@
-Streaming Audio Over SSH
-========================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-At home, I have a server/desktop running nearly 24/7
-(https://archlinux.org[Arch Linux] if anyone is wondering). I use this server
-for all kinds of experiments. My backups are there (well, it's one of my backup
-locations). My home dlna server is there. It's also hooked up to my sound
-system for http://musicpd.org[mpd] so I can have it play my music, controllable
-by any device on my home wifi. Recently however, I wanted to be able to stream
-my laptop's audio over my sound system, without having to plug it in directly.
-The reason being I wanted to stream Spotify over said sound system, but didn't
-want to go to the hassle of plugging in a keyboard and mouse, and installing a
-GUI and plugging my server in to a monitor, just so I can occasionally listen
-to music through not-so-bad speakers. Then I wondered, you can do just about
-anything with SSH, why not try to stream audio over it. Here's how I do it
-(there are many other ways).
-
-[[requirements]]
-== Requirements
-
-The server (the computer hooked up to the sound system) needs *mplayer*
-installed so it'll have something to play the audio with.
-
-The audio source system (my laptop in this case) needs alsa-utils installed,
-specifically for the *arecord* application.
-
-Obviously both the server and the audio source system need ssh installed (and
-the daemon running on the server).
-
-
-[[command]]
-== Command
-
-Not too much to say here.
-
-----
-arecord -c 1 -r 32000 | ssh <user>@<server> 'mplayer -demuxer rawaudio -rawaudio channels=1:rate=32000:samplesize=1 -nocache -'
-----
-
-So what that command does is...
-
-arecord::
- Is a command line program for recording from audio devices. If no output file
- is specified (like in this case), it writes what it records to stdout. For
- our purposes, we pipe stdout to ssh in the next command.
-
-ssh...mplayer::
- Here we send stdout from the previous command (hence the pipe) straight to
- the server over ssh. Mplayer on the server plays what it receives from stdin
- (the final - ). The rest of the mplayer flags are just for audio quality
- control (same for the flags on arecord). The -nocache reduces delay a bit,
- but in some cases can cause skipping, so you might want to remove that
- switch.
-
-
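-If your network can take it, a CD-quality variant of the same pipe works too.
-This is a sketch, assuming arecord's -f cd preset (16-bit, 44.1 kHz, stereo)
-with matching rawaudio options on the mplayer side:
-
------
-arecord -f cd | ssh <user>@<server> 'mplayer -demuxer rawaudio -rawaudio channels=2:rate=44100:samplesize=2 -nocache -'
------
-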
-There is one caveat to this. While it works fine for streaming internet radio
-or any other audio you want really, streaming audio for a video source doesn't
-work nearly as well. On my setup, there is about a .75 second delay, so YouTube
-videos don't sync up. Otherwise though this works swimmingly.
-
-
-Category:Linux
-Category:SSH
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Synchronizing_Playlists_with_a_Sansa_Fuze_and_Media_Monkey.ascii b/src/Synchronizing_Playlists_with_a_Sansa_Fuze_and_Media_Monkey.ascii
deleted file mode 100644
index cde9853..0000000
--- a/src/Synchronizing_Playlists_with_a_Sansa_Fuze_and_Media_Monkey.ascii
+++ /dev/null
@@ -1,62 +0,0 @@
-Synchronizing Playlists with a Sansa Fuze and Media Monkey
-==========================================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-My fiancée has a Sansa Fuze. It works well for her most of the time, except when
-she wants to synchronize her playlists with it. She also uses
-http://www.mediamonkey.com/[Media Monkey] for her library management.
-Apparently, in recent months Media Monkey has made various updates to their
-software that have broken all kinds of stuff (making updates now mandatory of
-course to keep things working). One of the things to break was, of course,
-playlist syncing.
-
-Now, her playlists will sync. It will send the playlist file along with all
-the music it references to the player; however, the playlist file (.m3u) is
-corrupt
-and shows no songs when viewed on the Fuze. When I cracked one of the playlist
-files open, I noticed it was referencing the partition on her laptop's hard
-drive (H: in this case). That clearly can't be right. To make a long story
-short, I played with Media Monkey (version 4.0.3) for some time until I figured
-out how to automate the synchronization process without having to go back and
-manually edit some playlist files (though I guess a batch script could do the
-trick...ugh).
-
-To sum up the solution, you need to head on over to your Sansa Fuze's options
-screen in Media Monkey and set it up to match the following:
-
-image:files/Sansa-Fuze-MM-Playlist-Options.jpg[height=500]
-
-So basically, what each of those settings does is
-
-* Put *#EXTM3U* at the top of each playlist (Use extended M3U)
-
-* Use relative paths for files (i.e. Music\Feist... rather than
- D:\Music\Feist...) (Force relative paths)
-
-* Set the playlist's location in the root path rather than in your
- Playlists directory (Destination directory \ )
-
-A sample of a good playlist file that should work with your Fuze looks
-like
-
-----
-#EXTM3U
-Music\Unknown Unknown\00 Stuck In the Middle With You.mp3
-Music\Feist Unknown\00 1 2 3 4.mp3
-Music\White Rabbits Unknown\00 Percussion Gun.mp3
-Music\Unknown Artist Unknown\07 Scumbag.wma
-----
-
-Finally, one semi-related tip on this. I noticed that synchronizing her Fuze in
-MTP mode was really slow. If you switch it to MSC, your file transfers will go
-much faster (I was able to get a song every two to three seconds).
-
-
-Category:Media_Monkey
-Category:Music
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Sysprepping_Server_2008.ascii b/src/Sysprepping_Server_2008.ascii
deleted file mode 100644
index 0d3ac4f..0000000
--- a/src/Sysprepping_Server_2008.ascii
+++ /dev/null
@@ -1,76 +0,0 @@
-Sysprepping Server 2008
-=======================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-Hello all,
-
-This is my first post on this blog (if it wasn't obvious enough), but I'm going to
-spare you all the boring 'welcome to the blog' and get down to business before
-I forget what I am blogging about here.
-
-I do need to mention a few things before getting started though
-
-My name is Dirk and I work at the College of Business in my university,
-specifically on the web development team. I program for SharePoint but also
-enjoy building and maintaining servers. My current project is to build and
-maintain ten virtual (Hyper-V) Server 2008 development servers for the team.
-Each team member needs a semi-isolated development environment with a unique
-SharePoint instance so if their tests crash it, it does not affect any other
-people.
-
-Alright, now that that's out of the way, here's the good stuff...
-
-Now, continuing along the same lines as the subject of this post, I have been
-researching the unattend.xml file for sysprepping a system for the last week in
-an effort to find a template I could fill out for our latest Server '08 builds.
-A guy from another section of our IT department has a sysprep file from Windows
-XP, which apparently doesn't work for Server 2008 (well...it was worth a try
-though).
-
-All week I have been Googling things like 'create unattend.xml', 'generate
-unattend.xml', 'sysprep unattend.xml', 'sysprep unattend file', and various
-other searches that escape me now. Today I thought I'd try sysprep.xml because
-I recalled seeing that somewhere. Lo and behold, I discovered (as the first
-search result), the exact website I needed. It has a template sysprep.xml file
-that was actually designed for my exact circumstance (incredible, yes?).
-
-Here's the link:
-
-http://msmvps.com/blogs/ad/archive/2008/03/18/windows-server-2008-sysprep-xml-help.aspx
-
-To use the sysprep.xml file, I have a batch file I created that runs the
-command...
-
-----
-C:\Windows\System32\sysprep\sysprep.exe /oobe /generalize /shutdown /unattend:C:\ProgramData\sysprep\sysprep.xml
-----
-
-/generalize:: Removes all unique identifiers of the machine.
-
-/shutdown:: Specifies that the machine shutdown after the sysprep process
- rather than restarting
-
-/unattend:: Specifies the answer file to apply (sysprep.xml in this case).
-
-Many thanks Brian. You brought my seemingly endless search for a sysprep
-template file to an end.
-
-To end on an even more positive note, after sysprepping the server build, I did
-not have to respond to a single prompt with the exception of logging in as
-Administrator. It did everything for me.
-
-Regards,
-
-Dirk
-
-
-Category:Windows
-
-Category:Microsoft
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/TFS_2008:Permissions_for_Creating_Team_Projects.ascii b/src/TFS_2008:Permissions_for_Creating_Team_Projects.ascii
deleted file mode 100644
index 49f4c42..0000000
--- a/src/TFS_2008:Permissions_for_Creating_Team_Projects.ascii
+++ /dev/null
@@ -1,96 +0,0 @@
-TFS 2008:Permissions for Creating Team Projects
-===============================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-I recently finished my Team Foundation build and am quite pleased with it. With
-Team Foundation Server being new to me, I ran into a few issues with
-permissions.
-
-The Team Foundation server I built will be managing my team's code as well as
-two other teams once everyone has moved over.
-
-That being said, I need to give out permissions for the managers of each group
-so they can create team projects for their teams.
-
-
-[[lets-get-started]]
-Let's Get Started
------------------
-
-image:files/01TeamExplorerTeamProjects.PNG[height=300]
-
-Permissions for this are somewhat complicated in that they have to be given
-in three places for the user(s) to be able to create Team Projects and all of
-the other services associated with one.
-
-We'll start off with the **TFS permissions**.
-
-image:files/02GroupMemberships.PNG[height=400]
-
-By default, the service account has permissions to perform these actions so you
-should be logged in as that account.
-
-Head to your Team Explorer in Visual Studio and connect to your Team
-Foundation Server. Right-click your server name, go to **Team Foundation
-Server Settings**, and select **Group Membership**. From within here you
-should see a group titled **Team Foundation Administrators**.
-Double-click this group and add your user/group to it and you're done
-with the TFS part.
-
-*NOTE:* To keep the Administrators group from becoming cluttered with
-usernames and group names, I created a TFS group for administrators and
-simply added that group to Team Foundation Administrators.
-
-Next up we'll tackle **SQL Reporting Services permissions**.
-
-image:files/03SQLServerReportingServices.PNG[height=250]
-
-For this one you want to go to your SQL Reporting Services page (mine was
-http://servername/Reports ).
-
-Once there, click the *Properties* tab. Click *New Role Assignment* on
-the page that loads. From here, enter the active directory username or
-group name you want to have permissions to create team projects in TFS
-and assign them the *Content Manager* role. Once you're done, click
-*OK* and you're done with the permissions for SQL Reporting Services.
-
-Finally, **Windows SharePoint Services permissions**.
-
-image:files/04CentralAdministration.PNG[height=350]
-
-Head to your central administration site (Start -> Administrative Tools ->
-SharePoint 3.0 Central Administration). Once there, click the *Operations*
-tab at the top left. On that page, select *Update farm administrators'
-group* (it's under the Security Configuration group). From
-here, click *New* to add a new user (the button also has a drop down function
-so if you get that, click **Add User**). On the **AddUser: Central
-Administration page**, type in the username or groupname and add them to the
-*Farm Administrators [Full Control]* group.
-
-There you have it. You should now be good to add as many Team Projects
-as you desire.
-
-[[a-closing-thought]]
-A Closing Thought
------------------
-
-I would recommend that the majority of this be done through active directory
-groups. It makes user management much easier. If someone quits or is let go,
-all you have to do is remove their account from the group in active directory
-and it takes care of everything in Sharepoint, TFS, and SQL Reporting
-Services, instead of having to manually go in and remove the person from
-every location.
-
-On a side note, I'm going to get some lunch...
-
-Category:Microsoft
-
-Category:Team_Foundation_Server
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Team_Foundation_Server_2010_Beta_1_Configuration.ascii b/src/Team_Foundation_Server_2010_Beta_1_Configuration.ascii
deleted file mode 100644
index ca86854..0000000
--- a/src/Team_Foundation_Server_2010_Beta_1_Configuration.ascii
+++ /dev/null
@@ -1,78 +0,0 @@
-Team Foundation Server 2010 Beta 1 Configuration
-================================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-After finishing my last post on the installation of Team Foundation Server 2010
-Beta 1, I closed down for the day and went home (now now, no judging...that was
-a long post). Today I'm back to go over the configuration of TFS 2010 Beta.
-
-If you're coming to this blog post from my last one on the installation of TFS
-2010 Beta, you will have just restarted your server and the Team Foundation
-Server configuration screen should be up. That's where we'll be starting here.
-
-image:01_Welcome.jpg[height=300]
-
-At the first configuration page, you must decide which configuration path you
-want to take. The descriptions that the page gives are quite helpful in
-deciding which path to take since they give "You want to use this if..." and
-"You don't want to use this if..." sections for each option. For my purposes
-(Single-server installation with Sharepoint not installed yet and using the
-default instance of SQL Server), the *Default Configuration* will suffice.
-Click **Next**.
-
-The next page of the configuration wizard (if Default Configuration was
-selected) simply indicates that some tests will be performed on the server to
-determine if certain pre-requisites are met for a proper configuration. Click
-**Next**.
-
-image:03_Enter_Service_Account.jpg[height=300]
-
-Here you are prompted for a service account. This account will be used as the
-service account to run Windows Sharepoint Services (WSS) and SQL Reporting
-Services. For my instance, I created a domain user account called TFSWSSService
-(creative, yeah?). If you want, you can click *Test* to confirm that the
-username and password work. After typing in the requested username and
-password, click **Next**.
-
-image:04_Rediness_Checks_Successful.jpg[height=300]
-
-Here the configuration wizard runs tests on your server to confirm everything
-is installed properly. The first time through, I received a warning on the
-first test because my firewall was disabled (I talked about that in the last
-post regarding installing TFS). Since we're all learning here, I elected to
-re-enable my firewall so I could have the problems that accompany closed
-firewall ports (the more problems you have, the better you get at
-troubleshooting the given system, right?). Click **Next**.
-
-image:05_Applying_Configuration_Settings.jpg[height=300]
-
-Here's where the real fun begins...if you can classify sitting around for about
-20 minutes watching a looping progress bar as fun.
-
-image:06_Success.jpg[height=300]
-
-Once the configuration is complete and assuming you had no errors or warnings
-on previous screens, you should see a screen that says what we all love to
-exclaim at the end of a long project...SUCCESS. The great thing is that if you
-click **Next**, it continues to say Success as well as telling you where you
-can find the configuration log. How considerate. Click **Close**.
-
-That's it for now. If you followed my last post on the installation of TFS, you
-probably noticed that I installed Team Build as well as TFS. My next post will
-be on the configuration of Team Build.
-
-Thanks for reading.
-
-Dirk
-
-
-Category:Microsoft
-
-Category:Team_Foundation_Server
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Team_Password_Management.ascii b/src/Team_Password_Management.ascii
deleted file mode 100644
index b48fb36..0000000
--- a/src/Team_Password_Management.ascii
+++ /dev/null
@@ -1,112 +0,0 @@
-Team Password Management
-========================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-A while back I started looking for alternate means to manage my passwords,
-specifically because I started playing more with pgp encryption. I thought it'd
-be neat to be able to use pgp to encrypt a password database and/or use git to
-version the passwords. It turns out that someone had the idea before I did: the
-developers of password-store.
-
-Password-store, or pass, is a [very impressive] command line bash script that
-uses git to version passwords, and pgp keys to encrypt/decrypt each password.
-Specifically to this post though, it implements support for something that
-pgp/gpg supports: the --encrypt-to switch.
-
-
-== gpg --encrypt-to
-
-The --encrypt-to switch for the gpg command allows for encryption of the given
-stream to multiple recipients. For the purposes of password management, it
-allows for each user of the password database to add their pgp key to the
-_.gpg-id_ file. The effect is that each subsequent save of the given password
-re-encrypts it using every pgp key listed in the .gpg-id file.
-
-Effectively, each user of the password repo can have their own password (the
-password to their pgp privat key), whilst not knowing the passwords other
-members are using. This means that if for example, an employee leaves the
-company, the remaining repo members can just remove that person's key from the
-\.gpg-id file, and all further changes (regenerations) of the passwords will
-not be encrypted with the departed employee's key, thus revoking their access.
-
-
-== Setup
-
-Setup for this is fairly simple, if you're accustomed to using git and gpg/pgp.
-The commands for pass are very intuitive.
-
-To create a pass database (assuming you already have it installed), execute...
-
-----
-pass init user@gpg-id-to-be-used.com
-----
-
-To add other user's pgp keys, just add their ids to the .gpg-id file located at
-\~/.password-store/.gpg-id. Each password created after that will be encrypted
-to each user listed in that file.
-
-Note: Remember that each key that you're adding to the .gpg-id file must at
- least have marginal trust in gpg.
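-
-As a sketch of what a two-person store might look like, pass init also
-accepts multiple ids at once and re-encrypts the store for all of them (the
-second id here is a made-up example):
-
------
-pass init user@gpg-id-to-be-used.com teammate@example.com
------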
-
-
-== Questions
-
-=== What about arbitrary users adding themselves to .gpg-id?
-
-The nice thing about gpg is that it will not allow usage of the --encrypt-to
-switch (amongst other switches) without a measure of trust given the key in
-question. This means that if any user does add their key to the .gpg-id file,
-every subsequent password change will yield an error, indicating that the
-password file cannot be encrypted to the given untrusted key.
-
-Another perk to pass is that it versions all changes to the password "database"
-in git, so the user who added their key to the .gpg-id file will have left a
-log entry (assuming they didn't rewrite history to conceal their subterfuge),
-and thus they can be dealt with appropriately.
-
-
-=== What if I want to run more than one database?
-
-Add the following to your .bashrc file.
-
-----
-#
-# Function to override calls to pass binary. Allows for multiple password-store
-# backends. Based on the first argument given to "pass", selects a different
-# password store backend.
-#
-# Example Usage:
-# # Will edit default password-store foo
-# pass edit foo
-#
-# # Will edit alternate, team, password-store foo
-# pass team edit foo
-#
-function pass {
- alt='team'
- if [[ ${1} == ${alt} ]]; then
- export PASSWORD_STORE_DIR=~/.password-store.${alt}
- # Shift off the first argument
- shift
- else
- export PASSWORD_STORE_DIR=~/.password-store
- fi
-
- # Call the actual binary
- /usr/bin/pass "${@}"
-}
-----
-
-That will override calls to the pass binary (usually /usr/bin/pass),
-intercepting the first argument. If the first argument is team, it will look in
-\~/.password-store.team for passwords. If the first argument is not team, then
-it looks in the default location, ~/.password-store.
-
-
-Category:Security
-Category:Encryption
-// vim: set syntax=asciidoc:
diff --git a/src/Theming_Gnome-Shell_Intro.ascii b/src/Theming_Gnome-Shell_Intro.ascii
deleted file mode 100644
index e09a87c..0000000
--- a/src/Theming_Gnome-Shell_Intro.ascii
+++ /dev/null
@@ -1,48 +0,0 @@
-Theming Gnome Shell:Intro
-=========================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-For the last several months, I've been running gnome-shell full time. For
-those of you who don't know what this is, it is version 3 of Gnome, one of the
-many available Linux graphical user interfaces (see
-http://en.wikipedia.org/wiki/GNOME[here] for more info). We are currently on
-something like 2.30 for the stable release of Gnome I believe.
-
-With this new major release of Gnome, its developers significantly changed the
-interface in an attempt to simplify window and virtual desktop management,
-bringing Linux just one step closer to being more "user friendly".
-
-Along with all of this change came a new method for theming and tweaking the
-look and feel of things. In the past, Gnome used multiple configuration files
-that were cumbersome to edit. In this new release, Gnome has switched over to
-using...you guessed it... CSS! How exciting.
-
-Continuing on, for those of you who don't know what CSS is, it's basically a
-"programming language" (I know I know, it's not technically a programming
-language)  used primarily to style websites. In most cases this takes the form
-of setting the background image or color, font size, family and color, and
-various other style-related things on the interwebz. This is really great
-because standards are already in place for CSS making Gnome-shell much easier
-to theme and learn to theme.
-
-If anyone reading this blog doesn't personally know me (which is quite likely
-since Google knows more people than I do), I'm basically addicted to theming my
-computer. If you ask my girlfriend, while we were in college together she would
-be doing homework and I would theme my computer for hours on end when I SHOULD
-have been doing my homework. When Gnome-shell came out, I got addicted pretty
-quickly.
-
-This post is a precursor to my theme postings. I've made so many out of boredom
-(and that I just can't seem to find quite the right look) that I feel kind of
-bad not posting them for all to use. I will also write a few posts regarding
-how to theme Gnome-shell as well. But first, some themes!!!
-
-Category:Linux
-Category:Gnome
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Transferring_Files_from_Windows_to_Linux.ascii b/src/Transferring_Files_from_Windows_to_Linux.ascii
deleted file mode 100644
index 4dee122..0000000
--- a/src/Transferring_Files_from_Windows_to_Linux.ascii
+++ /dev/null
@@ -1,40 +0,0 @@
-Transferring Files from Windows to Linux
-========================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-I recently ran into an interesting situation. I needed to transfer some files
-to two soon-to-be Linux web servers. These servers however, could not be
-modified in any way other than transferring this file. In other words, I
-couldn't install samba or an ftp server (they did at least have ssh
-installed, right?).
-
-After looking around, I found a program called http://winscp.net[winscp] that
-will do just the trick. Here's how it's done.
-
-Head on over to http://winscp.net/ and download the
-appropriate files (I downloaded the portable executable because I didn't want
-to install).
-
-After you've done that, load up winscp and you should see a window looking
-like...
-
-image:files/Winscp01.jpg[height=300]
-
-Fill in your hostname or ip address, username, and password and hit enter. That
-should take you to a window looking something like...
-
-image:files/Winscp02.jpg[height=400]
-
-After that, simply drag and drop your files from the left to the right, or vice
-versa.
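-
-If you'd rather script the transfer than drag and drop, the same job can be
-done over scp from the Windows command line with PuTTY's pscp tool (a separate
-download from the PuTTY site). A minimal sketch, assuming pscp.exe is in your
-PATH and that the paths here are placeholders:
-
-----
-rem Copy a local file up to the Linux server over scp
-pscp.exe C:\path\to\somefile user@yourserver:/var/www/
-----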
-
-Category:Linux
-Category:Windows
-Category:SCP
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Ubuntu_-_Installing_Sun_Java.ascii b/src/Ubuntu_-_Installing_Sun_Java.ascii
deleted file mode 100644
index dcad72a..0000000
--- a/src/Ubuntu_-_Installing_Sun_Java.ascii
+++ /dev/null
@@ -1,26 +0,0 @@
-Ubuntu:Installing Sun Java
-==========================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-*Note to self:*
-
-Ubuntu no longer includes the Java repos in their default builds.
-
-To install Java on a recent Ubuntu machine (9.04 and up I believe), use the
-following commands.
-
-----
-sudo add-apt-repository "deb http://archive.canonical.com/ lucid partner"
-sudo apt-get update
-sudo apt-get install sun-java6-jdk
-----
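-
-Once that finishes, a quick sanity check should confirm the JDK landed (the
-exact version string will vary by update level):
-
-----
-java -version
-----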
-
-
-Category:Linux
-Category:Ubuntu
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Ubuntu_Bridging_Network_Interfaces.ascii b/src/Ubuntu_Bridging_Network_Interfaces.ascii
deleted file mode 100644
index 94ec3a5..0000000
--- a/src/Ubuntu_Bridging_Network_Interfaces.ascii
+++ /dev/null
@@ -1,126 +0,0 @@
-Ubuntu:Bridging Network Interfaces
-==================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-I have recently been struggling with configuring an Ubuntu server to allow
-bridging network interfaces. I had one working long ago on another test
-machine, but it was overwritten with a new install. That being said, for quite
-some time today I researched this and never really found an answer. I did
-however find a few websites that alluded to possible methods for doing this.
-After piecing said methods together, I managed to bridge four Ethernet ports
-together.
-
-All that being said,
-
-
-[[heres-what-i-needed-to-do...]]
-== Here's what I needed to do...
-
-I have four ethernet ports on this awesome quad core xeon processor (hehe...I
-have to brag a little bit at least) powered server. One port plugs into the
-switch and provides the box with access to the interwebz. Another port goes to
-another server, supposed to bring the int3rw3bz to that box as well. The third
-port goes to a wireless router, providing wireless access to the 1nt3rw3bz.
-
-Let's see how poor my spelling of 1nt3rw3bz can get by the end of this...
-
-[[example-assumptions]]
-=== Example Assumptions
-
-You have at least two network adapters. In this case I have
-four Ethernet adapters. This post will be working with those four.
-
-
-[[how-to-do-it]]
-=== How to do It
-
-Run
-
-----
-sudo apt-get update
-----
-
-to make sure that your package lists know about the latest available software.
-
-After that, run
-
-----
-sudo apt-get install bridge-utils
-----
-
-This will install the necessary software to seamlessly bridge network
-interfaces.
-
-Now...
-
-Using your favorite text editor, crack open /etc/network/interfaces
-
-----
-sudo vim /etc/network/interfaces
-----
-
-If you haven't done any manual customization of network interfaces yet, you
-should see something like...
-
-----
-auto lo
-iface lo inet loopback
-----
-
-After this entry, type in
-
-----
-auto <bridgename>
-iface <bridgename> inet dhcp
-bridge_ports <interface> <interface> <interface>
-----
-
-For my specific situation, I used...
-
-----
-auto br0
-iface br0 inet dhcp
-bridge_ports eth3 eth0 eth1 eth2
-----
-
-After that, type
-
-----
-sudo /etc/init.d/networking restart
-----
-
-... and that will bring online your bridge along with all the bridged ports.
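-
-To confirm the bridge actually came up and grabbed the right ports,
-bridge-utils also provides a show command. A rough sketch of what the output
-looks like (the bridge id will differ on your hardware):
-
-----
-brctl show
-# bridge name  bridge id           STP enabled  interfaces
-# br0          8000.0123456789ab   no           eth0 eth1 eth2 eth3
-----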
-
-**If you need your box to have a statically assigned ip address**, don't assign
-it to the interface with the physical internet connection (in my case, eth3).
-Instead, assign it to the bridge itself.
-
-In a situation like mine, your bridge interface would look like...
-
-----
-auto br0
-iface br0 inet static
-address 10.0.1.185
-netmask 255.255.255.0
-network 10.0.1.0
-broadcast 10.0.1.255
-gateway 10.0.1.1
-bridge_ports eth3 eth0 eth1 eth2
-----
-
-There you have it. A network bridge between as many interfaces as you want (or
-at least the four I tested it with). This of course will work with wireless
-interfaces as well, such as bridging an ethernet port to a wireless connection.
-That essentially lets a machine plugged into the bridging computer's ethernet
-port reach the network without its own connection to the wireless router (the
-internet comes in through the wireless card and is piped through to the
-ethernet port).
-
-Happy bridging everyone!
-
-
-Category:Linux
-Category:Ubuntu
-Category:Networking
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Ubuntu_Reinstall.ascii b/src/Ubuntu_Reinstall.ascii
deleted file mode 100644
index de4b883..0000000
--- a/src/Ubuntu_Reinstall.ascii
+++ /dev/null
@@ -1,31 +0,0 @@
-Ubuntu Reinstall
-================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-== {doctitle}
-
-Hello all,
-
-As of late I have been testing out a foolishly large amount of software that
-was in most cases beta. This has led to some boot performance issues. Needless
-to say, I have more residual uninstall files than Captain Kirk has dramatic...
-pauses.
-
-All that goes to say that this evening I will be re-installing Ubuntu 9.10
-Karmic Koala in an attempt to return my boot times to 24 seconds instead of
-the 1 minute they have become...what can I say, I'm used to the semi-annual
-fresh reinstall of Windows. :P
-
-Because of this re-install, I will undoubtedly be running into many of the
-issues I encountered after my initial switch from Windows to Ubuntu. This is
-gonna be good...
-
-Keep watching for some posts about the issues I re-encounter.
-
-
-Category:Ubuntu
-Category:Linux
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Updating_SSH_Keys_Across_an_Environment.ascii b/src/Updating_SSH_Keys_Across_an_Environment.ascii
deleted file mode 100644
index becdea2..0000000
--- a/src/Updating_SSH_Keys_Across_an_Environment.ascii
+++ /dev/null
@@ -1,347 +0,0 @@
-Updating SSH Keys Across an Environment
-=======================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-Most Linux environments with any number of servers use keys to perform tasks
-ranging from simple manual administration to gathering manifests, backing up
-config files across an environment, and really any other kind of automation.
-Why? Because passwords are terrible things (how's that for indignant).
-Seriously though, despite the risks passwords present when minimum constraints
-are not appropriately set or enforced, at least passwords make authentication
-and semi-secure security accessible to people. Of course, keys are preferable,
-but not yet within reach for the general public. Enough of my philosophical
-ramblings about security though. I have several servers that I run (including
-this one) that all use keys almost exclusively for logins. Like passwords,
-keys should be cycled frequently, and if you have things set up right, that
-should be completely painless. Here's the script I wrote to bulk change keys
-across my entire environment.
-
-----
-#!/usr/bin/env bash
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-#
-# @author Nullspoon <nullspoon@iohq.net>
-#
-
-manifest=''
-key=''
-action=''
-id=''
-user=''
-
-#
-# Backups by a specific ssh key to <date_modified>.<key_name>
-#
-# @param ssh_base string Path to where the ssh keys and configs are stored
-# @param key string Name of the key to backup
-#
-# @return string The filename of the key backup
-#
-function backupKeys {
- local ssh_base=$1
- local key=$2
- moved=0;
- date=""
- priv_ls=$(ls -l --time-style='+%Y%m%d%H%M%S' ${ssh_base}/${key})
- date=$(echo "${priv_ls}" | tr -s ' ' | cut -d ' ' -f 6);
- # Rename the old key
- if [[ -e "${ssh_base}/${key}" ]]; then
- mv ${ssh_base}/${key} ${ssh_base}/${date}.${key}
- moved=1;
- fi
- # Rename the old public key
- if [[ -e "${ssh_base}/${key}.pub" ]]; then
- mv ${ssh_base}/${key}.pub ${ssh_base}/${date}.${key}.pub
- moved=1;
- fi
- if [[ ${moved} == 0 ]]; then
- echo ''
- else
- chmod 700 ${ssh_base}/*
- echo "${ssh_base}/${date}.${key}"
- fi
-}
-
-#
-# Pushes specific public key to remote user's authorized_keys
-#
-# @param conn string The user@server connection string for the remote host
-# @param old_key string The key to use for authentication
-# @param new_key string The key, public or private, to be pushed
-#
-function pushKey {
- local conn=$1
- local old_key=$2
- local new_key=$3
- # Sanity check: the path must at least be long enough to end in '.pub'
- if [[ ${#new_key} -lt 4 ]]; then
- echo "Key path '${new_key}' is too short to be a valid key."
- exit
- fi
-
- ispub=$(keyIsPublic ${new_key})
- if [[ ${ispub} == 0 ]]; then
- # Append .pub because a public key wasn't specified
- new_key="${new_key}.pub"
- fi
-
- local cmd="if [[ ! -d ~/.ssh/ ]]; then mkdir ~/.ssh/; fi"
- cmd="${cmd} && echo '$(cat ${new_key})' >> ~/.ssh/authorized_keys"
-
- # Unset our identity file if it doesn't exist
- local id_file="-i ${old_key}"
- if [[ ${old_key} == '' ]]; then
- id_file=''
- fi
- ssh -q ${id_file} ${conn} "${cmd}"
-}
-
-#
-# Removes the specified public key from a remote user's authorized_keys file
-#
-# @param conn string The user@server connection string for the remote host
-# @param key string The key to use for authentication which is to be removed
-#
-function removeRemoteKey {
- local conn=$1
- local key=$2
- pub_key=''
- priv_key=''
- ispub=$(keyIsPublic ${key})
- if [[ ${ispub} == 0 ]]; then
- priv_key="${key}"
- pub_key="${key}.pub"
- else
- priv_key="${key:0:-4}"
- pub_key="${key}"
- fi
- contents=$(cat "${pub_key}")
- local cmd="if [[ ! -d ~/.ssh/ ]]; then mkdir ~/.ssh/; fi"
- cmd="${cmd} && cat ~/.ssh/authorized_keys | grep -v '${contents}' "
- cmd="${cmd} > ~/.ssh/auth_keys"
- cmd="${cmd} && mv ~/.ssh/auth_keys ~/.ssh/authorized_keys"
- ssh -q -i ${priv_key} ${conn} "${cmd}"
-}
-
-#
-# Determines if the specified key is public (or not which would be private).
-#
-# @param key string Path to the key to check
-#
-# @return int Whether or not the key is public
-#
-function keyIsPublic {
- key=$1
- # Too short to even end in '.pub', so it can't be a public key
- if [[ ${#key} -lt 4 ]]; then
- echo 0;
- return
- fi
- # Check the extension
- ext=${key:$((${#key}-4)):${#key}}
- if [[ ${ext} == '.pub' ]]; then
- echo 1;
- return
- fi
- echo 0
-}
-
-#
-# Generates a new ssh key of the length 4096
-#
-# @param filepath string Path to where the new ssh key will be written
-# @param bits int Number of bits in the new key (eg: 2048, 4096, 8192, etc.)
-#
-function genKey {
- local filepath=$1
- local bits=$2
- ssh-keygen -b ${bits} -f "${filepath}" -N ''
-}
-
-#
-# Prints the help text
-#
-function getHelp {
- echo
- echo -n "Manages ssh keys en masse. Designed to perform pushes, "
- echo " removals, and creations of ssh keys on lists of servers."
- echo
- echo "Usage: keymanage.sh action --manifest systems.list"
- echo -n " -m, --manifest Text file containing a list of systems, "
- echo "delimited by new lines."
- echo -n " [-k, --key] Path to a key to perform an action "
- echo "(push or remove) with."
- echo -n " [-i, --id] Key to use for automated logins. Not "
- echo "used when performing an update."
- echo -n " [-u, --user] Username on remote systems to work on "
- echo "(defaults to root)."
- echo
-}
-
-function parseArgs {
- argv=(${@})
- # Parse the arguments
- for(( i=0; i<${#argv[*]}; i++ )); do
- if [[ ${argv[$i]} == '-h' || ${argv[$i]} == '--help' ]]; then
- getHelp
- exit
- elif [[ ${argv[$i]} == '-m' || ${argv[$i]} == '--manifest' ]]; then
- manifest=${argv[$i+1]}
- i=$(( ${i} + 1 ))
- elif [[ ${argv[$i]} == '-k' || ${argv[$i]} == '--key' ]]; then
- key=${argv[$i+1]}
- i=$(( ${i} + 1 ))
- elif [[ ${argv[$i]} == '-i' || ${argv[$i]} == '--id' ]]; then
- id=${argv[$i+1]}
- i=$(( ${i} + 1 ))
- elif [[ ${argv[$i]} == '-u' || ${argv[$i]} == '--user' ]]; then
- user=${argv[$i+1]}
- i=$(( ${i} + 1 ))
- else
- action=${argv[$i]}
- fi
- done
-
- # Enforce some business rules
- echo
- exit=0;
- if [[ ${action} == '' ]]; then
- echo "Please specify an action.";
- echo " Available actions: push, remove, update."
- echo
- exit=1;
- fi
- if [[ ${manifest} == '' ]]; then
- echo "Please specify a manifest file."
- echo " Example: keymanage.sh action [-m|--manifest] ./systems.txt"
- echo
- exit=1;
- fi
- if [[ ${exit} == 1 ]]; then
- exit
- fi
-}
-
-#
-# Determines the path to the parent directory containing a file.
-#
-# @param filepath string Path to the file to get the parent directory for
-#
-# @return string Path to the file's parent directory
-#
-function getFilePath {
- filepath=$1
- filename=$(basename ${filepath})
- echo ${filepath} | sed "s/\(.*\)${filename}/\1/"
-}
-
-#
-# Push main function. One param because the rest are global
-#
-function keyPush {
- argv=( ${@} )
- if [[ ${id} == '' ]]; then
- echo "No identity file specified (-i). This will likely be painful."
- fi
- for (( i=0; i<${#argv[*]}; i++ )); do
- dest=${argv[$i]}
- if [[ ${id} == '' ]]; then
- pushKey "${dest}" '' ${key}
- else
- pushKey "${dest}" ${id} ${key}
- fi
- echo "Key ${key} added for ${dest}."
- done
-}
-
-#
-# Update main function. One param because the rest are global
-#
-function keyUpdate {
- argv=( ${@} )
- ssh_base=$(getFilePath ${key})
- filename=$(basename ${key})
- # Backup our old key
- backup_key="$(backupKeys ${ssh_base} ${filename})"
-
- # Let's get to work on that new key
- genKey "${key}" 4096
-
- for (( i=0; i<${#argv[*]}; i++ )); do
- dest=${argv[$i]}
- if [[ ${backup_key} == '' ]]; then
- echo "No current key exists."
- echo "Skipping backup and removal from remote."
- # Push the new key
- pushKey "${dest}" '' ${key}
- else
- # Push the new key
- pushKey "${dest}" ${backup_key} ${key}
- # Clean up the old key from our remote
- removeRemoteKey "${dest}" "${backup_key}"
- fi
- echo "Key ${key} updated for ${dest}."
- done
-}
-
-#
-# Remove main function. One param because the rest are global
-#
-function keyRemove {
- argv=( ${@} )
- for (( i=0; i<${#argv[*]}; i++ )); do
- dest=${argv[$i]}
- removeRemoteKey "${dest}" "${key}"
- echo "Key ${key} removed from ${dest}."
- done
-}
-
-#
-# The main function
-#
-function main {
- # Parse our script args
- # Believe me, this is a lot better than the alternatives
- parseArgs ${@}
-
- destinations=( $(cat ${manifest}) )
- # Key required
- if [[ ${key} == '' ]]; then
- echo -n "Please specify a key (-k) to ${action}."
- echo
- exit
- fi
-
- # Let's start doing stuff
- if [[ ${action} == 'push' ]]; then
- keyPush ${destinations[*]}
- elif [[ ${action} == 'update' ]]; then
- keyUpdate ${destinations[*]}
- elif [[ ${action} == 'remove' ]]; then
- keyRemove ${destinations[*]}
- fi
-}
-
-main ${@}
-----
-
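-For reference, here are a few example invocations. This assumes the script is
-saved as keymanage.sh and that systems.txt holds one destination per line in
-user@host form (the file and key names here are placeholders):
-
-----
-# Push a new public key everywhere, authenticating with an existing key
-./keymanage.sh push -m systems.txt -k ~/.ssh/newkey -i ~/.ssh/oldkey
-
-# Rotate a key: backup, regenerate, push the new one, remove the old one
-./keymanage.sh update -m systems.txt -k ~/.ssh/mykey
-
-# Remove a key from every host in the manifest
-./keymanage.sh remove -m systems.txt -k ~/.ssh/mykey
-----
-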
-
-Category:Linux
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Upgrading_TFS_2008_Workgroup_to_TFS_2008_Standard.ascii b/src/Upgrading_TFS_2008_Workgroup_to_TFS_2008_Standard.ascii
deleted file mode 100644
index 3edd743..0000000
--- a/src/Upgrading_TFS_2008_Workgroup_to_TFS_2008_Standard.ascii
+++ /dev/null
@@ -1,54 +0,0 @@
-Upgrading TFS 2008 Workgroup to TFS 2008 Standard
-=================================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-About a month ago I was assigned the task of learning how to build and maintain
-a Team Foundation Server. To avoid the costs of purchasing licenses for
-software we were only testing for a production environment, we decided to use
-our MSDNAA copy for our test. Incidentally, the version MSDNAA distributes to
-schools is the **workgroup edition**.
-
-After the build was completed, I decided that Microsoft's Visual Studio Team
-Foundation Server would do everything (and more) that we needed. Due to legal
-restrictions, I couldn't/shouldn't use the MSDNAA license for a production
-environment. Additionally, the workgroup license for TFS only supports five
-users, hardly enough for my team's purposes.
-
-Naturally I wanted to avoid having to reinstall Team Foundation Server, since
-simply inserting the new license key would be the easiest thing to do, if the
-software supported it. I searched around the web for a bit and found a
-Microsoft article on upgrading from workgroup to standard, but it was for TFS
-2005. Nonetheless, it was worth a shot. Coincidentally, it was not too far
-off. Here's how I upgraded my copy and a problem I ran into in the process.
-
-If you go into your *Programs and Features* (if you're using Server 2003 go to
-**Add or Remove Programs**) on the server that TFS is installed on, double
-click the uninstaller for **Microsoft Visual Studio 2008 Team Foundation
-Server ENU** (assuming your copy is English). On the window that comes up you
-should see an option at the bottom to upgrade to Team Foundation Server. Check
-that radio button, enter your new license key, and click Next. You should see
-a window that says the upgrade was successful.
-
-The problem I had with this method was that when I checked the radio button,
-the license key text boxes remained greyed out. When I clicked Next, hoping to
-see a place to input the new license key, I was met with a screen that said my
-upgrade was successful, though it wasn't.
-
-To work around this, insert/mount your Team Foundation Server disk/image and
-run the installer. It
-should give you options to repair, uninstall, or upgrade. In my case, the
-upgrade option automatically showed the license key for my disk. I selected
-that option, clicked Next, and my edition was upgraded to TFS standard.
-
-Tada!
-
-
-Category:Microsoft
-
-Category:Team_Foundation_Server
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Using_SpiderOak_with_Qt_4.7.2.ascii b/src/Using_SpiderOak_with_Qt_4.7.2.ascii
deleted file mode 100644
index 1dde898..0000000
--- a/src/Using_SpiderOak_with_Qt_4.7.2.ascii
+++ /dev/null
@@ -1,57 +0,0 @@
-Using SpiderOak with Qt 4.7.2
-=============================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-I recently updated my KDE version to 4.6, a revolutionary update if I do say
-so myself. Suddenly version 4 of KDE is awesome.
-
-All was well with this update until I tried to run SpiderOak. It started and
-then died without any errors. Upon running it from the command line to see the
-output, it returned the following error:
-
-----
-Cannot mix incompatible Qt library (version 0x40702) with this library (version 0x40701) Aborted
-----
-
-How depressing is this? As it turns out, SpiderOak versions 9810 and before are
-intended for use with Qt from an earlier version of KDE.
-
-After some time of messing around with libraries and symbolic links, I found
-the solution.
-
-When SpiderOak starts, it apparently does a library version check. If you check
-the SpiderOak library directory (**/usr/lib/SpiderOak**), you will find that
-there are many libraries that presumably SpiderOak uses. At least, I thought
-that was the case. Now I think (though this may not be correct) that those
-libraries are there to perform the version check because if you overwrite them
-with more up-to-date libraries, everything works. With that, here's how we do
-this.
-
-Log in as root (or sudo bash) and type the following commands...
-
-----
-cp /usr/lib/libQtCore.so.4 /usr/lib/SpiderOak
-cp /usr/lib/libQtGui.so.4 /usr/lib/SpiderOak
-cp /usr/lib/libQtNetwork.so.4 /usr/lib/SpiderOak
-----
-
-This will overwrite the library files in the SpiderOak directory with symbolic
-links (the files we copied were already links to *.so.4.7.2) pointing to the
-most up-to-date versions of the libraries on your machine.
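-
-To double check that the newer copies are in place, a quick listing of the Qt
-libraries in SpiderOak's directory should now show the updated versions:
-
-----
-ls -l /usr/lib/SpiderOak/libQt*
-----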
-
-Hope this helps someone. I haven't tested much to confirm that this doesn't
-cause any problems, but I can confirm that SpiderOak is semi-running for me (no
-bugs yet).
-
-
-Category:Linux
-
-Category:KDE
-
-Category:Qt
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Vim:Frequently_Used_Bits_and_Doodads.ascii b/src/Vim:Frequently_Used_Bits_and_Doodads.ascii
deleted file mode 100644
index f4b4b4e..0000000
--- a/src/Vim:Frequently_Used_Bits_and_Doodads.ascii
+++ /dev/null
@@ -1,204 +0,0 @@
-Vim:Frequently Used Bits and Doodads
-====================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-I have a friend (yes, you Zeke) who is considering changing from a graphical
-editor to a CLI editor, namely vim, for his web development. His primary
-hesitation though is the learning curve for vim, and rightfully so. Coming from
-a GUI with menus to an environment you can't even exit unless you know what
-you're doing already is pretty intimidating (or fun
-https://www.youtube.com/watch?v=pY8jaGs7xJ0[if that's what you're into]).
-
-I understand this well. When I first started in vim, I was quite taken
-aback by the tremendous amount of documentation. Lots of documentation
-[hopefully] means lots of functionality. Lots of documentation is great
-but if you don't know where to start, it doesn't do you
-http://hyperboleandahalf.blogspot.com/2010/04/alot-is-better-than-you-at-everything.html[alot]
-of good. Here I'm hoping to narrow things down a bit for you folks
-thinking of learning vim.
-
-
-[[two-modes]]
-== Two Modes
-
-Before we start anywhere else, you need to know that there are two modes for
-vim: *command mode* and **insert mode**. Since vim is a command line editor, it
-doesn't [technically] support a mouse (it kind of does, but we won't get into
-that) which means all of your controls are keyboard-based. However, if you also
-use the keyboard keys to write text, you're going to have a lot of conflicts -
-hence two modes.
-
-When you open a text file, you are first put in command mode. That means that
-your whole keyboard won't insert text. Instead, it interprets all key presses
-as commands. The first command you'll want is the letter __i__. This will put
-you into insert mode. Once in insert mode, all key presses will insert text. To
-get/escape out of insert mode, hit the escape key a la top left of yonder
-http://search.dilbert.com/comic/Kybard[kybard].
-
-
-[[frequently-used-features]]
-== Frequently Used Features
-
-
-[[find]]
-=== Find
-
-Finding in vim is actually fairly simple. You just have to remember that it
-searches in two directions: forwards and backwards. To search forwards in
-the document from the present position of the cursor, type the following in
-command mode
-
-----
-/searchterm
-----
-
-To search backwards in the document from the present position of the cursor,
-type the following in command mode
-
-----
-?searchterm
-----
-
-
-[[replace]]
-=== Replace
-
-This one is unfortunately pretty complex. However, for those of you who like
-to wield the power of the gods, find and replace in vim uses
-http://www.regular-expressions.info/[regex].
-
-I won't get heavily into this because this topic is so broad and can take so
-much time to learn because of how robust it can be. You can actually put this
-piece on your resume and you'll likely get quite a few bites.
-
-A basic find and replace would perform a replace on the entire document. For
-example, we want to replace the word _foo_ with __bar__. To do this, do the
-following (actually type the : at the beginning of the expression):
-
-----
-:%s/foo/bar/
-----
-
-That will replace all instances of foo with bar from the beginning to the end
-of the document (the % here means the whole file), unless there is more than
-one instance of foo on a particular line, in which case only the first
-instance on each line is replaced. To really replace them all, just append the
-letter "g", which stands for "global".
-
-----
-:%s/foo/bar/g
-----
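-
-The % can also be swapped out for an explicit line range if you only want to
-touch part of the file. For example, to replace foo with bar only on lines 10
-through 20:
-
-----
-:10,20s/foo/bar/g
-----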
-
-
-[[code-folding]]
-=== Code Folding
-
-Depending on the distribution of Linux you are using, code folding may or may
-not be enabled already. To enable it however, you need to type exactly the
-following command in command mode: +:set foldmethod=indent+. You could also put
-that in your +\~/.vimrc+ file and it will enable it for all future vim
-sessions. Next up, here's how you open and close folded code...
-
-**za/A**: Toggles code folding. Capital A toggles all folds inside of the
- currently selected fold.
-
-**zc/C**: Closes the folded code. Capital C closes all folds inside of the
- currently selected fold.
-
-**zo/O**: Opens the folded code. Capital O opens all folds inside of the
- currently selected fold.
-
-**zr/R**: Opens the first level of folded code throughout the file. Capital R
- will open all folds, including nested.
-
-**zm/M**: Closes the first level of folded code throughout the file. Capital M
- will close all folds, including nested.
-
-I pretty much only ever use zR (open all), zM (close all), and zA (toggle all
-nested under the currently selected fold).
-
-
-[[syntax-highlighting]]
-=== Syntax Highlighting
-
-Depending on what distro of Linux you use, syntax highlighting may or may not
-be enabled for you by default. If it isn't already, there are two ways to turn
-'er on.
-
-When in command mode, type exactly (with the preceding colon) +:syntax on+.
-That should enable syntax highlighting for you. If it doesn't, it's possible
-either the file you are editing doesn't have a known extension (eg: .pl for
-Perl, .php for PHP, .cpp for C+\+, etc) or it doesn't start with a
-http://en.wikipedia.org/wiki/Shebang_%28Unix%29[shebang] that indicates what
-the language is (eg: #!/usr/bin/perl).
-
-
-[[line-numbers]]
-=== Line Numbers
-
-I don't typically put this piece into my .vimrc file because I don't always
-like to see line numbers, especially when I need to copy and paste code.
-However, they do come in handy occasionally. For instance, when you're working
-on a really big perl script, you may want to know what line you are presently
-on so that when you reopen the file later you can hit ":742" and be right back
-where you left off. To show or hide line numbers respectively, try the
-following commands
-
-.Turn on line numbers
-----
-:set nu
-----
-
-.Turn off line numbers
-----
-:set nonu
-----
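-
-If you decide you want any of the settings from this post permanently, they
-can all go in your +\~/.vimrc+ so they apply to every session. A minimal
-sketch combining the ones mentioned so far:
-
-----
-" ~/.vimrc
-syntax on
-set foldmethod=indent
-set nu
-----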
-
-
-[[reading-in-the-output-of-a-command]]
-=== Reading in the Output of a Command
-
-This is super handy for me very often. When I'm editing a file and I need to
-put the output of a command (typically data that I need) into my document
-for further parsing, this really saves the day. Without this, I'd have to
-close vim (or open a new terminal), then run the command and redirect the
-output to append to the end of my file. If I needed it somewhere else in the
-file though, I would then have to reopen the file in vim and move the data
-around. Here's how we can read in output from a command. For this example,
-we'll use the output of __ifconfig -a__. Put the cursor where you want the
-data to go and then, in command mode, type
-
-----
-:read !ifconfig -a
-----
-
-
-[[visual-mode]]
-=== Visual Mode
-
-This one is a real life saver. If any of you have used vi, you likely know the
-wonders of this bit of functionality. For those of you who don't know, in old
-vi, to make a change to more than one line, you had to perform the action and
-tell vim to apply it to the current line and however many lines forwards. That
-means that you have to count lines. If you're going to delete say, 87 lines,
-that's a really big pain. With visual mode, we can highlight the lines we want
-to modify (delete, shave some characters off the front, indent, unindent, etc)
-and simply perform the action and it will be applied to all highlighted lines.
-
-To do this, in command mode, hit _Shift+v_ (or capital V for short).
-Move the cursor up or down and you will see lines being highlighted. To delete
-those lines, hit the _d_ key. To indent them, hit the _>_ key. To unindent, hit
-the _<_ key. To copy/yank them, hit the _y_ key.
-
-To escape visual mode without making changes, just hit the escape key.
-
-
-[[an-awesome-cheatsheet]]
-=== An Awesome Cheatsheet
-
-http://tnerual.eriogerg.free.fr/vimqrc.html
-
-Category:Linux
-Category:Unix
-Category:Vim
-Category:Editors
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Visual_Studio_2010_Debugging_in_Remote_Locations.ascii b/src/Visual_Studio_2010_Debugging_in_Remote_Locations.ascii
deleted file mode 100644
index 022b28d..0000000
--- a/src/Visual_Studio_2010_Debugging_in_Remote_Locations.ascii
+++ /dev/null
@@ -1,35 +0,0 @@
-I was recently writing a server status report program to check the statuses
-of servers and produce a text file report (stunningly good naming scheme, I
-know), and I ran into an error that was quite perplexing, mostly because it
-shouldn't have been happening for various apparent reasons. On launch of a
-unit test, I received the error
-
-----
-Could not load file or assembly 'file:///<insert network drive path here><insert project path here>\bin\Debug\some.dll' or one of its dependencies.
-Operation is not supported. (Exception from HRESULT: 0x80131515)
-----
-
-This is quite the problem since unit testing is a wonderful thing sent down
-from God Himself to bless us developers (who ever said God wasn't good?). **The
-problem here is that Visual Studio won't load untrusted assemblies**, and
-assemblies on a networked drive are not considered trusted. That being said, to
-fix this problem, all we need to do is allow remote sources to be loaded in.
-Here's how...
-
-Open up **C:\Program Files\Microsoft Visual Studio
-10.0\Common7\IDE\devenv.exe.config**. Near the top of the configuration file
-(mine was line 10) you should see an xml parent of **<runtime>**. Directly
-beneath that add *<loadFromRemoteSources enabled="true" />*
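-
-For context, the relevant slice of the config file ends up looking roughly
-like this (surrounding elements trimmed for brevity):
-
-----
-<configuration>
-  <runtime>
-    <loadFromRemoteSources enabled="true" />
-    <!-- ...the rest of the runtime settings... -->
-  </runtime>
-</configuration>
-----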
-
-image:files/devenvConfig.jpg[height=300]
-
-Save and close out *devenv.exe.config* and restart Visual Studio. On restart,
-you should now be able to debug using assemblies in remote locations.
-
-
-Category:Microsoft
-
-Category:Visual_Studio
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/When_Innovation_is_Appropriate.ascii b/src/When_Innovation_is_Appropriate.ascii
deleted file mode 100644
index f24fbf5..0000000
--- a/src/When_Innovation_is_Appropriate.ascii
+++ /dev/null
@@ -1,119 +0,0 @@
-When Innovation is Appropriate
-==============================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-== {doctitle}
-
-I have recently found myself very burned out on IT. Don't get me wrong, I love
-doing anything related to building systems of any kind. I love to design
-systems and I love programming (not that those two are very distinguishable).
-Modular design that implements separation of concerns well is like poetry to
-me. All that goes to say that I still enjoy working with computers in most
-capacities. What's really burned me out is the constant fight.
-
-I work in a heavily regulated industry with a team that is made up of half
-Linux guys and half AIX/IBM guys. I find that the Linux engineers are fighting
-with the AIX guys about ways to do things. They don't want to use any product
-that doesn't come with an enterprise support package (IBM support is
-preferable). They always give the excuse "Well do you want to be the one at
-3:00 in the morning who's trying to figure out some obscure problem by yourself
-with management breathing down your neck, or do you want a third party who you
-can point the finger at and send a ticket to?". The thing about that mentality
-though is that pointing the finger at a vendor doesn't get your systems up any
-faster, nor does it better you as an engineer.
-
-I disagree with this mentality (as evidenced by this post). My feelings on
-the matter are that my employer is paying me money to be the best at my job
-that I can be. It goes without saying that it brings me tremendous
-satisfaction to be great at my job. That means learning to support whatever
-weird stuff is already in place and engineering better solutions for the ones
-that break. After all, why bandage a problem when you can solve the problem
-altogether?
-
-
-[[two-schools-of-thought]]
-== Two Schools of Thought
-
-All this goes to highlight two different mentalities (yes, I know how cliche
-this sounds). One group of people asks "why", and the other asks "why not".
-
-The "why" people will often look at a problem and think only inside the box the
-problem's circumstances provide. If an outside the box solution is considered,
-that solution is likely only inside of another box (from one proprietary
-solution to another). They consider outside the box thinkers to be reckless and
-"cowboy" because outside the box in many cases entails making ones own
-solution.
-
-The other group, the "why not" folks, tend to view the "why" people as closed
-minded and paralyzed with indecision. They mentally roll their eyes when they
-hear the phrase "enterprise support" and often look at budgets and say "Why are
-we paying so much money when we can do such simple work for free".
-
-Granted, these are generalizations of course. Not all of the above mentalities
-apply globally and neither do they apply in their implied levels. These
-attitudes are spectrums and do not accurately describe everyone in either
-group.
-
-
-[[i-personally...]]
-== I Personally...
-
-When I see a problem at work, my first reaction is not to look for a paid
-solution that's going to cost loads of money and make finding employees with
-experience with the solution harder. The way I view it, if you pay for
-software so expensive that only a fortune 200 company has the resources to buy
-it, you are limiting your pool of hireable people down to those who have
-worked at a fortune 200 company.
-established well enough to be able to handle the problems we throw at it (eg:
-puppet, apache, clustered mariadb, openstack, kvm, native Linux tools, etc). If
-one does not exist and the problem is still surmountable without needing an
-entire development team, I try to build my own solution using design best
-practice and then I document it like my job depends on it (code comments, class
-architecture design documents, database schema design documents, etc). The way
-I see it, building my own solutions gives me better understanding of how
-already existing solutions work. It also helps because it gets me to research
-better ways to do something. From the business' perspective though, they need
-to find a developer to maintain my product when I am gone, so in these cases an
-enterprise solution might be better.
-
-
-[[a-short-list]]
-== A Short List
-
-Here's a short list of people and companies who have asked why not (notice how
-they're all world renowned innovators)...
-
-Google is [seemingly] one of those companies who has a lot of people working
-for it that ask why not. They are experimenting with low traffic server farms
-that use ARM processors to save on electricity. Twitter is built on Ruby, an
-open source language, because it actually can do the job and do it well (why
-not when the alternative is licensing IIS, Windows, MsSql and using .Net).
-Facebook (despite the problems I have with them) is built on PHP and when that
-wasn't fast enough for them, they built their own php to c++ converter. The
-Linux kernel which now runs the majority of the servers on planet earth is
-built by people sitting at their jobs and at home, donating their time because
-the alternatives aren't good enough, and again, why not? OpenStack is used and
-developed by NASA, an organization who has safely sent people to space and
-back, and Rackspace, one of the biggest hosting providers in the world.
-Wikipedia, one of the most frequently visited websites in the world, is built for
-free on PHP and MariaDB because again, why not? Have you ever seen Wikipedia
-crash? The http://en.wikipedia.org/wiki/Lustre_%28file_system%29[Lustre
-filesystem] is an open source load balanced filesystem that runs on 60 of the
-world's top 100 supercomputers and 6 of the top 10. NASA also uses it.
-
-
-[[in-conclusion]]
-== In Conclusion
-
-It's people asking "why not" that brought us things like HTTP, SSH, Apache, pgp
-encryption, multithreading, fiber and copper communications, radio-based
-networking (WiFi), and so much more. I seriously doubt that I will ever make
-anything nearly as cool or world shaping as
-http://www.goodreads.com/quotes/10286-if-i-have-seen-further-it-is-by-standing-on[the
-work upon which everything I have made is built], but I can at least try in the
-effort to not perpetuate bad methodologies and maybe contribute if even a
-little to the knowledge base that is so readily available to all of us.
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Whitelist_MediaWiki_Namespaces_with_$wgWhitelistRead.ascii b/src/Whitelist_MediaWiki_Namespaces_with_$wgWhitelistRead.ascii
deleted file mode 100644
index 1bd597d..0000000
--- a/src/Whitelist_MediaWiki_Namespaces_with_$wgWhitelistRead.ascii
+++ /dev/null
@@ -1,189 +0,0 @@
-Whitelist MediaWiki Namespaces with $wgWhitelistRead
-====================================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-MediaWiki is designed for the most part to be an open document repository. In
-most setups (presumably), everyone can read and only registered users can edit.
-However, permissions can't get much more granular than this. For my project at
-least, I don't just want to prevent anonymous users from editing; I want to
-selectively prevent them from reading certain things.
-
-I looked around for quite some time until I came upon a variable you can set in
-your LocalSettings.php file: **$wgWhitelistRead**. Basically, this variable
-whitelists the pages specified in the array. The downside to this is you can't
-use wildcards or namespaces/categories. You must specify a single page per
-array value. This doesn't quite cut it for my needs. That being said, here's my
-solution (albeit rough).
-
-The end goal here looks like this...
-
-* All users are blocked from reading and writing all pages
-* Users in all groups are then given read access to the whitelisted namespaces
-* Finally, users in the specified groups have read and write access to all
- pages (save for the administration/sysop pages of course).
-
-
-[[limiting-all-access]]
-== Limiting All Access
-
-To do this, in your LocalSettings.php file, place the following four lines...
-
-----
-$wgGroupPermissions['*']['read'] = false;
-$wgGroupPermissions['*']['edit'] = false;
-$wgGroupPermissions['user']['read'] = false;
-$wgGroupPermissions['user']['edit'] = false;
-----
-
-
-[[granting-sysop-access]]
-== Granting Sysop Access
-
-Once you have the lines in the last section in your config file, your entire
-wiki should be unavailable, even to sysop people (they are users after all). To
-give access back to your sysop folk, place the following two lines in your
-LocalSettings.php file
-
-----
-$wgGroupPermissions['sysop']['read'] = true;
-$wgGroupPermissions['sysop']['edit'] = true;
-----
-
-This will only grant access to your sysop authenticated users. If they're not
-already authenticated, they still can't get to the Special:UserLogin form
-(we'll get to that in just a few) to login. They may be sysops at heart, but
-hearts don't authenticate people without usernames and passwords.
-
-
-[[granting-individual-group-access]]
-== Granting Individual Group Access
-
-Now that our sysops have permissions, next we need a custom group so we can
-grant permissions to them. We'll call that group 'GreenTea' (yes, I'm drinking
-some green tea right now). To do that, let's throw another few lines in the
-LocalSettings.php file...
-
-----
-$wgGroupPermissions['greentea'] = $wgGroupPermissions['user'];
-$wgGroupPermissions['greentea']['read'] = true;
-$wgGroupPermissions['greentea']['edit'] = true;
-----
-
-
-[[granting-minimal-global-permissions]]
-== Granting Minimal Global Permissions
-
-Now that our group is set up, we need to whitelist the necessary and wanted
-pages for anonymous folk to log in and/or do their thing depending on what
-groups they are in. To do this, let's add yet another few lines to our
-LocalSettings.php file
-
-----
-$wgWhitelistRead = array(
-  'Main Page',
-  'Special:Userlogin',
-  'Special:UserLogout',
-);
-----
-
-What we just did was whitelist the main page, the login page, and the logout
-page. This allows users to get in and out of your wiki, whether or not their
-permissions allow them access to anything. At this point, you can log in with
-your sysop user and put people into our previously created 'greentea' group.
-Once that's done, the greentea users should have full access to the entire
-wiki.
-
-I would like to note here that at this point, users outside of the greentea
-group will have the same permissions as anonymous/unauthenticated users. They
-cannot read or edit any pages other than the ones currently whitelisted.
-
-
-[[editing-mediawiki-to-whitelist-namespaces]]
-== Editing MediaWiki to Whitelist Namespaces
-
-This is the only part that's out of the ordinary here. We are going to edit
-actual MediaWiki code. The big downside to doing this is that if the MediaWiki
-instance is upgraded, it is highly likely that the changes made in this section
-will be overwritten. Thankfully though, the changes are very simple, so making
-them again shouldn't be a problem. They're so simple in fact, I think the
-MediaWiki folks might actually accept my code into their branch.
-
-To set up our MediaWiki instance so it handles regex whitelist statements, we
-need to edit the Title.php file in the includes directory.
-
-Firstly, we need to comment out the code that processes the whitelist variable.
-Head to around line 1870 in Title.php and comment out just the following lines
-
-----
-//Check with and without underscores
-if ( in_array( $name, $wgWhitelistRead, true ) || in_array( $dbName, $wgWhitelistRead, true ) )
- return true;
-----
-
-
-Now that those have been commented out, we need to add in the code that will
-process regex statements in the whitelist array. Below the lines you just
-commented out, add the following code...
-
-----
-// Match each whitelist entry as a regex against both the page name and its
-// underscored database form. (Entries meant as literal titles would need
-// preg_quote() if they contain regex metacharacters.)
-foreach ( $wgWhitelistRead as $item )
- if ( preg_match( '/^'.$item.'$/', $name )
- || preg_match( '/^'.$item.'$/', $dbName ) ) return true;
-----
-
-
-[[usage]]
-== Usage
-
-To use the changes we just put in place, all that needs to be done is edit the
-$wgWhitelistRead variable in LocalSettings.php again.
-
-Say, for example, that we have a 'HowTo' namespace ('HowTo:Drink Green Tea' for
-example) that we want everyone to be able to read that isn't in the greentea
-group (they have to learn somehow after all). All that needs to be done is a
-little regex...
-
-----
-$wgWhitelistRead = array(
- 'Main Page',
- 'Special:Userlogin',
- 'Special:UserLogout',
- 'HowTo:.*',
-);
-----
-
-That just whitelisted all pages inside the 'HowTo' namespace.
-
-
-[[a-bad-explanation-attempt]]
-== A Bad Explanation Attempt
-
-In case anyone who doesn't know is wondering why you put a *.** at the end of
-the HowTo namespace, here you go.
-
-In regular expressions, various symbols have different meanings. In this case,
-the period signifies any single character: a letter, number, symbol, etc. That
-means that
-'HowTo:.' would match anything like 'HowTo:A', 'HowTo:3', 'HowTo:-', etc. It
-would however not match 'HowTo:A123'. Why? The period in regular expressions
-matches only one character. What we need is to say match any character any
-number of times after 'HowTo:'. For that we'll need the asterisk.
-
-The asterisk in regular expressions is what we call a quantifier. It doesn't
-represent a character so much as a quantity. In non regex terms, an asterisk
-means that the previous character in the regex string can be repeated zero or
-more times and still match. That means that the regular expression 'c*' would
-match the empty string, 'c', 'cccc', 'cccccc', etc. It would however not match,
-for example, 'b', '5', '12345a', etc. In our example, 'HowTo:.*', the period
-represents any character and it is followed by an asterisk, so that means that
-any article that starts with 'HowTo:' will match, no matter what the ending,
-even if it doesn't have one.
-
-Hopefully someone finds this post useful. If anyone has questions about *.**
-please ask them in the comments.
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Writing_an_Array_to_Sql_Conversion_Function.ascii b/src/Writing_an_Array_to_Sql_Conversion_Function.ascii
deleted file mode 100644
index 1471584..0000000
--- a/src/Writing_an_Array_to_Sql_Conversion_Function.ascii
+++ /dev/null
@@ -1,35 +0,0 @@
-Writing an Array to Sql Conversion Function
-===========================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-Lately I've been doing a lot of converting arrays from key-value pairs to SQL
-insert statements. I've been doing it so much in fact that it became pretty
-apparent I would need a toSql function to keep from duplicating this code. With
-that, here's my function. Hopefully it comes in handy for some of you.
-
-----
-function toSql($KeysValues) {
- // Flatten the array into a column list and a quoted, comma separated
- // value list, ready to drop into an INSERT statement
- $keys=implode(',',array_keys($KeysValues));
- $values='\''.implode('\',\'',array_values($KeysValues)).'\'';
- return array($keys, $values);
-}
-----
-
-This spits out an array with a key string and a value string encased in single
-quotes. To use this all you need is
-
-----
-<?php
-$data = toSql($KeysValuesArray);
-$sql = 'INSERT INTO test_table ('.$data[0].') VALUES ('.$data[1].')';
-?>
-----
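-
-One caveat: nothing here escapes the values, so if any of them come from user
-input, this is an SQL injection waiting to happen. A rough sketch of a more
-defensive variant, assuming the mysql_* extension with an open connection:
-
-----
-<?php
-function toSqlEscaped($KeysValues) {
-  // Escape each value before quoting it
-  $values = array_map('mysql_real_escape_string', array_values($KeysValues));
-  $keys = implode(',', array_keys($KeysValues));
-  return array($keys, '\''.implode('\',\'', $values).'\'');
-}
-?>
-----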
-
-
-Category:MySQL
-Category:PHP
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/XMPP_Error:_404:_Remote_Server_Not_Found.ascii b/src/XMPP_Error:_404:_Remote_Server_Not_Found.ascii
deleted file mode 100644
index f5ffff5..0000000
--- a/src/XMPP_Error:_404:_Remote_Server_Not_Found.ascii
+++ /dev/null
@@ -1,66 +0,0 @@
-XMPP Error: 404: Remote Server Not Found
-========================================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-== {doctitle}
-
-I recently moved myself away from yet another Google service (nothing against
-you Google I just like having control over my stuff) and am now running my own
-XMPP server. I have a rather interesting situation though. Since I use Google
-Apps, I already had Google Talk set up for my domain. This wasn't too bad a
-problem since you can just disable that. The real problem arose when I tried
-to create my XMPP server on a server not directly referenced by my DNS A
-records. We'll say the server is located at chat.bitnode.net for this example.
-
-The issue arose when I configured the server so the users' addresses were
-user@bitnode.net instead of user@chat.bitnode.net. With the usernames
-@bitnode.net, I received the error
-
-----
-404: Remote Server Not Found
-----
-
-on all of my friend requests. At first, I thought that was because my jabber
-server couldn't talk to the Google server where my friends' accounts were
-located. Then I realized (unfortunately hours later), that it was an error
-being returned by Google's servers because they couldn't find MY server at the
-location the usernames indicated (IE: bitnode.net). My guess is this has
-something to do with server dialback.
-
-So, a quick rundown of where we are to make sure we're all on the same page...
-An example of a username is jimminy@bitnode.net. The jabber server is located
-at chat.bitnode.net. The username indicates that the chat server is located at
-bitnode.net, which is not the case.
-
-Now the problem is pretty obvious. The _404: Remote Server Not Found_ error is
-because Google's jabber servers are looking at bitnode.net when the server is
-located at chat.bitnode.net.
-
-Thankfully, the solution is relatively simple to implement, but it does
-require access to DNS for your domain. **The solution here is to put a
-few DNS SRV records in**. Due to the fact that everyone's setup is
-different, I will use the default ports for my SRV strings.
-
-----
-_xmpp-client._tcp yourdomain.net 5 0 5222 fqdn.to.server.net
-_xmpp-server._tcp yourdomain.net 5 0 5269 fqdn.to.server.net
-----
-
-So what we are saying here (at least my understanding of it) is that if an
-xmpp connection (_xmpp-server._tcp) tries to connect to yourdomain.net on port
-5269, the traffic is seamlessly sent to fqdn.to.server.net. As additional
-info, the 5s are priority and the 0s are weight.
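-
-Once the records are in, you can verify them from any machine with dig (the
-answer should echo back the priority, weight, port, and target):
-
-----
-dig +short SRV _xmpp-server._tcp.yourdomain.net
-----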
-
-With that, wait fifteen or so minutes for the changes to the master zone to
-take effect and give it a try. This _should_ fix the issue with all of the
-jabber servers out there, but I have only tried this on ejabberd and OpenFire
-so far.
-
-I hope this helped someone out. Please let me know in the comments if anyone
-has any questions.
-
-Category:DNS
-Category:XMPP
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/Xfce_4.10_Pre_2_Review.ascii b/src/Xfce_4.10_Pre_2_Review.ascii
deleted file mode 100644
index fc47224..0000000
--- a/src/Xfce_4.10_Pre_2_Review.ascii
+++ /dev/null
@@ -1,66 +0,0 @@
-Xfce 4.10 Pre 2 Review
-======================
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-
-== {doctitle}
-
-Four days ago on April 14, 2012, http://xfce.org[Xfce] 4.10 pre 2
-http://xfce.org/about/news/?post=1334361600[was released]. It's been a while
-since 4.8 was released, so let's see how it's going.
-
-I'll just start out with this summary. Overall I'm not super impressed
-with this release so far. Don't get me wrong, I'm very excited for the
-release and I completely support the Xfce guys. They're doing good work.
-My only real complaint for this release is simply the amount that came
-with it. There just doesn't seem to be that much new stuff. There were a
-lot of bug fixes and translation updates though, and for the most part, it's
-pretty stable.
-
-Now, with that out of the way, let's take a look at a few screenshots of some
-new stuff I found. Before we do that though, I'm using the Orta theme with the
-AwokenWhite icons. Additionally, my panel is set to alpha 0, so the fancy
-integration into my background is not a part of Xfce 4.10.
-
-Alright, now to screenshots
-
-____
-image:files/00-desktop.jpg[height=400,link="files/00-desktop.jpg"]
-Here we've got just a view of the desktop. Nothing notably different here other
-than the top right, where you can see some blurred out text. That would be my
-username. We now have a user actions widget.
-____
-
-____
-image:files/02-xfce-user-actions.jpg[height=300,link="files/02-xfce-user-actions.jpg"]
-
-So here is the user actions button. There are more settings to add more stuff
-to this menu. I'm just using the default.
-____
-
-
-____
-image:files/01-xfce-settings.jpg[height=400,link="files/01-xfce-settings.jpg"]
-
-Here is the Settings window. The Xfce devs have added categorization to it now.
-You'll also notice at the bottom the "Settings Editor". That's kind of like
-Gnome's gconftool. It seems to have granular settings for Xfce. Most of them
-are just settings you can edit through the Settings dialog.
-____
-
-____
-image:files/03-xfce-window-resize-hot-edge.jpg[height=400,link="files/03-xfce-windows-resize-hot-edge.jpg"]
-
-And finally we have some functionality that I've been hoping for for some time
-now: hot edges for resizing windows. Here I dragged the terminal to the top and
-it auto-resized to the top half of my screen. I dragged the file manager to the
-bottom and it resized to the bottom half of my screen. It also works on the
-left and right sides, but for that you have to set your workspace count to one,
-unfortunately.
-____
-
-
-Category:Linux
-Category:XFCE
-
-// vim: set syntax=asciidoc:
diff --git a/src/Xkcd:1110.ascii b/src/Xkcd:1110.ascii
deleted file mode 100644
index 83f8d9a..0000000
--- a/src/Xkcd:1110.ascii
+++ /dev/null
@@ -1,67 +0,0 @@
-Xkcd:1110
-=========
-:author: Aaron Ball
-:email: nullspoon@iohq.net
-
-== {doctitle}
-
-I really like the webcomic http://xkcd.com[xkcd]. Its author, Randall, is
-hilarious. If you don't read this comic, you definitely should.
-
-Recently Randall http://xkcd.com/1110[drew one] that blew my mind (seriously,
-there are brains everywhere). He basically made what looks to be a 100x100
-(there are some empty tiles in there so that's not super accurate) grid of a
-sad, yet wonderful world. This world, populated by javascript, will take you a
-tremendous amount of time to scroll through. I can only imagine how much time
-this took him to make.
-
-Well, not to put all of that work to waste, but I decided I wanted to assemble
-the entire grid into a single image. The first step to that is to download the
-entire grid of images. With that, I wrote a script.
-
-Currently, that script is downloading all of that comic with a .2 second sleep
-time between images (no DOSing for me). I will post back here with a zip file
-containing every image and as soon as I have the time, I will write a script to
-automagically assemble the entire thing! I will also post that here.
-
-However, first things first (as I said). The first script to download the
-entire comic looks like so (yes, I'm sure there are more efficient ways to do
-this)
-
-----
-#!/bin/bash
-for n in {0..50..1}; do
- # Quadrant 1
- for e in {0..50..1}; do
- wget "http://imgs.xkcd.com/clickdrag/"$n"n"$e"e.png" && echo $n"n"$e"e.png"
- sleep .2;
- done
-
- # Quadrant 2
- for w in {0..50..1}; do
- wget "http://imgs.xkcd.com/clickdrag/"$n"n"$w"w.png" && echo $n"n"$w"w.png"
- sleep .2;
- done
-done
-
-for s in {1..50..1}; do
- # Quadrant 3
- for w in {0..50..1}; do
- wget "http://imgs.xkcd.com/clickdrag/"$s"s"$w"w.png" && echo $s"s"$w"w.png"
- sleep .2;
- done
-
- # Quadrant 4
- for e in {0..50..1}; do
- wget "http://imgs.xkcd.com/clickdrag/"$s"s"$e"e.png" echo $s"s"$e"e.png"
- sleep .2;
- done
-done
-----
-
-Category:xkcd
-Category:Linux
-Category:Scripting
-
-
-// vim: set syntax=asciidoc:
diff --git a/src/files/00-desktop.jpg b/src/files/00-desktop.jpg
deleted file mode 100755
index ac8d307..0000000
--- a/src/files/00-desktop.jpg
+++ /dev/null
Binary files differ
diff --git a/src/files/01-xfce-settings.jpg b/src/files/01-xfce-settings.jpg
deleted file mode 100755
index 4377f13..0000000
--- a/src/files/01-xfce-settings.jpg
+++ /dev/null
Binary files differ
diff --git a/src/files/01TeamExplorerTeamProjects.PNG b/src/files/01TeamExplorerTeamProjects.PNG
deleted file mode 100755
index 7048b70..0000000
--- a/src/files/01TeamExplorerTeamProjects.PNG
+++ /dev/null
Binary files differ
diff --git a/src/files/01_ClickSettings_-_X.jpg b/src/files/01_ClickSettings_-_X.jpg
deleted file mode 100755
index cb12063..0000000
--- a/src/files/01_ClickSettings_-_X.jpg
+++ /dev/null
Binary files differ
diff --git a/src/files/01_Open_Test.jpg b/src/files/01_Open_Test.jpg
deleted file mode 100755
index 6b46fb5..0000000
--- a/src/files/01_Open_Test.jpg
+++ /dev/null
Binary files differ
diff --git a/src/files/01_SQL_Migration_ScriptDatabaseAs.png b/src/files/01_SQL_Migration_ScriptDatabaseAs.png
deleted file mode 100755
index 40296cc..0000000
--- a/src/files/01_SQL_Migration_ScriptDatabaseAs.png
+++ /dev/null
Binary files differ
diff --git a/src/files/01_Welcome.jpg b/src/files/01_Welcome.jpg
deleted file mode 100755
index 6f0375c..0000000
--- a/src/files/01_Welcome.jpg
+++ /dev/null
Binary files differ
diff --git a/src/files/02-xfce-user-actions.jpg b/src/files/02-xfce-user-actions.jpg
deleted file mode 100755
index 50688a1..0000000
--- a/src/files/02-xfce-user-actions.jpg
+++ /dev/null
Binary files differ
diff --git a/src/files/02GroupMemberships.PNG b/src/files/02GroupMemberships.PNG
deleted file mode 100755
index 69e5cab..0000000
--- a/src/files/02GroupMemberships.PNG
+++ /dev/null
Binary files differ
diff --git a/src/files/02_Edit_Test_Run_Configurations.jpg b/src/files/02_Edit_Test_Run_Configurations.jpg
deleted file mode 100755
index 9659ba9..0000000
--- a/src/files/02_Edit_Test_Run_Configurations.jpg
+++ /dev/null
Binary files differ
diff --git a/src/files/02_HardDiskMainSettings_-_X.jpg b/src/files/02_HardDiskMainSettings_-_X.jpg
deleted file mode 100755
index a888166..0000000
--- a/src/files/02_HardDiskMainSettings_-_X.jpg
+++ /dev/null
Binary files differ
diff --git a/src/files/02_SQL_Select_Import_Data.png b/src/files/02_SQL_Select_Import_Data.png
deleted file mode 100755
index 96e91c1..0000000
--- a/src/files/02_SQL_Select_Import_Data.png
+++ /dev/null
Binary files differ
diff --git a/src/files/03-xfce-window-resize-hot-edge.jpg b/src/files/03-xfce-window-resize-hot-edge.jpg
deleted file mode 100755
index 30ca230..0000000
--- a/src/files/03-xfce-window-resize-hot-edge.jpg
+++ /dev/null
Binary files differ
diff --git a/src/files/03SQLServerReportingServices.PNG b/src/files/03SQLServerReportingServices.PNG
deleted file mode 100755
index 846e06a..0000000
--- a/src/files/03SQLServerReportingServices.PNG
+++ /dev/null
Binary files differ
diff --git a/src/files/03_Enter_Service_Account.jpg b/src/files/03_Enter_Service_Account.jpg
deleted file mode 100755
index 6de8ba5..0000000
--- a/src/files/03_Enter_Service_Account.jpg
+++ /dev/null
Binary files differ
diff --git a/src/files/03_SQL_Import_Choose_DataSource.png b/src/files/03_SQL_Import_Choose_DataSource.png
deleted file mode 100755
index 1070873..0000000
--- a/src/files/03_SQL_Import_Choose_DataSource.png
+++ /dev/null
Binary files differ
diff --git a/src/files/03_Select_Controller.jpg b/src/files/03_Select_Controller.jpg
deleted file mode 100755
index e9600b9..0000000
--- a/src/files/03_Select_Controller.jpg
+++ /dev/null
Binary files differ
diff --git a/src/files/04CentralAdministration.PNG b/src/files/04CentralAdministration.PNG
deleted file mode 100755
index e766f8f..0000000
--- a/src/files/04CentralAdministration.PNG
+++ /dev/null
Binary files differ
diff --git a/src/files/04_Answer_Prompt.jpg b/src/files/04_Answer_Prompt.jpg
deleted file mode 100755
index b9cbc3a..0000000
--- a/src/files/04_Answer_Prompt.jpg
+++ /dev/null
Binary files differ
diff --git a/src/files/04_HardDiskPreConvert_-_X.jpg b/src/files/04_HardDiskPreConvert_-_X.jpg
deleted file mode 100755
index 5d122d1..0000000
--- a/src/files/04_HardDiskPreConvert_-_X.jpg
+++ /dev/null
Binary files differ
diff --git a/src/files/04_Rediness_Checks_Successful.jpg b/src/files/04_Rediness_Checks_Successful.jpg
deleted file mode 100755
index ddf63f9..0000000
--- a/src/files/04_Rediness_Checks_Successful.jpg
+++ /dev/null
Binary files differ
diff --git a/src/files/05_Applying_Configuration_Settings.jpg b/src/files/05_Applying_Configuration_Settings.jpg
deleted file mode 100755
index 8bf623a..0000000
--- a/src/files/05_Applying_Configuration_Settings.jpg
+++ /dev/null
Binary files differ
diff --git a/src/files/06_Success.jpg b/src/files/06_Success.jpg
deleted file mode 100755
index ad9e172..0000000
--- a/src/files/06_Success.jpg
+++ /dev/null
Binary files differ
diff --git a/src/files/07_HardDiskConverting_-_X.jpg b/src/files/07_HardDiskConverting_-_X.jpg
deleted file mode 100755
index 519a2b9..0000000
--- a/src/files/07_HardDiskConverting_-_X.jpg
+++ /dev/null
Binary files differ
diff --git a/src/files/A-hotmail01.jpg b/src/files/A-hotmail01.jpg
deleted file mode 100755
index 6f10cf4..0000000
--- a/src/files/A-hotmail01.jpg
+++ /dev/null
Binary files differ
diff --git a/src/files/A-hotmail02.jpg b/src/files/A-hotmail02.jpg
deleted file mode 100755
index 0fa42e2..0000000
--- a/src/files/A-hotmail02.jpg
+++ /dev/null
Binary files differ
diff --git a/src/files/Cc-sa_88x31.png b/src/files/Cc-sa_88x31.png
deleted file mode 100755
index f0a944e..0000000
--- a/src/files/Cc-sa_88x31.png
+++ /dev/null
Binary files differ
diff --git a/src/files/Cell_comparison.ods b/src/files/Cell_comparison.ods
deleted file mode 100644
index 2a76bb1..0000000
--- a/src/files/Cell_comparison.ods
+++ /dev/null
Binary files differ
diff --git a/src/files/Cell_comparison.xlsx b/src/files/Cell_comparison.xlsx
deleted file mode 100644
index 850a0ff..0000000
--- a/src/files/Cell_comparison.xlsx
+++ /dev/null
Binary files differ
diff --git a/src/files/IIS_01_Add_Role.jpg b/src/files/IIS_01_Add_Role.jpg
deleted file mode 100755
index 71379fd..0000000
--- a/src/files/IIS_01_Add_Role.jpg
+++ /dev/null
Binary files differ
diff --git a/src/files/IIS_02_Role_Services.jpg b/src/files/IIS_02_Role_Services.jpg
deleted file mode 100755
index 8ffd1eb..0000000
--- a/src/files/IIS_02_Role_Services.jpg
+++ /dev/null
Binary files differ
diff --git a/src/files/Img_2335_gsiii-no-glass-sm.jpg b/src/files/Img_2335_gsiii-no-glass-sm.jpg
deleted file mode 100755
index 9d22252..0000000
--- a/src/files/Img_2335_gsiii-no-glass-sm.jpg
+++ /dev/null
Binary files differ
diff --git a/src/files/Img_2337_gsiii-no-glass-dirty-sm.jpg b/src/files/Img_2337_gsiii-no-glass-dirty-sm.jpg
deleted file mode 100755
index 93bb0ea..0000000
--- a/src/files/Img_2337_gsiii-no-glass-dirty-sm.jpg
+++ /dev/null
Binary files differ
diff --git a/src/files/Img_2338_gsiii-glass-pile-sm.jpg b/src/files/Img_2338_gsiii-glass-pile-sm.jpg
deleted file mode 100755
index 747b3db..0000000
--- a/src/files/Img_2338_gsiii-glass-pile-sm.jpg
+++ /dev/null
Binary files differ
diff --git a/src/files/Img_2343_gsiii-no-glass-clean-sm.jpg b/src/files/Img_2343_gsiii-no-glass-clean-sm.jpg
deleted file mode 100755
index 0e4c16f..0000000
--- a/src/files/Img_2343_gsiii-no-glass-clean-sm.jpg
+++ /dev/null
Binary files differ
diff --git a/src/files/Img_2344_gsiii-new-glass-sm.jpg b/src/files/Img_2344_gsiii-new-glass-sm.jpg
deleted file mode 100755
index b0d9a8a..0000000
--- a/src/files/Img_2344_gsiii-new-glass-sm.jpg
+++ /dev/null
Binary files differ
diff --git a/src/files/Img_2348_gsiii-new-glass-and-case-sm.jpg b/src/files/Img_2348_gsiii-new-glass-and-case-sm.jpg
deleted file mode 100755
index 4a954ac..0000000
--- a/src/files/Img_2348_gsiii-new-glass-and-case-sm.jpg
+++ /dev/null
Binary files differ
diff --git a/src/files/MgmtStudio1.jpg b/src/files/MgmtStudio1.jpg
deleted file mode 100755
index 7385d42..0000000
--- a/src/files/MgmtStudio1.jpg
+++ /dev/null
Binary files differ
diff --git a/src/files/MgmtStudio2.jpg b/src/files/MgmtStudio2.jpg
deleted file mode 100755
index 6862f91..0000000
--- a/src/files/MgmtStudio2.jpg
+++ /dev/null
Binary files differ
diff --git a/src/files/ProPresenter-Multiselect_Move.png b/src/files/ProPresenter-Multiselect_Move.png
deleted file mode 100755
index 734d3cc..0000000
--- a/src/files/ProPresenter-Multiselect_Move.png
+++ /dev/null
Binary files differ
diff --git a/src/files/ProPresenter-Slide_lock-locked.png b/src/files/ProPresenter-Slide_lock-locked.png
deleted file mode 100755
index 99fc397..0000000
--- a/src/files/ProPresenter-Slide_lock-locked.png
+++ /dev/null
Binary files differ
diff --git a/src/files/ProPresenter-Slide_lock-unlocked.png b/src/files/ProPresenter-Slide_lock-unlocked.png
deleted file mode 100755
index 97f059c..0000000
--- a/src/files/ProPresenter-Slide_lock-unlocked.png
+++ /dev/null
Binary files differ
diff --git a/src/files/ProPresenter_Slide_Loops00.png b/src/files/ProPresenter_Slide_Loops00.png
deleted file mode 100755
index 9503eed..0000000
--- a/src/files/ProPresenter_Slide_Loops00.png
+++ /dev/null
Binary files differ
diff --git a/src/files/ProPresenter_Slide_Loops01.png b/src/files/ProPresenter_Slide_Loops01.png
deleted file mode 100755
index de48ee9..0000000
--- a/src/files/ProPresenter_Slide_Loops01.png
+++ /dev/null
Binary files differ
diff --git a/src/files/ProPresenter_Slide_Loops02.png b/src/files/ProPresenter_Slide_Loops02.png
deleted file mode 100755
index 18fcf47..0000000
--- a/src/files/ProPresenter_Slide_Loops02.png
+++ /dev/null
Binary files differ
diff --git a/src/files/ProPresenter_Slide_Loops03.png b/src/files/ProPresenter_Slide_Loops03.png
deleted file mode 100755
index 27e5ad2..0000000
--- a/src/files/ProPresenter_Slide_Loops03.png
+++ /dev/null
Binary files differ
diff --git a/src/files/ProPresenter_Slide_Loops04.png b/src/files/ProPresenter_Slide_Loops04.png
deleted file mode 100755
index d268944..0000000
--- a/src/files/ProPresenter_Slide_Loops04.png
+++ /dev/null
Binary files differ
diff --git a/src/files/SQL_03_Instance_Configuration.jpg b/src/files/SQL_03_Instance_Configuration.jpg
deleted file mode 100755
index 1895c94..0000000
--- a/src/files/SQL_03_Instance_Configuration.jpg
+++ /dev/null
Binary files differ
diff --git a/src/files/Sansa-Fuze-MM-Playlist-Options.jpg b/src/files/Sansa-Fuze-MM-Playlist-Options.jpg
deleted file mode 100755
index af546a9..0000000
--- a/src/files/Sansa-Fuze-MM-Playlist-Options.jpg
+++ /dev/null
Binary files differ
diff --git a/src/files/TFS_02_Features_to_Install.jpg b/src/files/TFS_02_Features_to_Install.jpg
deleted file mode 100755
index 59ef5a3..0000000
--- a/src/files/TFS_02_Features_to_Install.jpg
+++ /dev/null
Binary files differ
diff --git a/src/files/TFS_04_MidInstall_Restart.jpg b/src/files/TFS_04_MidInstall_Restart.jpg
deleted file mode 100755
index 347aa42..0000000
--- a/src/files/TFS_04_MidInstall_Restart.jpg
+++ /dev/null
Binary files differ
diff --git a/src/files/Toshiba_Canvio_Slim.png b/src/files/Toshiba_Canvio_Slim.png
deleted file mode 100755
index cd7faf7..0000000
--- a/src/files/Toshiba_Canvio_Slim.png
+++ /dev/null
Binary files differ
diff --git a/src/files/WD_Elements.jpg b/src/files/WD_Elements.jpg
deleted file mode 100755
index d4b3c13..0000000
--- a/src/files/WD_Elements.jpg
+++ /dev/null
Binary files differ
diff --git a/src/files/Winscp01.jpg b/src/files/Winscp01.jpg
deleted file mode 100755
index 8175a8b..0000000
--- a/src/files/Winscp01.jpg
+++ /dev/null
Binary files differ
diff --git a/src/files/Winscp02.jpg b/src/files/Winscp02.jpg
deleted file mode 100755
index 1e1949d..0000000
--- a/src/files/Winscp02.jpg
+++ /dev/null
Binary files differ
diff --git a/src/files/gimp271-sm.jpg b/src/files/gimp271-sm.jpg
deleted file mode 100755
index 1aed580..0000000
--- a/src/files/gimp271-sm.jpg
+++ /dev/null
Binary files differ
diff --git a/src/files/iohq-logo.png b/src/files/iohq-logo.png
deleted file mode 100755
index 50863bf..0000000
--- a/src/files/iohq-logo.png
+++ /dev/null
Binary files differ
diff --git a/src/files/pathauto-alias-strings0.jpg b/src/files/pathauto-alias-strings0.jpg
deleted file mode 100755
index bea7a49..0000000
--- a/src/files/pathauto-alias-strings0.jpg
+++ /dev/null
Binary files differ
diff --git a/src/files/terminal001.png b/src/files/terminal001.png
deleted file mode 100755
index e3ccdef..0000000
--- a/src/files/terminal001.png
+++ /dev/null
Binary files differ
diff --git a/src/files/terminal002b.png b/src/files/terminal002b.png
deleted file mode 100755
index 522235a..0000000
--- a/src/files/terminal002b.png
+++ /dev/null
Binary files differ
diff --git a/src/files/terminal003.png b/src/files/terminal003.png
deleted file mode 100755
index a42b4c5..0000000
--- a/src/files/terminal003.png
+++ /dev/null
Binary files differ
diff --git a/src/files/terminal004.png b/src/files/terminal004.png
deleted file mode 100755
index 96fc316..0000000
--- a/src/files/terminal004.png
+++ /dev/null
Binary files differ
diff --git a/src/files/wpid-screenshot_29.jpg b/src/files/wpid-screenshot_29.jpg
deleted file mode 100644
index 9935ac9..0000000
--- a/src/files/wpid-screenshot_29.jpg
+++ /dev/null
Binary files differ
diff --git a/src/files/wpid-screenshot_31.jpg b/src/files/wpid-screenshot_31.jpg
deleted file mode 100644
index 6e7024f..0000000
--- a/src/files/wpid-screenshot_31.jpg
+++ /dev/null
Binary files differ
diff --git a/src/files/wpid-screenshot_32.jpg b/src/files/wpid-screenshot_32.jpg
deleted file mode 100644
index a65287e..0000000
--- a/src/files/wpid-screenshot_32.jpg
+++ /dev/null
Binary files differ
diff --git a/src/files/wpid-screenshot_33.jpg b/src/files/wpid-screenshot_33.jpg
deleted file mode 100644
index 4b65805..0000000
--- a/src/files/wpid-screenshot_33.jpg
+++ /dev/null
Binary files differ
diff --git a/src/files/wpid-screenshot_36.jpg b/src/files/wpid-screenshot_36.jpg
deleted file mode 100644
index 914ddb5..0000000
--- a/src/files/wpid-screenshot_36.jpg
+++ /dev/null
Binary files differ
diff --git a/src/res/footer.html b/src/res/footer.html
deleted file mode 100644
index 11a6640..0000000
--- a/src/res/footer.html
+++ /dev/null
@@ -1,9 +0,0 @@
-</div>
-<div id="footer">
-This work is licensed under a
-<a href="http://creativecommons.org/licenses/by-sa/3.0/deed.en_US">Creative Commons Attribution-ShareAlike 3.0 Unported License</a>
- by Aaron Ball (nullspoon@iohq.net).
-</div>
-
-</body>
-</html>
diff --git a/src/res/header.html b/src/res/header.html
deleted file mode 100644
index c70ede5..0000000
--- a/src/res/header.html
+++ /dev/null
@@ -1,12 +0,0 @@
-<!DOCTYPE html>
-<html>
-<head>
- <link rel="stylesheet" type="text/css" href="res/style.css" />
-</head>
-<body>
-<div id="nav-header">
- <a href="index.html">
- <img src="files/iohq-logo.png" />
- </a>
-</div>
-<div id="body-content">
diff --git a/src/res/style.css b/src/res/style.css
deleted file mode 100644
index 1a9cfe4..0000000
--- a/src/res/style.css
+++ /dev/null
@@ -1,204 +0,0 @@
-body {
- color:#4f4b43;
- font-family:Oxygen-sans, sans-serif;
- background-color:#ddd;
- font-size:15px;
- margin:0px;
- padding:0px;
-}
-
-a,
-a:visited {
- color:#107d8e;
-}
-
-h1 {
- color:#006d7e;
- font-family:Oxygen-sans, sans-serif;
-}
-
-h2 {
- color:#107d8e;
- font-family:Oxygen-sans, sans-serif;
-}
-
-h3, h4 {
- color:#309dae;
- font-family:Oxygen-sans, sans-serif;
-}
-
-span.monospaced {
- font-family:monospace;
- background-color:#e0e0e0;
- padding:2px;
- border:1px solid #bbb;
- border-radius:3px;
-}
-
-/**
- * Navbar Styles
- */
-div#nav-header {
- background-color:#333;
- background:linear-gradient(to bottom, #444, #333, #333, #222);
- box-shadow:0px 0px 4px #4f4b43;
- z-index:1;
- color:#eee;
- height:73px;
- margin:0px;
- padding:5px;
-}
-
-div#nav-header img {
- border:none;
- margin:4px;
-}
-
-
-/**
- * Body Styles
- */
-div#body-content {
- background:none;
- background-color:#eee;
- /*box-shadow:inset 8px 0px 10px -10px #000;*/
- border:none;
- box-shadow:inset 0px 0px 5px #4f4b43;
- margin-right:30px;
- margin-top:20px;
- margin-left:176px;
- border-radius:5px;
- padding:30px;
-}
-
-img {
- border:1px solid #888;
- border-radius:5px;
-}
-
-div.quoteblock,
-pre {
- background-color:#f5fcff;
- border:1px dotted #999;
- padding:5px 15px 7px 15px;
- border-radius:5px;
- margin:5px 0px;
- color:#555;
- font-size:1.0em;
- display:inline-block;
-}
-
-blockquote {
- background-color:#eee;
- padding:5px;
- border:1px dotted #aaa;
- border-radius:4px;
- margin-top:10px;
- margin-bottom:10px;
-}
-
-div.index-post {
- width:98%;
- margin: 10px auto 10px auto;
- border:1px solid #ccc;
- background-color:#e5e5e5;
- border-radius:5px;
- padding:10px;
-}
-
-div.index-post .readmore {
- display:block;
- width:98%;
- margin-left:0px;
- margin-right:auto;
- text-align:right;
- font-size:.9em;
-}
-
-div#footer {
- background:none;
- margin-right:30px;
- margin-top:20px;
- margin-left:196px;
- font-size:.9em;
-}
-
-div#left-navigation {
- margin-left:176px;
-}
-
-div#p-logo {
- height:85px;
- position:relative;
- top:5px;
- left:0px;
-}
-
-div#p-logo a {
- height:65px;
- width:140px;
-}
-
-
-div#p-personal { background:none; }
-
-div#p-personal a:link,
-div#p-personal a:visited {
- color:#eee;
-}
-
-
-/**
- * Table styles
- */
-table { border-collapse: collapse; }
-
-table td, table th {
- border: 1px solid #999;
- padding:1px 5px;
- background-color:#f5f5f5;
-}
-
-
-.title {
- font-weight:800;
- font-size:1.05em;
- background-color:#ddd;
- display:inline;
- padding:1px 5px;
- margin:0px;
- border-radius:4px;
- border:1px solid #bbb;
-}
-
-
-/**
- * Definition lists
- */
-dl dt {
- color:#333;
- font-size:1.04em;
- font-weight:800;
- padding-top:5px;
- line-height:.9em;
-}
-
-dl dd p {
- line-height:.5em;
- color:#555;
-}
-
-dl {
- padding:10px 0px;
-}
-
-div.terminal div pre {
- width:800px;
- padding:5px;
- margin:5px 0px;
- font-family: Courier;
- background-color:#222;
- color:#ddd;
- border:1px solid #999;
- border-radius:4px;
-}
diff --git a/src/test.ascii b/src/test.ascii
deleted file mode 100644
index 74438f3..0000000
--- a/src/test.ascii
+++ /dev/null
@@ -1,8 +0,0 @@
-This is a test
-==============
-
-== {doctitle}
-
-Testing out git post-receive hooks
-
-More text (this will all get squashed out of existence)